diff -Nru skimage-0.13.1/.appveyor.yml skimage-0.14.0/.appveyor.yml --- skimage-0.13.1/.appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/.appveyor.yml 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,78 @@ +# AppVeyor.com is a Continuous Integration service to build and run tests under +# Windows + +environment: + matrix: + - PYTHON: C:\Python27-x64 + - PYTHON: C:\Python35-x64 + - PYTHON: C:\Python36 + - PYTHON: C:\Python36-x64 + +matrix: + fast_finish: true + +install: + - ECHO "Filesystem root:" + - ps: "ls \"C:/\"" + + # If there is a newer build queued for the same PR, cancel this one. + # The AppVeyor 'rollout builds' option is supposed to serve the same + # purpose but is problematic because it tends to cancel builds pushed + # directly to master instead of just PR builds. + # credits: JuliaLang developers. + - ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod ` + https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | ` + Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { ` + throw "There are newer queued builds for this pull request, failing early." } + + - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" + - "python -m pip install --retries 3 -U pip" + + # Check that we have the expected version and architecture for Python + - "python --version" + - "python -c \"import struct; print(struct.calcsize('P') * 8)\"" + - "pip --version" + + # Get stdint headers needed by tifffile.c. + - "curl https://raw.githubusercontent.com/chemeris/msinttypes/master/inttypes.h -o skimage/external/tifffile/inttypes.h" + - "curl https://raw.githubusercontent.com/chemeris/msinttypes/master/stdint.h -o skimage/external/tifffile/stdint.h" + + # Install the build and runtime dependencies of the project. 
+ # The --pre flag is necessary to grab a SciPy wheel, which is in + # pre-release at the time of writing (03-10-2017) + - pip install --retries 3 --pre -r requirements.txt + - pip install --retries 3 -r requirements/build.txt + - python setup.py bdist_wheel bdist_wininst + - ps: "ls dist" + + # Install the generated wheel package to test it + - "pip install --pre --no-index --find-links dist/ scikit-image" + +# Not a .NET project, we build scikit-image in the install step instead +build: false + +test_script: + ## Build the docs + #- pip install sphinx pytest-runner sphinx-gallery + #- SET PYTHON=%PYTHON%\\python.exe && cd doc && make html + + # Change to a non-source folder to make sure we run the tests on the + # installed library. + - "cd C:\\" + + # Use the Agg backend in Matplotlib + - echo backend:Agg > matplotlibrc + + # Run unit tests with pytest + - pytest -v --pyargs skimage + +artifacts: + # Archive the generated wheel package in the ci.appveyor.com build report. + - path: dist\* + +#on_success: +# - TODO: upload the content of dist/*.whl to a public wheelhouse + +cache: + # Avoid re-downloading large packages + - '%APPDATA%\pip\Cache' diff -Nru skimage-0.13.1/appveyor.yml skimage-0.14.0/appveyor.yml --- skimage-0.13.1/appveyor.yml 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/appveyor.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -# AppVeyor.com is a Continuous Integration service to build and run tests under -# Windows - -environment: - global: - # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the - # /E:ON and /V:ON options are not enabled in the batch script intepreter - # See: http://stackoverflow.com/a/13751649/163740 - CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\tools\\appveyor\\run_with_env.cmd" - - matrix: - - PYTHON: "C:\\Python27-conda32" - PYTHON_VERSION: "2.7" - PYTHON_ARCH: "32" - - # disable other builds until they can be run in parallel - #- PYTHON: "C:\\Python27-conda64" - # PYTHON_VERSION: "2.7" - # 
PYTHON_ARCH: "64" - - #- PYTHON: "C:\\Python35-conda32" - # PYTHON_VERSION: "3.5" - # PYTHON_ARCH: "32" - - #- PYTHON: "C:\\Python35-conda64" - # PYTHON_VERSION: "3.5" - # PYTHON_ARCH: "64" - -install: - - ECHO "Filesystem root:" - - ps: "ls \"C:/\"" - - # Install Python and all the required packages. - - "powershell ./tools/appveyor/install.ps1" - - # Prepend newly installed Python to the PATH of this build - # (this cannot be done from inside the powershell script as it would require - # to restart the parent CMD process). - - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" - - # Check that we have the expected version and architecture for Python - - "python --version" - - "python -c \"import struct; print(struct.calcsize('P') * 8)\"" - - # Get stdint headers needed by tifffile.c. - - "curl https://raw.githubusercontent.com/chemeris/msinttypes/master/inttypes.h -o skimage/external/tifffile/inttypes.h" - - "curl https://raw.githubusercontent.com/chemeris/msinttypes/master/stdint.h -o skimage/external/tifffile/stdint.h" - - # Install the build and runtime dependencies of the project. - - "%CMD_IN_ENV% pip install -v -r requirements.txt" - - "%CMD_IN_ENV% python setup.py bdist_wheel bdist_wininst" - - ps: "ls dist" - - # Install the generated wheel package to test it - - "pip install --pre --no-index --find-links dist/ scikit-image" - -# Not a .NET project, we build scikit-image in the install step instead -build: false - -test_script: - # Build the docs - - pip install sphinx pytest-runner sphinx-gallery - - SET PYTHON=%PYTHON%\\python.exe && cd doc && make html - - # Change to a non-source folder to make sure we run the tests on the - # installed library. - - "cd C:\\" - - # Use the Agg backend in Matplotlib - - echo backend:Agg > matplotlibrc - - # Run unit tests with nose - - "python -c \"import nose; nose.main()\" -v -s skimage" - -artifacts: - # Archive the generated wheel package in the ci.appveyor.com build report. 
- - path: dist\* - -#on_success: -# - TODO: upload the content of dist/*.whl to a public wheelhouse diff -Nru skimage-0.13.1/bento.info skimage-0.14.0/bento.info --- skimage-0.13.1/bento.info 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/bento.info 1970-01-01 00:00:00.000000000 +0000 @@ -1,182 +0,0 @@ -Name: scikit-image -Version: 0.13.1 -Summary: Image processing routines for SciPy -Url: http://scikit-image.org -DownloadUrl: http://github.com/scikit-image/scikit-image -Description: Image Processing SciKit - - Image processing algorithms for SciPy, including IO, morphology, filtering, - warping, color manipulation, object detection, etc. - - Please refer to the online documentation at - http://scikit-image.org/ -Maintainer: Stefan van der Walt -MaintainerEmail: stefan@sun.ac.za -License: Modified BSD -Classifiers: - Development Status :: 4 - Beta, - Environment :: Console, - Intended Audience :: Developers, - Intended Audience :: Science/Research, - License :: OSI Approved :: BSD License, - Programming Language :: C, - Programming Language :: Python, - Programming Language :: Python :: 3, - Topic :: Scientific/Engineering, - Operating System :: Microsoft :: Windows, - Operating System :: POSIX, - Operating System :: Unix, - Operating System :: MacOS - -UseBackends: Waf - -Library: - Packages: - skimage, skimage.color, skimage.data, skimage.draw, skimage.exposure, - skimage.feature, skimage.filters, skimage.future, skimage.future.graph, - skimage.graph, skimage.io, - skimage.io._plugins, skimage.measure, skimage.morphology, - skimage.scripts, skimage.restoration, skimage.segmentation, - skimage.transform, skimage.util - Extension: skimage.io._plugins._colormixer - Sources: - skimage/io/_plugins/_colormixer.pyx - Extension: skimage.measure._pnpoly - Sources: - skimage/measure/_pnpoly.pyx - Extension: skimage.measure._find_contours_cy - Sources: - skimage/measure/_find_contours_cy.pyx - Extension: skimage.measure._moments_cy - Sources: - 
skimage/measure/_moments_cy.pyx - Extension: skimage.measure._marching_cubes_classic_cy - Sources: - skimage/measure/_marching_cubes_classic_cy.pyx - Extension: skimage.measure._marching_cubes_lewiner_cy - Sources: - skimage/measure/_marching_cubes_lewiner_cy.pyx - Extension: skimage.graph._mcp - Sources: - skimage/graph/_mcp.pyx - Extension: skimage.io._plugins._histograms - Sources: - skimage/io/_plugins/_histograms.pyx - Extension: skimage.transform._hough_transform - Sources: - skimage/transform/_hough_transform.pyx - Extension: skimage.filters._ctmf - Sources: - skimage/filters/_ctmf.pyx - Extension: skimage.measure._ccomp - Sources: - skimage/measure/_ccomp.pyx - Extension: skimage.morphology._watershed - Sources: - skimage/morphology/_watershed.pyx - Extension: skimage.morphology._convex_hull - Sources: - skimage/morphology/_convex_hull.pyx - Extension: skimage.draw._draw - Sources: - skimage/draw/_draw.pyx - Extension: skimage.graph._spath - Sources: - skimage/graph/_spath.pyx - Extension: skimage.graph.heap - Sources: - skimage/graph/heap.pyx - Extension: skimage.morphology._greyreconstruct - Sources: - skimage/morphology/_greyreconstruct.pyx - Extension: skimage.feature.censure_cy - Sources: - skimage/feature/censure_cy.pyx - Extension: skimage.feature.orb_cy - Sources: - skimage/feature/orb_cy.pyx - Extension: skimage.feature.brief_cy - Sources: - skimage/feature/brief_cy.pyx - Extension: skimage.feature.corner_cy - Sources: - skimage/feature/corner_cy.pyx - Extension: skimage.feature._hoghistogram - Sources: - skimage/feature/_hoghistogram.pyx - Extension: skimage.feature._texture - Sources: - skimage/feature/_texture.pyx - Extension: skimage._shared.transform - Sources: - skimage/_shared/transform.pyx - Extension: skimage._shared.interpolation - Sources: - skimage/_shared/interpolation.pyx - Extension: skimage.segmentation._slic - Sources: - skimage/segmentation/_slic.pyx - Extension: skimage.segmentation._quickshift_cy - Sources: - 
skimage/segmentation/_quickshift_cy.pyx - Extension: skimage.morphology._skeletonize_cy - Sources: - skimage/morphology/_skeletonize_cy.pyx - Extension: skimage.morphology._skeletonize_3d_cy - Sources: - skimage/morphology/_skeletonize_3d_cy.pyx - Extension: skimage.transform._radon_transform - Sources: - skimage/transform/_radon_transform.pyx - Extension: skimage.transform._warps_cy - Sources: - skimage/transform/_warps_cy.pyx - Extension: skimage.segmentation._felzenszwalb_cy - Sources: - skimage/segmentation/_felzenszwalb_cy.pyx - Extension: skimage._shared.geometry - Sources: - skimage/_shared/geometry.pyx - Extension: skimage.filters.rank.generic_cy - Sources: - skimage/filters/rank/generic_cy.pyx - Extension: skimage.filters.rank.percentile_cy - Sources: - skimage/filter/rank/percentile_cy.pyx - Extension: skimage.filters.rank.core_cy - Sources: - skimage/filter/rank/core_cy.pyx - Extension: skimage.filters.rank.bilateral_cy - Sources: - skimage/filters/rank/bilateral_cy.pyx - Extension: skimage.restoration._unwrap_1d - Sources: - skimage/restoration/_unwrap_1d.pyx - Extension: skimage.restoration._unwrap_2d - Sources: - skimage/restoration/_unwrap_2d.pyx skimage/exposure/unwrap_2d_ljmu.c - Extension: skimage.restoration._unwrap_3d - Sources: - skimage/restoration/_unwrap_3d.pyx skimage/exposure/unwrap_3d_ljmu.c - Extension: skimage.restoration._denoise_cy - Sources: - skimage/restoration/_denoise_cy.pyx - Extension: skimage.restoration._nl_means_denoising - Sources: - skimage/restoration/_nl_means_denoising.pyx - Extension: skimage.feature._hessian_det_appx - Sources: - skimage/exposure/_hessian_det_appx.pyx - Extension: skimage.future.graph._ncut_cy - Sources: - skimage/future/graph/_ncut_cy.pyx - Extension: skimage.external.tifffile._tifffile - Sources: - skimage/external/tifffile/_tifffile.c - Extension: skimage.transform._seam_carving - Sources: - skimage/transform/_seam_carving.pyx - -Executable: skivi - Module: skimage.scripts.skivi - Function: main 
diff -Nru skimage-0.13.1/conftest.py skimage-0.14.0/conftest.py --- skimage-0.13.1/conftest.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/conftest.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,19 @@ +# Use legacy numpy printing. This fix is made to keep doctests functional. +# For more info, see https://github.com/scikit-image/scikit-image/pull/2935 . +# TODO: remove this workaround once minimal required numpy is set to 1.14.0 +from distutils.version import LooseVersion as Version +import numpy as np + +if Version(np.__version__) >= Version('1.14'): + np.set_printoptions(legacy='1.13') + +# List of files that pytest should ignore +collect_ignore = ["setup.py", + "skimage/io/_plugins", + "doc/", + "tools/", + "viewer_examples"] +try: + import visvis +except ImportError: + collect_ignore.append("skimage/measure/mc_meta/visual_test.py") diff -Nru skimage-0.13.1/CONTRIBUTING.txt skimage-0.14.0/CONTRIBUTING.txt --- skimage-0.13.1/CONTRIBUTING.txt 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/CONTRIBUTING.txt 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,454 @@ +.. _howto_contribute: + +How to contribute to ``skimage`` +================================ + +Developing Open Source is great fun! Join us on the `scikit-image mailing +list `_ and tell us +which of the following challenges you'd like to solve. + +* Mentoring is available for those new to scientific programming in Python. +* If you're looking for something to implement, you can find a list of + `requested features on GitHub `__. + In addition, you can browse the + `open issues on GitHub `__. +* The technical detail of the `development process`_ is summed up below. + Refer to the :doc:`gitwash ` for a step-by-step tutorial. + +.. contents:: + :local: + +Development process +------------------- + +Here's the long and short of it: + +1. 
If you are a first-time contributor: + + * Go to `https://github.com/scikit-image/scikit-image + `_ and click the + "fork" button to create your own copy of the project. + + * Clone the project to your local computer:: + + git clone https://github.com/your-username/scikit-image.git + + * Change the directory:: + + cd scikit-image + + * Add the upstream repository:: + + git remote add upstream https://github.com/scikit-image/scikit-image.git + + * Now, you have remote repositories named: + + - ``upstream``, which refers to the ``scikit-image`` repository + - ``origin``, which refers to your personal fork + +2. Develop your contribution: + + * Pull the latest changes from upstream:: + + git checkout master + git pull upstream master + + * Create a branch for the feature you want to work on. Since the + branch name will appear in the merge message, use a sensible name + such as 'transform-speedups':: + + git checkout -b transform-speedups + + * Commit locally as you progress (``git add`` and ``git commit``) + +3. To submit your contribution: + + * Push your changes back to your fork on GitHub:: + + git push origin transform-speedups + + * Enter your GitHub username and password (repeat contributors or advanced + users can remove this step by connecting to GitHub with SSH. See detailed + instructions below if desired). + + * Go to GitHub. The new branch will show up with a green Pull Request + button - click it. + + * If you want, post on the `mailing list + `_ to explain your changes or + to ask for review. + +For a more detailed discussion, read these :doc:`detailed documents +` on how to use Git with ``scikit-image`` +(``_). + +4. Review process: + + * Reviewers (the other developers and interested community members) will + write inline and/or general comments on your Pull Request (PR) to help + you improve its implementation, documentation and style. 
Every single + developer working on the project has their code reviewed, and we've come + to see it as friendly conversation from which we all learn and the + overall code quality benefits. Therefore, please don't let the review + discourage you from contributing: its only aim is to improve the quality + of project, not to criticize (we are, after all, very grateful for the + time you're donating!). + + * To update your pull request, make your changes on your local repository + and commit. As soon as those changes are pushed up (to the same branch as + before) the pull request will update automatically. + + * `Travis-CI `__, a continuous integration service, + is triggered after each Pull Request update to build the code, run unit + tests, measure code coverage and check coding style (PEP8) of your + branch. The Travis tests must pass before your PR can be merged. If + Travis fails, you can find out why by clicking on the "failed" icon (red + cross) and inspecting the build and test log. + + * A pull request must be approved by two core team members before merging. + +5. Document changes + + If your change introduces any API modifications, please update + ``doc/source/api_changes.txt``. + + If your change introduces a deprecation, add a reminder to ``TODO.txt`` + for the team to remove the deprecated functionality in the future. + +.. note:: + + To reviewers: if it is not obvious from the PR description, add a short + explanation of what a branch did to the merge message and, if closing a + bug, also add "Closes #123" where 123 is the issue number. + + +Divergence between ``upstream master`` and your feature branch +-------------------------------------------------------------- + +If GitHub indicates that the branch of your Pull Request can no longer +be merged automatically, merge the master branch into yours:: + + git fetch upstream master + git merge upstream/master + +If any conflicts occur, they need to be fixed before continuing. 
See +which files are in conflict using:: + + git status + +Which displays a message like:: + + Unmerged paths: + (use "git add ..." to mark resolution) + + both modified: file_with_conflict.txt + +Inside the conflicted file, you'll find sections like these:: + + <<<<<<< HEAD + The way the text looks in your branch + ======= + The way the text looks in the master branch + >>>>>>> master + +Choose one version of the text that should be kept, and delete the +rest:: + + The way the text looks in your branch + +Now, add the fixed file:: + + git add file_with_conflict.txt + +Once you've fixed all merge conflicts, do:: + + git commit + +.. note:: + + Advanced Git users are encouraged to `rebase instead of merge + `__, + but we squash and merge most PRs either way. + +Guidelines +---------- + +* All code should have tests (see `test coverage`_ below for more details). +* All code should be documented, to the same + `standard `_ as NumPy and SciPy. +* For new functionality, always add an example to the gallery. +* No changes are ever committed without review and approval by two core + team members. Ask on the + `mailing list `_ if + you get no response to your pull request. + **Never merge your own pull request.** +* Examples in the gallery should have a maximum figure width of 8 inches. + +Stylistic Guidelines +-------------------- + +* Set up your editor to remove trailing whitespace. Follow `PEP08 + `__. Check code with pyflakes / flake8. + +* Use numpy data types instead of strings (``np.uint8`` instead of + ``"uint8"``). + +* Use the following import conventions:: + + import numpy as np + import matplotlib.pyplot as plt + from scipy import ndimage as ndi + + cimport numpy as cnp # in Cython code + +* When documenting array parameters, use ``image : (M, N) ndarray`` + and then refer to ``M`` and ``N`` in the docstring, if necessary. + +* Refer to array dimensions as (plane), row, column, not as x, y, z. 
See + :ref:`Coordinate conventions ` + in the user guide for more information. + +* Functions should support all input image dtypes. Use utility functions such + as ``img_as_float`` to help convert to an appropriate type. The output + format can be whatever is most efficient. This allows us to string together + several functions into a pipeline, e.g.:: + + hough(canny(my_image)) + +* Use ``Py_ssize_t`` as data type for all indexing, shape and size variables + in C/C++ and Cython code. + +* Use relative module imports, i.e. ``from .._shared import xyz`` rather than + ``from skimage._shared import xyz``. + +* Wrap Cython code in a pure Python function, which defines the API. This + improves compatibility with code introspection tools, which are often not + aware of Cython code. + +* For Cython functions, release the GIL whenever possible, using + ``with nogil:``. + + +Testing +------- +``scikit-image`` has an extensive test suite that ensures correct +execution on your system. The test suite has to pass before a pull +request can be merged, and tests should be added to cover any +modifications to the code base. + +We make use of the `pytest `__ +testing framework, with tests located in the various +``skimage/submodule/tests`` folders. + +To use ``pytest``, ensure that Cython extensions are built and that +the library is installed in development mode:: + + $ pip install -e . + +Now, run all tests using:: + + $ PYTHONPATH=. pytest skimage + +Or the tests for a specific submodule:: + + $ PYTHONPATH=. pytest skimage/morphology + +Or tests from a specific file:: + + $ PYTHONPATH=. pytest skimage/morphology/tests/test_grey.py + +Or a single test within that file:: + + $ PYTHONPATH=. pytest skimage/morphology/tests/test_grey.py::test_3d_fallback_black_tophat + +Use ``--doctest-modules`` to run doctests. +For example, run all tests and all doctests using:: + + $ PYTHONPATH=. 
pytest --doctest-modules skimage + +Test coverage +------------- + +Tests for a module should ideally cover all code in that module, +i.e., statement coverage should be at 100%. + +To measure the test coverage, install +`pytest-cov `__ +(using ``easy_install pytest-cov``) and then run:: + + $ make coverage + +This will print a report with one line for each file in `skimage`, +detailing the test coverage:: + + Name Stmts Exec Cover Missing + ------------------------------------------------------------------------------ + skimage/color/colorconv 77 77 100% + skimage/filter/__init__ 1 1 100% + ... + + +Activate Travis-CI for your fork (optional) +------------------------------------------- + +Travis-CI checks all unittests in the project to prevent breakage. + +Before sending a pull request, you may want to check that Travis-CI +successfully passes all tests. To do so, + +* Go to `Travis-CI `__ and follow the Sign In link at + the top + +* Go to your `profile page `__ and switch on + your scikit-image fork + +It corresponds to steps one and two in +`Travis-CI documentation `__ +(Step three is already done in scikit-image). + +Thus, as soon as you push your code to your fork, it will trigger Travis-CI, +and you will receive an email notification when the process is done. + +Every time Travis is triggered, it also calls on `Codecov +`_ to inspect the current test overage. + + +Building docs +------------- + +To build docs, run ``make`` from the ``doc`` directory. ``make help`` lists +all targets. + +Requirements +~~~~~~~~~~~~ + +`Sphinx `__ and LaTeX are needed to build +the documentation. + +**Sphinx:** + +.. code:: sh + + pip install sphinx + +**LaTeX Ubuntu:** + +.. code:: sh + + sudo apt-get install -qq texlive texlive-latex-extra dvipng + +**LaTeX Mac:** + +Install the full `MacTex `__ installation or +install the smaller +`BasicTex `__ and add *ucs* +and *dvipng* packages: + +.. 
code:: sh + + sudo tlmgr install ucs dvipng + +Fixing Warnings +~~~~~~~~~~~~~~~ + +- "citation not found: R###" There is probably an underscore after a + reference in the first line of a docstring (e.g. [1]\_). Use this + method to find the source file: $ cd doc/build; grep -rin R#### + +- "Duplicate citation R###, other instance in..."" There is probably a + [2] without a [1] in one of the docstrings + +- Make sure to use pre-sphinxification paths to images (not the + \_images directory) + +Auto-generating dev docs +~~~~~~~~~~~~~~~~~~~~~~~~ + +This set of instructions was used to create +scikit-image/tools/deploy-docs.sh + +- Go to Github account settings -> personal access tokens +- Create a new token with access rights ``public_repo`` and + ``user:email only`` +- Install the travis command line tool: ``gem install travis``. On OSX, + you can get gem via ``brew install ruby``. +- Take then token generated by Github and run + ``travis encrypt GH_TOKEN=`` from inside a scikit-image repo +- Paste the output into the secure: field of ``.travis.yml``. +- The decrypted GH\_TOKEN env var will be available for travis scripts + +https://help.github.com/articles/creating-an-access-token-for-command-line-use/ +http://docs.travis-ci.com/user/encryption-keys/ + +Deprecation cycle +----------------- + +If the behavior of the library has to be changed, a deprecation cycle must be +followed to warn users. + +- a deprecation cycle is *not* necessary when: + + * adding a new function, or + * adding a new keyword argument to the *end* of a function signature, or + * fixing what was buggy behaviour + +- a deprecation cycle is necessary for *any breaking API change*, meaning a + change where the function, invoked with the same arguments, would return a + different result after the change. 
This includes: + + * changing the order of arguments or keyword arguments, or + * adding arguments or keyword arguments to a function, or + * changing a function's name or submodule, or + * changing the default value of a function's arguments. + +Usually, our policy is to put in place a deprecation cycle over two releases. + +For the sake of illustration, we consider the modification of a default value in +a function signature. In version N (therefore, next release will be N+1), we +have + +.. code-block:: python + + def a_function(image, rescale=True): + out = do_something(image, rescale=rescale) + return out + +that has to be changed to + +.. code-block:: python + + def a_function(image, rescale=None): + if rescale is None: + warn('The default value of rescale will change to `False` in version N+3') + rescale = True + out = do_something(image, rescale=rescale) + return out + +and in version N+3 + +.. code-block:: python + + def a_function(image, rescale=False): + out = do_something(image, rescale=rescale) + return out + +Here is the process for a 2-release deprecation cycle: + +- In the signature, set default to `None`, and modify the docstring to specify + that it's `True`. +- In the function, _if_ rescale is set to `None`, set to `True` and warn that the + default will change to `False` in version N+3. +- In ``doc/release/release_dev.rst``, under deprecations, add "In + `a_function`, the `rescale` argument will default to `False` in N+3." +- In ``TODO.txt``, create an item in the section related to version N+3 and write + "change rescale default to False in a_function". + +Note that the 2-release deprecation cycle is not a strict rule and in some +cases, the developers can agree on a different procedure upon justification +(like when we can't detect the change, or it involves moving or deleting an +entire function for example). + +Bugs +---- + +Please `report bugs on GitHub `_. 
diff -Nru skimage-0.13.1/CONTRIBUTORS.txt skimage-0.14.0/CONTRIBUTORS.txt --- skimage-0.13.1/CONTRIBUTORS.txt 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/CONTRIBUTORS.txt 2018-05-29 01:27:44.000000000 +0000 @@ -1,4 +1,4 @@ -- Stefan van der Walt +- Stefan van der Walt Project coordination - Nicolas Pinto @@ -247,3 +247,9 @@ - Scott Sievert Wavelet denoising + +- Gleb Goussarov + Chan-Vese Segmentation + +- Kevin Mader + Montage improvements diff -Nru skimage-0.13.1/.coveragerc skimage-0.14.0/.coveragerc --- skimage-0.13.1/.coveragerc 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/.coveragerc 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -# Configuration for coverage.py - -[run] -branch = True -source = skimage -include = */skimage/* -omit = - */setup.py - */skimage/external/* - -[report] -exclude_lines = - def __repr__ - if __name__ == .__main__.: diff -Nru skimage-0.13.1/debian/changelog skimage-0.14.0/debian/changelog --- skimage-0.13.1/debian/changelog 2018-04-24 07:16:32.000000000 +0000 +++ skimage-0.14.0/debian/changelog 2018-06-27 07:36:55.000000000 +0000 @@ -1,3 +1,24 @@ +skimage (0.14.0-0ubuntu2) cosmic; urgency=medium + + * Patch upstream requirements so that dask won't become a Depends. + (because python-dask is not available) + * Add pytest to autopkgtest dependencies. + * Allow outputs to stderr during autopkgtest. + + -- Mo Zhou Wed, 27 Jun 2018 07:36:55 +0000 + +skimage (0.14.0-0ubuntu1) cosmic; urgency=medium + + * No-change backport to cosmic + + -- Graham Inggs Tue, 26 Jun 2018 08:06:56 +0000 + +skimage (0.14.0-1) UNRELEASED; urgency=medium + + * New upstream version 0.14.0 + + -- Mo Zhou Mon, 25 Jun 2018 10:50:12 +0000 + skimage (0.13.1-3) unstable; urgency=medium * Team upload. 
diff -Nru skimage-0.13.1/debian/control skimage-0.14.0/debian/control --- skimage-0.13.1/debian/control 2018-04-24 07:16:32.000000000 +0000 +++ skimage-0.14.0/debian/control 2018-06-26 08:05:47.000000000 +0000 @@ -15,18 +15,21 @@ python-numpy, python-numpydoc, python-pil, + python-pytest, python-pywt, python-scipy, python-setuptools, python-six (>= 1.4), python3-sphinx (>= 1.3.0), python3-all-dev, + python3-dask, python3-matplotlib, python3-networkx, python3-nose, python3-numpy, python3-numpydoc, python3-pil, + python3-pytest, python3-pywt, python3-scipy, python3-setuptools, diff -Nru skimage-0.13.1/debian/patches/deb_make_use_py3.patch skimage-0.14.0/debian/patches/deb_make_use_py3.patch --- skimage-0.13.1/debian/patches/deb_make_use_py3.patch 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/debian/patches/deb_make_use_py3.patch 2018-06-26 08:05:47.000000000 +0000 @@ -0,0 +1,14 @@ +--- a/doc/Makefile ++++ b/doc/Makefile +@@ -2,9 +2,9 @@ + # + + # You can set these variables from the command line. 
+-PYTHON ?= python ++PYTHON ?= python3 + SPHINXOPTS ?= -j 1 +-SPHINXBUILD ?= python $(shell which sphinx-build) ++SPHINXBUILD ?= python3 /usr/share/sphinx/scripts/python3/sphinx-build + SPHINXCACHE ?= build/doctrees + PAPER ?= + diff -Nru skimage-0.13.1/debian/patches/deb_no_dask_in_setup skimage-0.14.0/debian/patches/deb_no_dask_in_setup --- skimage-0.13.1/debian/patches/deb_no_dask_in_setup 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/debian/patches/deb_no_dask_in_setup 2018-06-27 07:36:55.000000000 +0000 @@ -0,0 +1,10 @@ +diff --git a/requirements/default.txt b/requirements/default.txt +index 2e080079..21c23c2b 100644 +--- a/requirements/default.txt ++++ b/requirements/default.txt +@@ -5,5 +5,4 @@ networkx>=1.8 + six>=1.10.0 + pillow>=4.3.0 + PyWavelets>=0.4.0 +-dask[array]>=0.9.0 + cloudpickle>=0.2.1 diff -Nru skimage-0.13.1/debian/patches/deb_py2_dask_impossible.patch skimage-0.14.0/debian/patches/deb_py2_dask_impossible.patch --- skimage-0.13.1/debian/patches/deb_py2_dask_impossible.patch 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/debian/patches/deb_py2_dask_impossible.patch 2018-06-26 08:05:47.000000000 +0000 @@ -0,0 +1,14 @@ +--- a/skimage/restoration/_cycle_spin.py ++++ b/skimage/restoration/_cycle_spin.py +@@ -3,7 +3,10 @@ from __future__ import division + from itertools import product + + import numpy as np +-import dask ++try: ++ import dask ++except ImportError as e: ++ print("Could not import 'dask'. Please install using 'pip install dask'") + + + def _generate_shifts(ndim, multichannel, max_shifts, shift_steps=1): diff -Nru skimage-0.13.1/debian/patches/fix-doc-links.patch skimage-0.14.0/debian/patches/fix-doc-links.patch --- skimage-0.13.1/debian/patches/fix-doc-links.patch 2018-04-24 07:16:32.000000000 +0000 +++ skimage-0.14.0/debian/patches/fix-doc-links.patch 2018-06-26 08:05:47.000000000 +0000 @@ -7,12 +7,10 @@ +++ b/doc/source/_templates/navbar.html @@ -1,7 +1,7 @@ -
  • Download
  • --
  • Gallery
  • --
  • Documentation
  • --
  • Community Guidelines
  • +
  • Download
  • -+
  • Gallery
  • -+
  • Documentation
  • +
  • Gallery
  • +
  • Documentation
  • +-
  • Community Guidelines
  • +
  • Community Guidelines
  • diff -Nru skimage-0.13.1/debian/patches/series skimage-0.14.0/debian/patches/series --- skimage-0.13.1/debian/patches/series 2018-04-24 07:16:32.000000000 +0000 +++ skimage-0.14.0/debian/patches/series 2018-06-27 07:36:55.000000000 +0000 @@ -1,6 +1,9 @@ +deb_no_dask_in_setup deb_no_sphinx_galery fix-doc-links.patch doc-privacy.patch skip-failing-test-on-big-endian.patch skip-failing-test-on-i386.patch -remove_deprecated_box-forced.patch +#remove_deprecated_box-forced.patch +deb_make_use_py3.patch +deb_py2_dask_impossible.patch diff -Nru skimage-0.13.1/debian/patches/skip-failing-test-on-big-endian.patch skimage-0.14.0/debian/patches/skip-failing-test-on-big-endian.patch --- skimage-0.13.1/debian/patches/skip-failing-test-on-big-endian.patch 2018-04-24 07:16:32.000000000 +0000 +++ skimage-0.14.0/debian/patches/skip-failing-test-on-big-endian.patch 2018-06-26 08:05:47.000000000 +0000 @@ -4,23 +4,20 @@ --- a/skimage/io/tests/test_pil.py +++ b/skimage/io/tests/test_pil.py -@@ -1,8 +1,10 @@ - import os.path - import numpy as np +@@ -1,5 +1,7 @@ + import os +import sys - from numpy.testing import ( - assert_array_equal, assert_array_almost_equal, assert_raises, - assert_allclose, run_module_suite) + import numpy as np +from numpy.testing.decorators import skipif - + from six import BytesIO from tempfile import NamedTemporaryFile -@@ -220,6 +222,8 @@ - color_check('pil', 'bmp') +@@ -255,6 +257,8 @@ def test_all_color(): + color_check('pil', 'bmp') +# https://github.com/scikit-image/scikit-image/issues/2120 +@skipif(sys.byteorder == 'big') def test_all_mono(): - mono_check('pil') - + with expected_warnings(['.* is a boolean image']): + mono_check('pil') diff -Nru skimage-0.13.1/debian/patches/skip-failing-test-on-i386.patch skimage-0.14.0/debian/patches/skip-failing-test-on-i386.patch --- skimage-0.13.1/debian/patches/skip-failing-test-on-i386.patch 2018-04-24 07:16:32.000000000 +0000 +++ skimage-0.14.0/debian/patches/skip-failing-test-on-i386.patch 2018-06-26 
08:05:47.000000000 +0000 @@ -2,18 +2,18 @@ Last-Update: 2017-08-17 22:42:30 +0200 Description: Skip failing test on i386 ---- a/skimage/feature/tests/test_orb.py -+++ b/skimage/feature/tests/test_orb.py -@@ -1,6 +1,8 @@ +Index: skimage/skimage/feature/tests/test_orb.py +=================================================================== +--- skimage.orig/skimage/feature/tests/test_orb.py ++++ skimage/skimage/feature/tests/test_orb.py +@@ -1,4 +1,6 @@ +import os import numpy as np - from numpy.testing import (assert_equal, assert_almost_equal, run_module_suite, - assert_raises) +from numpy.testing.decorators import skipif + from skimage._shared.testing import assert_equal, assert_almost_equal from skimage.feature import ORB - from skimage import data - from skimage._shared.testing import test_parallel -@@ -69,6 +71,8 @@ + from skimage._shared import testing +@@ -69,6 +71,8 @@ def test_keypoints_orb_less_than_desired assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1]) @@ -22,9 +22,11 @@ def test_descriptor_orb(): detector_extractor = ORB(fast_n=12, fast_threshold=0.20) ---- a/skimage/filters/rank/tests/test_rank.py -+++ b/skimage/filters/rank/tests/test_rank.py -@@ -10,6 +10,7 @@ +Index: skimage/skimage/filters/rank/tests/test_rank.py +=================================================================== +--- skimage.orig/skimage/filters/rank/tests/test_rank.py ++++ skimage/skimage/filters/rank/tests/test_rank.py +@@ -11,6 +11,7 @@ from skimage.filters import rank from skimage._shared._warnings import expected_warnings from skimage._shared.testing import test_parallel @@ -32,7 +34,7 @@ class TestRank(): def setup(self): -@@ -82,10 +83,12 @@ +@@ -83,10 +84,12 @@ class TestRank(): rank.tophat(self.image, selem)) assert_equal(refs["noise_filter"], rank.noise_filter(self.image, selem)) diff -Nru skimage-0.13.1/debian/rules skimage-0.14.0/debian/rules --- skimage-0.13.1/debian/rules 2018-04-24 07:16:32.000000000 +0000 +++ skimage-0.14.0/debian/rules 2018-06-26 
08:05:47.000000000 +0000 @@ -46,7 +46,7 @@ ifneq (,$(findstring -a,$(DH_INTERNAL_OPTIONS))) : # not building documentation in -a else - export PYTHONPATH=$(PKG_TMP)/usr/lib/python$(PYVER)/dist-packages; \ + export PYTHONPATH=$(PKG_TMP)/usr/lib/python3/dist-packages; \ cd doc; test -d build/html || $(MAKE) html endif endif @@ -83,11 +83,14 @@ : # no documentation in -a -- surprised that sphinxdoc doesn't know that else dh_sphinxdoc -ppython-skimage-doc + find debian -type f -exec sed -i -e \ + 's@https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js@file:///usr/share/javascript/mathjax/MathJax.js@g' \ + '{}' + endif endif override_dh_installdocs: - dh_installdocs -A CONTRIBUTORS.txt README.md .github/CONTRIBUTING.txt TASKS.txt + dh_installdocs -A CONTRIBUTORS.txt README.md CONTRIBUTING.txt TODO.txt dh_link -ppython-skimage-doc /usr/share/twitter-bootstrap/files/js/bootstrap.min.js \ usr/share/doc/python-skimage-doc/html/_static/js/bootstrap.min.js dh_link -ppython-skimage-doc /usr/share/twitter-bootstrap/files/css/bootstrap.min.css \ diff -Nru skimage-0.13.1/debian/tests/control skimage-0.14.0/debian/tests/control --- skimage-0.13.1/debian/tests/control 2018-04-24 07:16:32.000000000 +0000 +++ skimage-0.14.0/debian/tests/control 2018-06-27 07:36:55.000000000 +0000 @@ -1,5 +1,7 @@ Tests: python2 -Depends: python-all, python-nose, python-skimage, python-pil | python-imaging, python-matplotlib, python-tk, python-networkx, xauth, xvfb +Depends: python-all, python-nose, python-skimage, python-pil | python-imaging, python-matplotlib, python-tk, python-networkx, xauth, xvfb, python-pytest +Restrictions: allow-stderr Tests: python3 -Depends: python3-all, python3-nose, python3-skimage, python3-pil | python3-imaging, python3-matplotlib, python3-tk, python3-networkx, xauth, xvfb +Depends: python3-all, python3-nose, python3-skimage, python3-pil | python3-imaging, python3-matplotlib, python3-tk, python3-networkx, xauth, xvfb, python3-pytest, python3-dask +Restrictions: 
allow-stderr diff -Nru skimage-0.13.1/debian/tests/python2 skimage-0.14.0/debian/tests/python2 --- skimage-0.13.1/debian/tests/python2 2018-04-24 07:16:32.000000000 +0000 +++ skimage-0.14.0/debian/tests/python2 2018-06-26 08:05:47.000000000 +0000 @@ -1,19 +1,21 @@ #!/bin/sh -set -efu +set -efux pys="$(pyversions -rv 2>/dev/null)" pkgbuild=${pkgbuild:-no} +keyword="not test_cycle_spinning_num_workers and not test_cycle_spinning_multichannel" srcdir=$PWD for py in $pys; do echo "=== python$py ===" if [ "$pkgbuild" = "yes" ]; then - export PYTHONPATH="$srcdir/debian/tmp/usr/lib/python$py/dist-packages" + module="$srcdir/debian/tmp/usr/lib/python$py/dist-packages/skimage" cd "$srcdir/build/" else + module="/usr/lib/python$py/dist-packages/skimage" cd "$ADTTMP" fi - xvfb-run -a python$py /usr/bin/nosetests -s -v --exclude test_update_on_save skimage 2>&1 + xvfb-run -a python$py /usr/bin/pytest -s -v -k "$keyword" $module 2>&1 done diff -Nru skimage-0.13.1/debian/tests/python3 skimage-0.14.0/debian/tests/python3 --- skimage-0.13.1/debian/tests/python3 2018-04-24 07:16:32.000000000 +0000 +++ skimage-0.14.0/debian/tests/python3 2018-06-26 08:05:47.000000000 +0000 @@ -9,11 +9,12 @@ for py in $pys; do echo "=== python$py ===" if [ "$pkgbuild" = "yes" ]; then - export PYTHONPATH="$srcdir/debian/tmp/usr/lib/python3/dist-packages" + module="$srcdir/debian/tmp/usr/lib/python3/dist-packages/skimage" cd "$srcdir/build/" else + module="/usr/lib/python3/dist-packages/skimage" cd "$ADTTMP" fi - xvfb-run -apython$py /usr/bin/nosetests3 -s -v skimage --exclude test_update_on_save 2>&1 + xvfb-run -apython$py /usr/bin/pytest-3 -s -v $module 2>&1 done diff -Nru skimage-0.13.1/DEPENDS.txt skimage-0.14.0/DEPENDS.txt --- skimage-0.13.1/DEPENDS.txt 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/DEPENDS.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -Build Requirements ------------------- -* `Python >= 2.7 `__ -* `Numpy >= 1.11 `__ -* `Cython >= 0.23 `__ -* `Six >=1.7.3 `__ 
-* `SciPy >=0.17.0 `__ -* `numpydoc >=0.6 `__ - -Runtime requirements --------------------- -* `Python >= 2.7 `__ -* `Numpy >= 1.11 `__ -* `SciPy >= 0.17.0 `__ -* `Matplotlib >= 1.3.1 `__ -* `NetworkX >= 1.8 `__ -* `Six >=1.7.3 `__ -* `Pillow >= 2.1.0 `__ - (or `PIL `__) -* `PyWavelets>=0.4.0 `__ - -You can use pip to automatically install the runtime dependencies as follows:: - - $ pip install -r requirements.txt - -Optional Requirements ---------------------- -You can use this scikit with the basic requirements listed above, but some -functionality is only available with the following installed: - -* `dask[array] >= 0.5.0 `__. - For parallel computation using `skimage.util.apply_parallel`. - -* `PyQt4 `__ - The ``qt`` plugin that provides ``imshow(x, fancy=True)`` and `skivi`. - -* `FreeImage `__ - The ``freeimage`` plugin provides support for reading various types of - image file formats, including multi-page TIFFs. - -* `PyAMG `__ - The ``pyamg`` module is used for the fast `cg_mg` mode of random - walker segmentation. - -* `Astropy `__ provides FITS io capability. - -* `SimpleITK `__ - Optional io plugin providing a wide variety of `formats `__. - including specialized formats using in medical imaging. - -* `imread `__ - Optional io plugin providing most standard `formats `__. - - -Testing requirements --------------------- -* `Nose `__ - A Python Unit Testing Framework -* `Coverage.py `__ - A tool that generates a unit test code coverage report - - -Documentation requirements --------------------------- - -`sphinx >= 1.3 `_ is required to build the -documentation. 
diff -Nru skimage-0.13.1/doc/examples/color_exposure/plot_adapt_rgb.py skimage-0.14.0/doc/examples/color_exposure/plot_adapt_rgb.py --- skimage-0.13.1/doc/examples/color_exposure/plot_adapt_rgb.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/color_exposure/plot_adapt_rgb.py 2018-05-29 01:27:44.000000000 +0000 @@ -46,18 +46,14 @@ image = data.astronaut() -fig = plt.figure(figsize=(14, 7)) -ax_each = fig.add_subplot(121, adjustable='box-forced') -ax_hsv = fig.add_subplot(122, sharex=ax_each, sharey=ax_each, - adjustable='box-forced') +fig, (ax_each, ax_hsv) = plt.subplots(ncols=2, figsize=(14, 7)) -# We use 1 - sobel_each(image) -# but this will not work if image is not normalized +# We use 1 - sobel_each(image) but this won't work if image is not normalized ax_each.imshow(rescale_intensity(1 - sobel_each(image))) ax_each.set_xticks([]), ax_each.set_yticks([]) ax_each.set_title("Sobel filter computed\n on individual RGB channels") -# We use 1 - sobel_hsv(image) but this will not work if image is not normalized +# We use 1 - sobel_hsv(image) but this won't work if image is not normalized ax_hsv.imshow(rescale_intensity(1 - sobel_hsv(image))) ax_hsv.set_xticks([]), ax_hsv.set_yticks([]) ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)") @@ -102,12 +98,10 @@ def sobel_gray(image): return filters.sobel(image) -fig = plt.figure(figsize=(7, 7)) -ax = fig.add_subplot(111, sharex=ax_each, sharey=ax_each, - adjustable='box-forced') -# We use 1 - sobel_gray(image) -# but this will not work if image is not normalized +fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(7, 7)) + +# We use 1 - sobel_gray(image) but this won't work if image is not normalized ax.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray) ax.set_xticks([]), ax.set_yticks([]) ax.set_title("Sobel filter computed\n on the converted grayscale image") diff -Nru skimage-0.13.1/doc/examples/color_exposure/plot_equalize.py 
skimage-0.14.0/doc/examples/color_exposure/plot_equalize.py --- skimage-0.13.1/doc/examples/color_exposure/plot_equalize.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/color_exposure/plot_equalize.py 2018-05-29 01:27:44.000000000 +0000 @@ -29,28 +29,27 @@ matplotlib.rcParams['font.size'] = 8 -def plot_img_and_hist(img, axes, bins=256): +def plot_img_and_hist(image, axes, bins=256): """Plot an image along with its histogram and cumulative histogram. """ - img = img_as_float(img) + image = img_as_float(image) ax_img, ax_hist = axes ax_cdf = ax_hist.twinx() # Display image - ax_img.imshow(img, cmap=plt.cm.gray) + ax_img.imshow(image, cmap=plt.cm.gray) ax_img.set_axis_off() - ax_img.set_adjustable('box-forced') # Display histogram - ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black') + ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black') ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0)) ax_hist.set_xlabel('Pixel intensity') ax_hist.set_xlim(0, 1) ax_hist.set_yticks([]) # Display cumulative distribution - img_cdf, bins = exposure.cumulative_distribution(img, bins) + img_cdf, bins = exposure.cumulative_distribution(image, bins) ax_cdf.plot(bins, img_cdf, 'r') ax_cdf.set_yticks([]) diff -Nru skimage-0.13.1/doc/examples/color_exposure/plot_ihc_color_separation.py skimage-0.14.0/doc/examples/color_exposure/plot_ihc_color_separation.py --- skimage-0.13.1/doc/examples/color_exposure/plot_ihc_color_separation.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/color_exposure/plot_ihc_color_separation.py 2018-05-29 01:27:44.000000000 +0000 @@ -35,8 +35,7 @@ ihc_rgb = data.immunohistochemistry() ihc_hed = rgb2hed(ihc_rgb) -fig, axes = plt.subplots(2, 2, figsize=(7, 6), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(2, 2, figsize=(7, 6), sharex=True, sharey=True) ax = axes.ravel() ax[0].imshow(ihc_rgb) @@ -61,7 +60,6 @@ # Now we can 
easily manipulate the hematoxylin and DAB "channels": import numpy as np - from skimage.exposure import rescale_intensity # Rescale hematoxylin and DAB signals and give them a fluorescence look @@ -70,7 +68,7 @@ zdh = np.dstack((np.zeros_like(h), d, h)) fig = plt.figure() -axis = plt.subplot(1, 1, 1, sharex=ax[0], sharey=ax[0], adjustable='box-forced') +axis = plt.subplot(1, 1, 1, sharex=ax[0], sharey=ax[0]) axis.imshow(zdh) axis.set_title("Stain separated image (rescaled)") axis.axis('off') diff -Nru skimage-0.13.1/doc/examples/color_exposure/plot_local_equalize.py skimage-0.14.0/doc/examples/color_exposure/plot_local_equalize.py --- skimage-0.13.1/doc/examples/color_exposure/plot_local_equalize.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/color_exposure/plot_local_equalize.py 2018-05-29 01:27:44.000000000 +0000 @@ -34,7 +34,7 @@ matplotlib.rcParams['font.size'] = 9 -def plot_img_and_hist(img, axes, bins=256): +def plot_img_and_hist(image, axes, bins=256): """Plot an image along with its histogram and cumulative histogram. 
""" @@ -42,19 +42,19 @@ ax_cdf = ax_hist.twinx() # Display image - ax_img.imshow(img, cmap=plt.cm.gray) + ax_img.imshow(image, cmap=plt.cm.gray) ax_img.set_axis_off() # Display histogram - ax_hist.hist(img.ravel(), bins=bins) + ax_hist.hist(image.ravel(), bins=bins) ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0)) ax_hist.set_xlabel('Pixel intensity') - xmin, xmax = dtype_range[img.dtype.type] + xmin, xmax = dtype_range[image.dtype.type] ax_hist.set_xlim(xmin, xmax) # Display cumulative distribution - img_cdf, bins = exposure.cumulative_distribution(img, bins) + img_cdf, bins = exposure.cumulative_distribution(image, bins) ax_cdf.plot(bins, img_cdf, 'r') return ax_img, ax_hist, ax_cdf @@ -74,11 +74,9 @@ # Display results fig = plt.figure(figsize=(8, 5)) axes = np.zeros((2, 3), dtype=np.object) -axes[0, 0] = plt.subplot(2, 3, 1, adjustable='box-forced') -axes[0, 1] = plt.subplot(2, 3, 2, sharex=axes[0, 0], sharey=axes[0, 0], - adjustable='box-forced') -axes[0, 2] = plt.subplot(2, 3, 3, sharex=axes[0, 0], sharey=axes[0, 0], - adjustable='box-forced') +axes[0, 0] = plt.subplot(2, 3, 1) +axes[0, 1] = plt.subplot(2, 3, 2, sharex=axes[0, 0], sharey=axes[0, 0]) +axes[0, 2] = plt.subplot(2, 3, 3, sharex=axes[0, 0], sharey=axes[0, 0]) axes[1, 0] = plt.subplot(2, 3, 4) axes[1, 1] = plt.subplot(2, 3, 5) axes[1, 2] = plt.subplot(2, 3, 6) diff -Nru skimage-0.13.1/doc/examples/color_exposure/plot_log_gamma.py skimage-0.14.0/doc/examples/color_exposure/plot_log_gamma.py --- skimage-0.13.1/doc/examples/color_exposure/plot_log_gamma.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/color_exposure/plot_log_gamma.py 2018-05-29 01:27:44.000000000 +0000 @@ -17,27 +17,27 @@ matplotlib.rcParams['font.size'] = 8 -def plot_img_and_hist(img, axes, bins=256): +def plot_img_and_hist(image, axes, bins=256): """Plot an image along with its histogram and cumulative histogram. 
""" - img = img_as_float(img) + image = img_as_float(image) ax_img, ax_hist = axes ax_cdf = ax_hist.twinx() # Display image - ax_img.imshow(img, cmap=plt.cm.gray) + ax_img.imshow(image, cmap=plt.cm.gray) ax_img.set_axis_off() # Display histogram - ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black') + ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black') ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0)) ax_hist.set_xlabel('Pixel intensity') ax_hist.set_xlim(0, 1) ax_hist.set_yticks([]) # Display cumulative distribution - img_cdf, bins = exposure.cumulative_distribution(img, bins) + img_cdf, bins = exposure.cumulative_distribution(image, bins) ax_cdf.plot(bins, img_cdf, 'r') ax_cdf.set_yticks([]) @@ -56,11 +56,9 @@ # Display results fig = plt.figure(figsize=(8, 5)) axes = np.zeros((2, 3), dtype=np.object) -axes[0, 0] = plt.subplot(2, 3, 1, adjustable='box-forced') -axes[0, 1] = plt.subplot(2, 3, 2, sharex=axes[0, 0], sharey=axes[0, 0], - adjustable='box-forced') -axes[0, 2] = plt.subplot(2, 3, 3, sharex=axes[0, 0], sharey=axes[0, 0], - adjustable='box-forced') +axes[0, 0] = plt.subplot(2, 3, 1) +axes[0, 1] = plt.subplot(2, 3, 2, sharex=axes[0, 0], sharey=axes[0, 0]) +axes[0, 2] = plt.subplot(2, 3, 3, sharex=axes[0, 0], sharey=axes[0, 0]) axes[1, 0] = plt.subplot(2, 3, 4) axes[1, 1] = plt.subplot(2, 3, 5) axes[1, 2] = plt.subplot(2, 3, 6) diff -Nru skimage-0.13.1/doc/examples/color_exposure/plot_regional_maxima.py skimage-0.14.0/doc/examples/color_exposure/plot_regional_maxima.py --- skimage-0.13.1/doc/examples/color_exposure/plot_regional_maxima.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/color_exposure/plot_regional_maxima.py 2018-05-29 01:27:44.000000000 +0000 @@ -44,17 +44,14 @@ ax0.imshow(image, cmap='gray') ax0.set_title('original image') ax0.axis('off') -ax0.set_adjustable('box-forced') ax1.imshow(dilated, vmin=image.min(), vmax=image.max(), cmap='gray') ax1.set_title('dilated') 
ax1.axis('off') -ax1.set_adjustable('box-forced') ax2.imshow(image - dilated, cmap='gray') ax2.set_title('image - dilated') ax2.axis('off') -ax2.set_adjustable('box-forced') fig.tight_layout() diff -Nru skimage-0.13.1/doc/examples/color_exposure/plot_tinting_grayscale_images.py skimage-0.14.0/doc/examples/color_exposure/plot_tinting_grayscale_images.py --- skimage-0.13.1/doc/examples/color_exposure/plot_tinting_grayscale_images.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/color_exposure/plot_tinting_grayscale_images.py 2018-05-29 01:27:44.000000000 +0000 @@ -28,11 +28,10 @@ red_multiplier = [1, 0, 0] yellow_multiplier = [1, 1, 0] -fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True) +fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), + sharex=True, sharey=True) ax1.imshow(red_multiplier * image) ax2.imshow(yellow_multiplier * image) -ax1.set_adjustable('box-forced') -ax2.set_adjustable('box-forced') ###################################################################### # In many cases, dealing with RGB values may not be ideal. 
Because of that, @@ -105,7 +104,6 @@ tinted_image = colorize(image, hue, saturation=0.3) ax.imshow(tinted_image, vmin=0, vmax=1) ax.set_axis_off() - ax.set_adjustable('box-forced') fig.tight_layout() ###################################################################### @@ -134,11 +132,10 @@ masked_image = image.copy() masked_image[textured_regions, :] *= red_multiplier -fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4), sharex=True, sharey=True) +fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4), + sharex=True, sharey=True) ax1.imshow(sliced_image) ax2.imshow(masked_image) -ax1.set_adjustable('box-forced') -ax2.set_adjustable('box-forced') plt.show() diff -Nru skimage-0.13.1/doc/examples/edges/plot_active_contours.py skimage-0.14.0/doc/examples/edges/plot_active_contours.py --- skimage-0.13.1/doc/examples/edges/plot_active_contours.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/edges/plot_active_contours.py 2018-05-29 01:27:44.000000000 +0000 @@ -31,15 +31,6 @@ from skimage.filters import gaussian from skimage.segmentation import active_contour -# Test scipy version, since active contour is only possible -# with recent scipy version -import scipy -split_version = scipy.__version__.split('.') -if not(split_version[-1].isdigit()): # Remove dev string if present - split_version.pop() -scipy_version = list(map(int, split_version)) -new_scipy = scipy_version[0] > 0 or \ - (scipy_version[0] == 0 and scipy_version[1] >= 14) img = data.astronaut() img = rgb2gray(img) @@ -49,23 +40,15 @@ y = 100 + 100*np.sin(s) init = np.array([x, y]).T -if not new_scipy: - print('You are using an old version of scipy. 
' - 'Active contours is implemented for scipy versions ' - '0.14.0 and above.') - -if new_scipy: - snake = active_contour(gaussian(img, 3), - init, alpha=0.015, beta=10, gamma=0.001) - - fig = plt.figure(figsize=(7, 7)) - ax = fig.add_subplot(111) - plt.gray() - ax.imshow(img) - ax.plot(init[:, 0], init[:, 1], '--r', lw=3) - ax.plot(snake[:, 0], snake[:, 1], '-b', lw=3) - ax.set_xticks([]), ax.set_yticks([]) - ax.axis([0, img.shape[1], img.shape[0], 0]) +snake = active_contour(gaussian(img, 3), + init, alpha=0.015, beta=10, gamma=0.001) + +fig, ax = plt.subplots(figsize=(7, 7)) +ax.imshow(img, cmap=plt.cm.gray) +ax.plot(init[:, 0], init[:, 1], '--r', lw=3) +ax.plot(snake[:, 0], snake[:, 1], '-b', lw=3) +ax.set_xticks([]), ax.set_yticks([]) +ax.axis([0, img.shape[1], img.shape[0], 0]) ###################################################################### # Here we initialize a straight line between two points, `(5, 136)` and @@ -79,17 +62,14 @@ y = np.linspace(136, 50, 100) init = np.array([x, y]).T -if new_scipy: - snake = active_contour(gaussian(img, 1), init, bc='fixed', - alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) - - fig = plt.figure(figsize=(9, 5)) - ax = fig.add_subplot(111) - plt.gray() - ax.imshow(img) - ax.plot(init[:, 0], init[:, 1], '--r', lw=3) - ax.plot(snake[:, 0], snake[:, 1], '-b', lw=3) - ax.set_xticks([]), ax.set_yticks([]) - ax.axis([0, img.shape[1], img.shape[0], 0]) +snake = active_contour(gaussian(img, 1), init, bc='fixed', + alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) + +fig, ax = plt.subplots(figsize=(9, 5)) +ax.imshow(img, cmap=plt.cm.gray) +ax.plot(init[:, 0], init[:, 1], '--r', lw=3) +ax.plot(snake[:, 0], snake[:, 1], '-b', lw=3) +ax.set_xticks([]), ax.set_yticks([]) +ax.axis([0, img.shape[1], img.shape[0], 0]) plt.show() diff -Nru skimage-0.13.1/doc/examples/edges/plot_circular_elliptical_hough_transform.py skimage-0.14.0/doc/examples/edges/plot_circular_elliptical_hough_transform.py --- 
skimage-0.13.1/doc/examples/edges/plot_circular_elliptical_hough_transform.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/edges/plot_circular_elliptical_hough_transform.py 2018-05-29 01:27:44.000000000 +0000 @@ -126,9 +126,8 @@ edges = color.gray2rgb(img_as_ubyte(edges)) edges[cy, cx] = (250, 0, 0) -fig2, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4), sharex=True, - sharey=True, - subplot_kw={'adjustable':'box-forced'}) +fig2, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4), + sharex=True, sharey=True) ax1.set_title('Original picture') ax1.imshow(image_rgb) diff -Nru skimage-0.13.1/doc/examples/edges/plot_line_hough_transform.py skimage-0.14.0/doc/examples/edges/plot_line_hough_transform.py --- skimage-0.13.1/doc/examples/edges/plot_line_hough_transform.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/edges/plot_line_hough_transform.py 2018-05-29 01:27:44.000000000 +0000 @@ -76,8 +76,7 @@ h, theta, d = hough_line(image) # Generating figure 1 -fig, axes = plt.subplots(1, 3, figsize=(15, 6), - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(1, 3, figsize=(15, 6)) ax = axes.ravel() ax[0].imshow(image, cmap=cm.gray) @@ -131,7 +130,6 @@ for a in ax: a.set_axis_off() - a.set_adjustable('box-forced') plt.tight_layout() plt.show() diff -Nru skimage-0.13.1/doc/examples/edges/plot_marching_cubes.py skimage-0.14.0/doc/examples/edges/plot_marching_cubes.py --- skimage-0.13.1/doc/examples/edges/plot_marching_cubes.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/edges/plot_marching_cubes.py 2018-05-29 01:27:44.000000000 +0000 @@ -34,10 +34,10 @@ ellip_base[2:, ...]), axis=0) # Use marching cubes to obtain the surface mesh of these ellipsoids -verts, faces, normals, values = measure.marching_cubes(ellip_double, 0) +verts, faces, normals, values = measure.marching_cubes_lewiner(ellip_double, 0) # Display resulting triangular mesh using Matplotlib. 
This can also be done -# with mayavi (see skimage.measure.marching_cubes docstring). +# with mayavi (see skimage.measure.marching_cubes_lewiner docstring). fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(111, projection='3d') diff -Nru skimage-0.13.1/doc/examples/edges/plot_random_shapes.py skimage-0.14.0/doc/examples/edges/plot_random_shapes.py --- skimage-0.13.1/doc/examples/edges/plot_random_shapes.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/doc/examples/edges/plot_random_shapes.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,59 @@ +""" +============= +Random Shapes +============= + +Example of generating random shapes with particular properties. +""" + +import matplotlib.pyplot as plt + +from skimage.draw import random_shapes + +# Let's start simple and generate a 128x128 image +# with a single grayscale rectangle. +result = random_shapes((128, 128), max_shapes=1, shape='rectangle', + multichannel=False) + +# We get back a tuple consisting of (1) the image with the generated shapes +# and (2) a list of label tuples with the kind of shape (e.g. circle, +# rectangle) and ((r0, r1), (c0, c1)) coordinates. +image, labels = result +print('Image shape: {}\nLabels: {}'.format(image.shape, labels)) + +# We can visualize the images. +fig, axes = plt.subplots(nrows=2, ncols=3) +ax = axes.ravel() +ax[0].imshow(image, cmap='gray') +ax[0].set_title('Grayscale shape') + +# The generated images can be much more complex. For example, let's try many +# shapes of any color. If we want the colors to be particularly light, we can +# set the `intensity_range` to an upper subrange of (0,255). 
+image1, _ = random_shapes((128, 128), max_shapes=10, + intensity_range=((100, 255),)) + +# Moar :) +image2, _ = random_shapes((128, 128), max_shapes=10, + intensity_range=((200, 255),)) +image3, _ = random_shapes((128, 128), max_shapes=10, + intensity_range=((50, 255),)) +image4, _ = random_shapes((128, 128), max_shapes=10, + intensity_range=((0, 255),)) + +for i, image in enumerate([image1, image2, image3, image4], 1): + ax[i].imshow(image) + ax[i].set_title('Colored shapes, #{}'.format(i-1)) + +# These shapes are well suited to test segmentation algorithms. Often, we +# want shapes to overlap to test the algorithm. This is also possible: +image, _ = random_shapes((128, 128), min_shapes=5, max_shapes=10, + min_size=20, allow_overlap=True) +ax[5].imshow(image) +ax[5].set_title('Overlapping shapes') + +for a in ax: + a.set_xticklabels([]) + a.set_yticklabels([]) + +plt.show() diff -Nru skimage-0.13.1/doc/examples/edges/plot_skeleton.py skimage-0.14.0/doc/examples/edges/plot_skeleton.py --- skimage-0.13.1/doc/examples/edges/plot_skeleton.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/edges/plot_skeleton.py 2018-05-29 01:27:44.000000000 +0000 @@ -23,8 +23,7 @@ # display results fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4), - sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) + sharex=True, sharey=True) ax = axes.ravel() @@ -81,8 +80,7 @@ skeleton = skeletonize(data) skeleton3d = skeletonize_3d(data) -fig, axes = plt.subplots(1, 3, figsize=(8, 4), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(1, 3, figsize=(8, 4), sharex=True, sharey=True) ax = axes.ravel() ax[0].imshow(data, cmap=plt.cm.gray, interpolation='nearest') @@ -135,8 +133,7 @@ from skimage.util.colormap import magma -fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True) 
ax = axes.ravel() ax[0].imshow(data, cmap=plt.cm.gray, interpolation='nearest') @@ -179,8 +176,7 @@ thinned = thin(image) thinned_partial = thin(image, max_iter=25) -fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True) ax = axes.ravel() ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest') diff -Nru skimage-0.13.1/doc/examples/features_detection/plot_blob.py skimage-0.14.0/doc/examples/features_detection/plot_blob.py --- skimage-0.13.1/doc/examples/features_detection/plot_blob.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/features_detection/plot_blob.py 2018-05-29 01:27:44.000000000 +0000 @@ -63,8 +63,7 @@ 'Determinant of Hessian'] sequence = zip(blobs_list, colors, titles) -fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True) ax = axes.ravel() for idx, (blobs, color, title) in enumerate(sequence): diff -Nru skimage-0.13.1/doc/examples/features_detection/plot_gabors_from_astronaut.py skimage-0.14.0/doc/examples/features_detection/plot_gabors_from_astronaut.py --- skimage-0.13.1/doc/examples/features_detection/plot_gabors_from_astronaut.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/features_detection/plot_gabors_from_astronaut.py 2018-05-29 01:27:44.000000000 +0000 @@ -28,13 +28,13 @@ .. [1] http://en.wikipedia.org/wiki/Gabor_filter .. [2] http://en.wikipedia.org/wiki/Simple_cell .. [3] http://en.wikipedia.org/wiki/Receptive_field -.. [4] http://en.wikipedia.org/wiki/K-means_clustering -.. [5] http://en.wikipedia.org/wiki/Lateral_geniculate_nucleus -.. [6] D. H. Hubel and T. N., Wiesel Receptive Fields of Single Neurones +.. [4] D. H. Hubel and T. N., Wiesel Receptive Fields of Single Neurones in the Cat's Striate Cortex, J. Physiol. pp. 
574-591 (148) 1959 -.. [7] D. H. Hubel and T. N., Wiesel Receptive Fields, Binocular +.. [5] D. H. Hubel and T. N., Wiesel Receptive Fields, Binocular Interaction, and Functional Architecture in the Cat's Visual Cortex, J. Physiol. 160 pp. 106-154 1962 +.. [6] http://en.wikipedia.org/wiki/K-means_clustering +.. [7] http://en.wikipedia.org/wiki/Lateral_geniculate_nucleus """ import numpy as np from scipy.cluster.vq import kmeans2 @@ -44,7 +44,7 @@ from skimage import data from skimage import color from skimage.util.shape import view_as_windows -from skimage.util.montage import montage2d +from skimage.util import montage np.random.seed(42) @@ -58,7 +58,7 @@ patches1 = patches1.reshape(-1, patch_shape[0] * patch_shape[1])[::8] fb1, _ = kmeans2(patches1, n_filters, minit='points') fb1 = fb1.reshape((-1,) + patch_shape) -fb1_montage = montage2d(fb1, rescale_intensity=True) +fb1_montage = montage(fb1, rescale_intensity=True) # -- filterbank2 LGN-like image astro_dog = ndi.gaussian_filter(astro, .5) - ndi.gaussian_filter(astro, 1) @@ -66,9 +66,9 @@ patches2 = patches2.reshape(-1, patch_shape[0] * patch_shape[1])[::8] fb2, _ = kmeans2(patches2, n_filters, minit='points') fb2 = fb2.reshape((-1,) + patch_shape) -fb2_montage = montage2d(fb2, rescale_intensity=True) +fb2_montage = montage(fb2, rescale_intensity=True) -# -- +# -- plotting fig, axes = plt.subplots(2, 2, figsize=(7, 6)) ax = axes.ravel() diff -Nru skimage-0.13.1/doc/examples/features_detection/plot_haar.py skimage-0.14.0/doc/examples/features_detection/plot_haar.py --- skimage-0.13.1/doc/examples/features_detection/plot_haar.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/doc/examples/features_detection/plot_haar.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,71 @@ +""" +============================ +Haar-like feature descriptor +============================ + +Haar-like features are simple digital image features that were introduced in a +real-time face detector [1]_. 
These features can be efficiently computed on any +scale in constant time, using an integral image [1]_. After that, a small +number of critical features is selected from this large set of potential +features (e.g., using AdaBoost learning algorithm as in [1]_). The following +example will show the mechanism to build this family of descriptors. + +References +---------- + +.. [1] Viola, Paul, and Michael J. Jones. "Robust real-time face + detection." International journal of computer vision 57.2 + (2004): 137-154. + http://www.merl.com/publications/docs/TR2004-043.pdf + DOI: 10.1109/CVPR.2001.990517 + +""" +from __future__ import print_function + +import numpy as np +import matplotlib.pyplot as plt + +from skimage.feature import haar_like_feature_coord +from skimage.feature import draw_haar_like_feature + +print(__doc__) + +############################################################################### +# Different types of Haar-like feature descriptors +############################################################################### +# The Haar-like feature descriptors come into 5 different types as illustrated +# in the figure below. The value of the descriptor is equal to the difference +# between the sum of intensity values in the green and the red one. 
+ +images = [np.zeros((2, 2)), np.zeros((2, 2)), + np.zeros((3, 3)), np.zeros((3, 3)), + np.zeros((2, 2))] + +feature_types = ['type-2-x', 'type-2-y', + 'type-3-x', 'type-3-y', + 'type-4'] + +fig, axs = plt.subplots(3, 2) +for ax, img, feat_t in zip(np.ravel(axs), images, feature_types): + coord, _ = haar_like_feature_coord(img.shape[0], img.shape[1], feat_t) + haar_feature = draw_haar_like_feature(img, 0, 0, + img.shape[0], + img.shape[1], + coord, + max_n_features=1, + random_state=0) + ax.imshow(haar_feature) + ax.set_title(feat_t) + ax.set_xticks([]) + ax.set_yticks([]) + +fig.suptitle('The different Haar-like feature descriptors') +plt.axis('off') +plt.show() + +############################################################################### +# The value of the descriptor is equal to the difference between the sum of the +# intensity values in the green rectangle and the red one. the red area is +# subtracted to the sum of the pixel intensities of the green In practice, the +# Haar-like features will be placed in all possible location of an image and a +# feature value will be computed for each of these locations. diff -Nru skimage-0.13.1/doc/examples/features_detection/plot_hog.py skimage-0.14.0/doc/examples/features_detection/plot_hog.py --- skimage-0.13.1/doc/examples/features_detection/plot_hog.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/features_detection/plot_hog.py 2018-05-29 01:27:44.000000000 +0000 @@ -24,7 +24,7 @@ The first stage applies an optional global image normalisation equalisation that is designed to reduce the influence of illumination effects. In practice we use gamma (power law) compression, either -computing the square root or the log of each colour channel. +computing the square root or the log of each color channel. Image texture strength is typically proportional to the local surface illumination so this compression helps to reduce the effects of local shadowing and illumination variations. 
@@ -32,7 +32,7 @@ The second stage computes first order image gradients. These capture contour, silhouette and some texture information, while providing further resistance to illumination variations. The locally dominant -colour channel is used, which provides colour invariance to a large +color channel is used, which provides color invariance to a large extent. Variant methods may also include second order image derivatives, which act as primitive bar detectors - a useful feature for capturing, e.g. bar like structures in bicycles and limbs in humans. @@ -82,26 +82,24 @@ import matplotlib.pyplot as plt from skimage.feature import hog -from skimage import data, color, exposure +from skimage import data, exposure -image = color.rgb2gray(data.astronaut()) +image = data.astronaut() fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16), - cells_per_block=(1, 1), visualise=True) + cells_per_block=(1, 1), visualize=True, multichannel=True) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True) ax1.axis('off') ax1.imshow(image, cmap=plt.cm.gray) ax1.set_title('Input image') -ax1.set_adjustable('box-forced') # Rescale histogram for better display -hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02)) +hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 10)) ax2.axis('off') ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray) ax2.set_title('Histogram of Oriented Gradients') -ax1.set_adjustable('box-forced') plt.show() diff -Nru skimage-0.13.1/doc/examples/features_detection/plot_holes_and_peaks.py skimage-0.14.0/doc/examples/features_detection/plot_holes_and_peaks.py --- skimage-0.13.1/doc/examples/features_detection/plot_holes_and_peaks.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/features_detection/plot_holes_and_peaks.py 2018-05-29 01:27:44.000000000 +0000 @@ -54,8 +54,7 @@ seed[1:-1, 1:-1] = image.min() rec = reconstruction(seed, mask, method='dilation') -fig, ax = 
plt.subplots(2, 2, figsize=(5, 4), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, ax = plt.subplots(2, 2, figsize=(5, 4), sharex=True, sharey=True) ax = ax.ravel() ax[0].imshow(image, cmap='gray') diff -Nru skimage-0.13.1/doc/examples/features_detection/plot_template.py skimage-0.14.0/doc/examples/features_detection/plot_template.py --- skimage-0.13.1/doc/examples/features_detection/plot_template.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/features_detection/plot_template.py 2018-05-29 01:27:44.000000000 +0000 @@ -35,8 +35,8 @@ fig = plt.figure(figsize=(8, 3)) ax1 = plt.subplot(1, 3, 1) -ax2 = plt.subplot(1, 3, 2, adjustable='box-forced') -ax3 = plt.subplot(1, 3, 3, sharex=ax2, sharey=ax2, adjustable='box-forced') +ax2 = plt.subplot(1, 3, 2) +ax3 = plt.subplot(1, 3, 3, sharex=ax2, sharey=ax2) ax1.imshow(coin, cmap=plt.cm.gray) ax1.set_axis_off() diff -Nru skimage-0.13.1/doc/examples/filters/plot_cycle_spinning.py skimage-0.14.0/doc/examples/filters/plot_cycle_spinning.py --- skimage-0.13.1/doc/examples/filters/plot_cycle_spinning.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_cycle_spinning.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,89 @@ +""" +================= +Wavelet denoising +================= + +The discrete wavelet transform is not `shift-invariant`_. Shift invariance can +be achieved through an undecimated wavelet transform (also called stationary +wavelet transform), at cost of increased redundancy (i.e. more wavelet +coefficients than input image pixels). An alternative way to approximate +shift-invariance in the context of image denoising with the discrete wavelet +transform is to use the technique known as "cycle spinning". This involves +averaging the results of the following 3-step procedure for multiple spatial +shifts, n: + +1.) (circularly) shift the signal by an amount, n +2.) apply denoising +3.) 
apply the inverse shift + +For 2D image denoising, we demonstrate here that such cycle-spinning can +provide a substantial increase in quality, with much of the gain being +achieved simply by averaging shifts of only n=0 and n=1 on each axis. + +.. _`shift-invariant`: https://en.wikipedia.org/wiki/Shift-invariant_system +""" +import matplotlib.pyplot as plt + +from skimage.restoration import denoise_wavelet, cycle_spin +from skimage import data, img_as_float +from skimage.util import random_noise +from skimage.measure import compare_psnr + + +original = img_as_float(data.chelsea()[100:250, 50:300]) + +sigma = 0.155 +noisy = random_noise(original, var=sigma**2) + +fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(10, 4), + sharex=False, sharey=False) +ax = ax.ravel() + +psnr_noisy = compare_psnr(original, noisy) +ax[0].imshow(noisy) +ax[0].axis('off') +ax[0].set_title('Noisy\nPSNR={:0.4g}'.format(psnr_noisy)) + + +# Repeat denosing with different amounts of cycle spinning. e.g. +# max_shift = 0 -> no cycle spinning +# max_shift = 1 -> shifts of (0, 1) along each axis +# max_shift = 3 -> shifts of (0, 1, 2, 3) along each axis +# etc... 
+ +denoise_kwargs = dict(multichannel=True, convert2ycbcr=True, wavelet='db1') + +all_psnr = [] +max_shifts = [0, 1, 3, 5] +for n, s in enumerate(max_shifts): + im_bayescs = cycle_spin(noisy, func=denoise_wavelet, max_shifts=s, + func_kw=denoise_kwargs, multichannel=True) + ax[n+1].imshow(im_bayescs) + ax[n+1].axis('off') + psnr = compare_psnr(original, im_bayescs) + if s == 0: + ax[n+1].set_title( + "Denoised: no cycle shifts\nPSNR={:0.4g}".format(psnr)) + else: + ax[n+1].set_title( + "Denoised: {0}x{0} shifts\nPSNR={1:0.4g}".format(s+1, psnr)) + all_psnr.append(psnr) + +# plot PSNR as a function of the degree of cycle shifting +ax[5].plot(max_shifts, all_psnr, 'k.-') +ax[5].set_ylabel('PSNR (dB)') +ax[5].set_xlabel('max cycle shift along each axis') +ax[5].grid('on') +plt.subplots_adjust(wspace=0.35, hspace=0.35) + +# Annotate with a cyan arrow on the 6x6 case vs. no cycle shift case to +# illustrate a region with reduced block-like artifact with cycle shifting +arrowprops = dict(arrowstyle="simple,tail_width=0.1,head_width=0.5", + connectionstyle="arc3", + color='c') +for i in [1, 4]: + ax[i].annotate("", xy=(101, 39), xycoords='data', + xytext=(70, 70), textcoords='data', + arrowprops=arrowprops) + +plt.show() diff -Nru skimage-0.13.1/doc/examples/filters/plot_denoise.py skimage-0.14.0/doc/examples/filters/plot_denoise.py --- skimage-0.13.1/doc/examples/filters/plot_denoise.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_denoise.py 2018-05-29 01:27:44.000000000 +0000 @@ -52,8 +52,8 @@ sigma = 0.155 noisy = random_noise(original, var=sigma**2) -fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(8, 5), sharex=True, - sharey=True, subplot_kw={'adjustable': 'box-forced'}) +fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(8, 5), + sharex=True, sharey=True) plt.gray() diff -Nru skimage-0.13.1/doc/examples/filters/plot_denoise_wavelet.py skimage-0.14.0/doc/examples/filters/plot_denoise_wavelet.py --- 
skimage-0.13.1/doc/examples/filters/plot_denoise_wavelet.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_denoise_wavelet.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,104 @@ +""" +================= +Wavelet denoising +================= + +Wavelet denoising relies on the wavelet representation of the image. +Gaussian noise tends to be represented by small values in the wavelet domain +and can be removed by setting coefficients below a given threshold to zero +(hard thresholding) or shrinking all coefficients toward zero by a given amount +(soft thresholding). + +In this example, we illustrate two different methods for wavelet coefficient +threshold selection: BayesShrink and VisuShrink. + +VisuShrink +---------- +The VisuShrink approach employs a single, universal threshold to all wavelet +detail coefficients. This threshold is designed to remove additive Gaussian +noise with high probability, which tends to result in overly smooth image +appearance. By specifying a sigma that is smaller than the true noise standard +deviation, a more visually agreeable result can be obtained. + +BayesShrink +----------- +The BayesShrink algorithm is an adaptive approach to wavelet soft thresholding +where a unique threshold is estimated for each wavelet subband. This generally +results in an improvement over what can be obtained with a single threshold. + +""" +import matplotlib.pyplot as plt + +from skimage.restoration import (denoise_wavelet, estimate_sigma) +from skimage import data, img_as_float +from skimage.util import random_noise +from skimage.measure import compare_psnr + + +original = img_as_float(data.chelsea()[100:250, 50:300]) + +sigma = 0.12 +noisy = random_noise(original, var=sigma**2) + +fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(8, 5), + sharex=True, sharey=True) + +plt.gray() + +# Estimate the average noise standard deviation across color channels. 
+sigma_est = estimate_sigma(noisy, multichannel=True, average_sigmas=True) +# Due to clipping in random_noise, the estimate will be a bit smaller than the +# specified sigma. +print("Estimated Gaussian noise standard deviation = {}".format(sigma_est)) + +im_bayes = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True, + method='BayesShrink', mode='soft') +im_visushrink = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True, + method='VisuShrink', mode='soft', + sigma=sigma_est) + +# VisuShrink is designed to eliminate noise with high probability, but this +# results in a visually over-smooth appearance. Repeat, specifying a reduction +# in the threshold by factors of 2 and 4. +im_visushrink2 = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True, + method='VisuShrink', mode='soft', + sigma=sigma_est/2) +im_visushrink4 = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True, + method='VisuShrink', mode='soft', + sigma=sigma_est/4) + +# Compute PSNR as an indication of image quality +psnr_noisy = compare_psnr(original, noisy) +psnr_bayes = compare_psnr(original, im_bayes) +psnr_visushrink = compare_psnr(original, im_visushrink) +psnr_visushrink2 = compare_psnr(original, im_visushrink2) +psnr_visushrink4 = compare_psnr(original, im_visushrink4) + +ax[0, 0].imshow(noisy) +ax[0, 0].axis('off') +ax[0, 0].set_title('Noisy\nPSNR={:0.4g}'.format(psnr_noisy)) +ax[0, 1].imshow(im_bayes) +ax[0, 1].axis('off') +ax[0, 1].set_title( + 'Wavelet denoising\n(BayesShrink)\nPSNR={:0.4g}'.format(psnr_bayes)) +ax[0, 2].imshow(im_visushrink) +ax[0, 2].axis('off') +ax[0, 2].set_title( + ('Wavelet denoising\n(VisuShrink, $\sigma=\sigma_{est}$)\n' + 'PSNR=%0.4g' % psnr_visushrink)) +ax[1, 0].imshow(original) +ax[1, 0].axis('off') +ax[1, 0].set_title('Original') +ax[1, 1].imshow(im_visushrink2) +ax[1, 1].axis('off') +ax[1, 1].set_title( + ('Wavelet denoising\n(VisuShrink, $\sigma=\sigma_{est}/2$)\n' + 'PSNR=%0.4g' % psnr_visushrink2)) +ax[1, 
2].imshow(im_visushrink4) +ax[1, 2].axis('off') +ax[1, 2].set_title( + ('Wavelet denoising\n(VisuShrink, $\sigma=\sigma_{est}/4$)\n' + 'PSNR=%0.4g' % psnr_visushrink4)) +fig.tight_layout() + +plt.show() diff -Nru skimage-0.13.1/doc/examples/filters/plot_entropy.py skimage-0.14.0/doc/examples/filters/plot_entropy.py --- skimage-0.13.1/doc/examples/filters/plot_entropy.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_entropy.py 2018-05-29 01:27:44.000000000 +0000 @@ -54,11 +54,8 @@ image = img_as_ubyte(data.camera()) -fig, (ax0, ax1) = plt.subplots(ncols=2, - figsize=(12, 4), - sharex=True, - sharey=True, - subplot_kw={"adjustable": "box-forced"}) +fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 4), + sharex=True, sharey=True) img0 = ax0.imshow(image, cmap=plt.cm.gray) ax0.set_title("Image") diff -Nru skimage-0.13.1/doc/examples/filters/plot_frangi.py skimage-0.14.0/doc/examples/filters/plot_frangi.py --- skimage-0.13.1/doc/examples/filters/plot_frangi.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_frangi.py 2018-05-29 01:27:44.000000000 +0000 @@ -14,7 +14,7 @@ image = camera() -fig, ax = plt.subplots(ncols=3, subplot_kw={'adjustable': 'box-forced'}) +fig, ax = plt.subplots(ncols=3) ax[0].imshow(image, cmap=plt.cm.gray) ax[0].set_title('Original image') @@ -29,3 +29,4 @@ a.axis('off') plt.tight_layout() +plt.show() diff -Nru skimage-0.13.1/doc/examples/filters/plot_hysteresis.py skimage-0.14.0/doc/examples/filters/plot_hysteresis.py --- skimage-0.13.1/doc/examples/filters/plot_hysteresis.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_hysteresis.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +""" +======================= +Hysteresis thresholding +======================= + +*Hysteresis* is the lagging of an effect---a kind of inertia. 
In the +context of thresholding, it means that areas above some *low* threshold +are considered to be above the threshold *if* they are also connected +to areas above a higher, more stringent, threshold. They can thus be +seen as continuations of these high-confidence areas. + +Below, we compare normal thresholding to hysteresis thresholding. +Notice how hysteresis allows one to ignore "noise" outside of the coin +edges. +""" + +import matplotlib.pyplot as plt +from skimage import data, filters + +fig, ax = plt.subplots(nrows=2, ncols=2) + +image = data.coins() +edges = filters.sobel(image) + +low = 0.1 +high = 0.35 + +lowt = (edges > low).astype(int) +hight = (edges > high).astype(int) +hyst = filters.apply_hysteresis_threshold(edges, low, high) + +ax[0, 0].imshow(image, cmap='gray') +ax[0, 0].set_title('Original image') + +ax[0, 1].imshow(edges, cmap='magma') +ax[0, 1].set_title('Sobel edges') + +ax[1, 0].imshow(lowt, cmap='magma') +ax[1, 0].set_title('Low threshold') + +ax[1, 1].imshow(hight + hyst, cmap='magma') +ax[1, 1].set_title('Hysteresis threshold') + +for a in ax.ravel(): + a.axis('off') + +plt.tight_layout() + +plt.show() diff -Nru skimage-0.13.1/doc/examples/filters/plot_inpaint.py skimage-0.14.0/doc/examples/filters/plot_inpaint.py --- skimage-0.13.1/doc/examples/filters/plot_inpaint.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_inpaint.py 2018-05-29 01:27:44.000000000 +0000 @@ -9,14 +9,19 @@ exploiting the information presented in non-damaged regions. In this example, we show how the masked pixels get inpainted by -inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_. +inpainting algorithm based on 'biharmonic equation'-assumption [2]_ [3]_ [4]_. .. [1] Wikipedia. Inpainting https://en.wikipedia.org/wiki/Inpainting .. [2] Wikipedia. Biharmonic equation https://en.wikipedia.org/wiki/Biharmonic_equation .. 
[3] N.S.Hoang, S.B.Damelin, "On surface completion and image - inpainting by biharmonic functions: numerical aspects" + inpainting by biharmonic functions: numerical aspects", + https://arxiv.org/abs/1707.06567 +.. [4] C. K. Chui and H. N. Mhaskar, MRA Contextual-Recovery Extension of + Smooth Functions on Manifolds, Appl. and Comp. Harmonic Anal., + 28 (2010), 104-113, + DOI: 10.1016/j.acha.2009.04.004 """ import numpy as np diff -Nru skimage-0.13.1/doc/examples/filters/plot_nonlocal_means.py skimage-0.14.0/doc/examples/filters/plot_nonlocal_means.py --- skimage-0.13.1/doc/examples/filters/plot_nonlocal_means.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_nonlocal_means.py 2018-05-29 01:27:44.000000000 +0000 @@ -10,32 +10,100 @@ the average is performed only for pixels that have patches close to the current patch. As a result, this algorithm can restore well textures, that would be blurred by other denoising algoritm. + +When the `fast_mode` argument is `False`, a spatial Gaussian weighting is +applied to the patches when computing patch distances. When `fast_mode` is +`True` a faster algorithm employing uniform spatial weighting on the patches +is applied. + +For either of these cases, if the noise standard deviation, `sigma`, is +provided, the expected noise variance is subtracted out when computing patch +distances. This can lead to a modest improvement in image quality. + +The `estimate_sigma` function can provide a good starting point for setting +the `h` (and optionally, `sigma`) parameters for the non-local means algorithm. +`h` is a constant that controls the decay in patch weights as a function of the +distance between patches. Larger `h` allows more smoothing between disimilar +patches. + +In this demo, `h`, was hand-tuned to give the approximate best-case performance +of each variant. 
+ """ import numpy as np import matplotlib.pyplot as plt from skimage import data, img_as_float -from skimage.restoration import denoise_nl_means +from skimage.restoration import denoise_nl_means, estimate_sigma +from skimage.measure import compare_psnr astro = img_as_float(data.astronaut()) astro = astro[30:180, 150:300] -noisy = astro + 0.3 * np.random.random(astro.shape) +sigma = 0.08 +noisy = astro + sigma * np.random.standard_normal(astro.shape) noisy = np.clip(noisy, 0, 1) -denoise = denoise_nl_means(noisy, 7, 9, 0.08, multichannel=True) - -fig, ax = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) - -ax[0].imshow(noisy) -ax[0].axis('off') -ax[0].set_title('noisy') -ax[1].imshow(denoise) -ax[1].axis('off') -ax[1].set_title('non-local means') +# estimate the noise standard deviation from the noisy image +sigma_est = np.mean(estimate_sigma(noisy, multichannel=True)) +print("estimated noise standard deviation = {}".format(sigma_est)) + +patch_kw = dict(patch_size=5, # 5x5 patches + patch_distance=6, # 13x13 search area + multichannel=True) + +# slow algorithm +denoise = denoise_nl_means(noisy, h=1.15 * sigma_est, fast_mode=False, + **patch_kw) + +# slow algorithm, sigma provided +denoise2 = denoise_nl_means(noisy, h=0.8 * sigma_est, sigma=sigma_est, + fast_mode=False, **patch_kw) + +# fast algorithm +denoise_fast = denoise_nl_means(noisy, h=0.8 * sigma_est, fast_mode=True, + **patch_kw) + +# fast algorithm, sigma provided +denoise2_fast = denoise_nl_means(noisy, h=0.6 * sigma_est, sigma=sigma_est, + fast_mode=True, **patch_kw) + +fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(8, 6), + sharex=True, sharey=True) + +ax[0, 0].imshow(noisy) +ax[0, 0].axis('off') +ax[0, 0].set_title('noisy') +ax[0, 1].imshow(denoise) +ax[0, 1].axis('off') +ax[0, 1].set_title('non-local means\n(slow)') +ax[0, 2].imshow(denoise2) +ax[0, 2].axis('off') +ax[0, 2].set_title('non-local means\n(slow, using $\sigma_{est}$)') +ax[1, 
0].imshow(astro) +ax[1, 0].axis('off') +ax[1, 0].set_title('original\n(noise free)') +ax[1, 1].imshow(denoise_fast) +ax[1, 1].axis('off') +ax[1, 1].set_title('non-local means\n(fast)') +ax[1, 2].imshow(denoise2_fast) +ax[1, 2].axis('off') +ax[1, 2].set_title('non-local means\n(fast, using $\sigma_{est}$)') fig.tight_layout() +# print PSNR metric for each case +psnr_noisy = compare_psnr(astro, noisy) +psnr = compare_psnr(astro, denoise) +psnr2 = compare_psnr(astro, denoise2) +psnr_fast = compare_psnr(astro, denoise_fast) +psnr2_fast = compare_psnr(astro, denoise2_fast) + +print("PSNR (noisy) = {:0.2f}".format(psnr_noisy)) +print("PSNR (slow) = {:0.2f}".format(psnr)) +print("PSNR (slow, using sigma) = {:0.2f}".format(psnr2)) +print("PSNR (fast) = {:0.2f}".format(psnr_fast)) +print("PSNR (fast, using sigma) = {:0.2f}".format(psnr2_fast)) + plt.show() diff -Nru skimage-0.13.1/doc/examples/filters/plot_phase_unwrap.py skimage-0.14.0/doc/examples/filters/plot_phase_unwrap.py --- skimage-0.13.1/doc/examples/filters/plot_phase_unwrap.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_phase_unwrap.py 2018-05-29 01:27:44.000000000 +0000 @@ -26,7 +26,7 @@ # Perform phase unwrapping image_unwrapped = unwrap_phase(image_wrapped) -fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'}) +fig, ax = plt.subplots(2, 2, sharex=True, sharey=True) ax1, ax2, ax3, ax4 = ax.ravel() fig.colorbar(ax1.imshow(image, cmap='gray', vmin=0, vmax=4 * np.pi), ax=ax1) diff -Nru skimage-0.13.1/doc/examples/filters/plot_rank_mean.py skimage-0.14.0/doc/examples/filters/plot_rank_mean.py --- skimage-0.13.1/doc/examples/filters/plot_rank_mean.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_rank_mean.py 2018-05-29 01:27:44.000000000 +0000 @@ -41,7 +41,6 @@ for n in range(0, len(imgs)): ax[n].imshow(imgs[n], cmap=plt.cm.gray) ax[n].set_title(titles[n]) - ax[n].set_adjustable('box-forced') 
ax[n].axis('off') plt.tight_layout() diff -Nru skimage-0.13.1/doc/examples/filters/plot_restoration.py skimage-0.14.0/doc/examples/filters/plot_restoration.py --- skimage-0.13.1/doc/examples/filters/plot_restoration.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/filters/plot_restoration.py 2018-05-29 01:27:44.000000000 +0000 @@ -43,8 +43,7 @@ deconvolved, _ = restoration.unsupervised_wiener(astro, psf) fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 5), - sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) + sharex=True, sharey=True) plt.gray() diff -Nru skimage-0.13.1/doc/examples/segmentation/plot_chan_vese.py skimage-0.14.0/doc/examples/segmentation/plot_chan_vese.py --- skimage-0.13.1/doc/examples/segmentation/plot_chan_vese.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_chan_vese.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,76 @@ +""" +====================== +Chan-Vese Segmentation +====================== + +The Chan-Vese segmentation algorithm is designed to segment objects without +clearly defined boundaries. This algorithm is based on level sets that are +evolved iteratively to minimize an energy, which is defined by +weighted values corresponding to the sum of differences intensity +from the average value outside the segmented region, the sum of +differences from the average value inside the segmented region, and a +term which is dependent on the length of the boundary of the segmented +region. + +This algorithm was first proposed by Tony Chan and Luminita Vese, in +a publication entitled "An Active Countour Model Without Edges" [1]_. See also +[2]_, [3]_. + +This implementation of the algorithm is somewhat simplified in the +sense that the area factor 'nu' described in the original paper is not +implemented, and is only suitable for grayscale images. + +Typical values for ``lambda1`` and ``lambda2`` are 1. 
If the 'background' is +very different from the segmented object in terms of distribution (for +example, a uniform black image with figures of varying intensity), then +these values should be different from each other. + +Typical values for ``mu`` are between 0 and 1, though higher values can be +used when dealing with shapes with very ill-defined contours. + +The algorithm also returns a list of values that corresponds to the +energy at each iteration. This can be used to adjust the various +parameters described above. + +References +---------- +.. [1] An Active Contour Model without Edges, Tony Chan and + Luminita Vese, Scale-Space Theories in Computer Vision, 1999, + DOI:10.1007/3-540-48236-9_13 +.. [2] Chan-Vese Segmentation, Pascal Getreuer, Image Processing On + Line, 2 (2012), pp. 214-224, + DOI:10.5201/ipol.2012.g-cv +.. [3] The Chan-Vese Algorithm - Project Report, Rami Cohen, + http://arxiv.org/abs/1107.2782 , 2011 +""" +import numpy as np +import matplotlib.pyplot as plt +from skimage import data, img_as_float +from skimage.segmentation import chan_vese + +image = img_as_float(data.camera()) +# Feel free to play around with the parameters to see how they impact the result +cv = chan_vese(image, mu=0.25, lambda1=1, lambda2=1, tol=1e-3, max_iter=200, + dt=0.5, init_level_set="checkerboard", extended_output=True) + +fig, axes = plt.subplots(2, 2, figsize=(8, 8)) +ax = axes.flatten() + +ax[0].imshow(image, cmap="gray") +ax[0].set_axis_off() +ax[0].set_title("Original Image", fontsize=12) + +ax[1].imshow(cv[0], cmap="gray") +ax[1].set_axis_off() +title = "Chan-Vese segmentation - {} iterations".format(len(cv[2])) +ax[1].set_title(title, fontsize=12) + +ax[2].imshow(cv[1], cmap="gray") +ax[2].set_axis_off() +ax[2].set_title("Final Level Set", fontsize=12) + +ax[3].plot(cv[2]) +ax[3].set_title("Evolution of energy over iterations", fontsize=12) + +fig.tight_layout() +plt.show() diff -Nru skimage-0.13.1/doc/examples/segmentation/plot_extrema.py 
skimage-0.14.0/doc/examples/segmentation/plot_extrema.py --- skimage-0.13.1/doc/examples/segmentation/plot_extrema.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_extrema.py 2018-05-29 01:27:44.000000000 +0000 @@ -19,19 +19,20 @@ color_image = data.hubble_deep_field() -img = color.rgb2grey(color_image) -# the rescaling is done only for visualization purpose. -# the algorithms would work identically in an unscaled version of the -# image. However, the parameter h needs to be adapted to the scale. -img = exposure.rescale_intensity(img) - -# for visualization, we work on a crop of the image. +# for illustration purposes, we work on a crop of the image. x_0 = 70 y_0 = 354 width = 100 height = 100 +img = color.rgb2gray(color_image)[y_0:(y_0 + height), x_0:(x_0 + width)] + +# the rescaling is done only for visualization purpose. +# the algorithms would work identically in an unscaled version of the +# image. However, the parameter h needs to be adapted to the scale. +img = exposure.rescale_intensity(img) + ############################################################## # MAXIMA DETECTION @@ -48,7 +49,7 @@ # We observed in the previous image, that there are many local maxima # that are caused by the noise in the image. # For this, we find all local maxima with a height of h. -# This height is the grey level value by which we need to descent +# This height is the gray level value by which we need to descent # in order to reach a higher maximum and it can be seen as a local # contrast measurement. # The value of h scales with the dynamic range of the image, i.e. 
@@ -67,18 +68,15 @@ # a new figure with 3 subplots fig, ax = plt.subplots(1, 3, figsize=(15, 5)) -ax[0].imshow(img[y_0:(y_0 + height), x_0:(x_0 + width)], cmap='gray', - interpolation='none') +ax[0].imshow(img, cmap='gray', interpolation='none') ax[0].set_title('Original image') ax[0].axis('off') -ax[1].imshow(overlay[y_0:(y_0 + height), x_0:(x_0 + width)], - interpolation='none') +ax[1].imshow(overlay, interpolation='none') ax[1].set_title('Local Maxima') ax[1].axis('off') -ax[2].imshow(overlay_h[y_0:(y_0 + height), x_0:(x_0 + width)], - interpolation='none') +ax[2].imshow(overlay_h, interpolation='none') ax[2].set_title('h maxima for h = %.2f' % h) ax[2].axis('off') plt.show() diff -Nru skimage-0.13.1/doc/examples/segmentation/plot_join_segmentations.py skimage-0.14.0/doc/examples/segmentation/plot_join_segmentations.py --- skimage-0.13.1/doc/examples/segmentation/plot_join_segmentations.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_join_segmentations.py 2018-05-29 01:27:44.000000000 +0000 @@ -43,8 +43,8 @@ segj = join_segmentations(seg1, seg2) # Show the segmentations. 
-fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(9, 5), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(9, 5), + sharex=True, sharey=True) ax = axes.ravel() ax[0].imshow(coins, cmap='gray') ax[0].set_title('Image') diff -Nru skimage-0.13.1/doc/examples/segmentation/plot_marked_watershed.py skimage-0.14.0/doc/examples/segmentation/plot_marked_watershed.py --- skimage-0.13.1/doc/examples/segmentation/plot_marked_watershed.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_marked_watershed.py 2018-05-29 01:27:44.000000000 +0000 @@ -45,20 +45,21 @@ labels = watershed(gradient, markers) # display results -fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 8), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'}) +fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 8), + sharex=True, sharey=True) ax = axes.ravel() ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest') ax[0].set_title("Original") -ax[1].imshow(gradient, cmap=plt.cm.spectral, interpolation='nearest') +ax[1].imshow(gradient, cmap=plt.cm.nipy_spectral, interpolation='nearest') ax[1].set_title("Local Gradient") -ax[2].imshow(markers, cmap=plt.cm.spectral, interpolation='nearest') +ax[2].imshow(markers, cmap=plt.cm.nipy_spectral, interpolation='nearest') ax[2].set_title("Markers") ax[3].imshow(image, cmap=plt.cm.gray, interpolation='nearest') -ax[3].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest', alpha=.7) +ax[3].imshow(labels, cmap=plt.cm.nipy_spectral, interpolation='nearest', alpha=.7) ax[3].set_title("Segmented") for a in ax: diff -Nru skimage-0.13.1/doc/examples/segmentation/plot_morphsnakes.py skimage-0.14.0/doc/examples/segmentation/plot_morphsnakes.py --- skimage-0.13.1/doc/examples/segmentation/plot_morphsnakes.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_morphsnakes.py 2018-05-29 01:27:44.000000000 
+0000 @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- + +""" +==================== +Morphological Snakes +==================== + +*Morphological Snakes* [1]_ are a family of methods for image segmentation. +Their behavior is similar to that of active contours (for example, *Geodesic +Active Contours* [2]_ or *Active Contours without Edges* [3]_). However, +*Morphological Snakes* use morphological operators (such as dilation or +erosion) over a binary array instead of solving PDEs over a floating point +array, which is the standard approach for active contours. This makes +*Morphological Snakes* faster and numerically more stable than their +traditional counterpart. + +There are two *Morphological Snakes* methods available in this implementation: +*Morphological Geodesic Active Contours* (**MorphGAC**, implemented in the +function ``morphological_geodesic_active_contour``) and *Morphological Active +Contours without Edges* (**MorphACWE**, implemented in the function +``morphological_chan_vese``). + +**MorphGAC** is suitable for images with visible contours, even when these +contours might be noisy, cluttered, or partially unclear. It requires, however, +that the image is preprocessed to highlight the contours. This can be done +using the function ``inverse_gaussian_gradient``, although the user might want +to define their own version. The quality of the **MorphGAC** segmentation +depends greatly on this preprocessing step. + +On the contrary, **MorphACWE** works well when the pixel values of the inside +and the outside regions of the object to segment have different averages. +Unlike **MorphGAC**, **MorphACWE** does not require that the contours of the +object are well defined, and it works over the original image without any +preceding processing. This makes **MorphACWE** easier to use and tune than +**MorphGAC**. + +References +---------- + +.. 
[1] A Morphological Approach to Curvature-based Evolution of Curves and + Surfaces, Pablo Márquez-Neila, Luis Baumela and Luis Álvarez. In IEEE + Transactions on Pattern Analysis and Machine Intelligence (PAMI), + 2014, DOI 10.1109/TPAMI.2013.106 +.. [2] Geodesic Active Contours, Vicent Caselles, Ron Kimmel and Guillermo + Sapiro. In International Journal of Computer Vision (IJCV), 1997, + DOI:10.1023/A:1007979827043 +.. [3] Active Contours without Edges, Tony Chan and Luminita Vese. In IEEE + Transactions on Image Processing, 2001, DOI:10.1109/83.902291 + +""" + +import numpy as np +import matplotlib.pyplot as plt +from skimage import data, img_as_float +from skimage.segmentation import (morphological_chan_vese, + morphological_geodesic_active_contour, + inverse_gaussian_gradient, + checkerboard_level_set) + + +def store_evolution_in(lst): + """Returns a callback function to store the evolution of the level sets in + the given list. + """ + + def _store(x): + lst.append(np.copy(x)) + + return _store + + +# Morphological ACWE +image = img_as_float(data.camera()) + +# Initial level set +init_ls = checkerboard_level_set(image.shape, 6) +# List with intermediate results for plotting the evolution +evolution = [] +callback = store_evolution_in(evolution) +ls = morphological_chan_vese(image, 35, init_level_set=init_ls, smoothing=3, + iter_callback=callback) + +fig, axes = plt.subplots(2, 2, figsize=(8, 8)) +ax = axes.flatten() + +ax[0].imshow(image, cmap="gray") +ax[0].set_axis_off() +ax[0].contour(ls, [0.5], colors='r') +ax[0].set_title("Morphological ACWE segmentation", fontsize=12) + +ax[1].imshow(ls, cmap="gray") +ax[1].set_axis_off() +contour = ax[1].contour(evolution[2], [0.5], colors='g') +contour.collections[0].set_label("Iteration 2") +contour = ax[1].contour(evolution[7], [0.5], colors='y') +contour.collections[0].set_label("Iteration 7") +contour = ax[1].contour(evolution[-1], [0.5], colors='r') +contour.collections[0].set_label("Iteration 35") 
+ax[1].legend(loc="upper right") +title = "Morphological ACWE evolution" +ax[1].set_title(title, fontsize=12) + + +# Morphological GAC +image = img_as_float(data.coins()) +gimage = inverse_gaussian_gradient(image) + +# Initial level set +init_ls = np.zeros(image.shape, dtype=np.int8) +init_ls[10:-10, 10:-10] = 1 +# List with intermediate results for plotting the evolution +evolution = [] +callback = store_evolution_in(evolution) +ls = morphological_geodesic_active_contour(gimage, 230, init_ls, + smoothing=1, balloon=-1, + threshold=0.69, + iter_callback=callback) + +ax[2].imshow(image, cmap="gray") +ax[2].set_axis_off() +ax[2].contour(ls, [0.5], colors='r') +ax[2].set_title("Morphological GAC segmentation", fontsize=12) + +ax[3].imshow(ls, cmap="gray") +ax[3].set_axis_off() +contour = ax[3].contour(evolution[0], [0.5], colors='g') +contour.collections[0].set_label("Iteration 0") +contour = ax[3].contour(evolution[100], [0.5], colors='y') +contour.collections[0].set_label("Iteration 100") +contour = ax[3].contour(evolution[-1], [0.5], colors='r') +contour.collections[0].set_label("Iteration 230") +ax[3].legend(loc="upper right") +title = "Morphological GAC evolution" +ax[3].set_title(title, fontsize=12) + +fig.tight_layout() +plt.show() diff -Nru skimage-0.13.1/doc/examples/segmentation/plot_peak_local_max.py skimage-0.14.0/doc/examples/segmentation/plot_peak_local_max.py --- skimage-0.13.1/doc/examples/segmentation/plot_peak_local_max.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_peak_local_max.py 2018-05-29 01:27:44.000000000 +0000 @@ -25,8 +25,7 @@ coordinates = peak_local_max(im, min_distance=20) # display results -fig, axes = plt.subplots(1, 3, figsize=(8, 3), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(1, 3, figsize=(8, 3), sharex=True, sharey=True) ax = axes.ravel() ax[0].imshow(im, cmap=plt.cm.gray) ax[0].axis('off') diff -Nru 
skimage-0.13.1/doc/examples/segmentation/plot_rag_merge.py skimage-0.14.0/doc/examples/segmentation/plot_rag_merge.py --- skimage-0.13.1/doc/examples/segmentation/plot_rag_merge.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_rag_merge.py 2018-05-29 01:27:44.000000000 +0000 @@ -68,8 +68,6 @@ merge_func=merge_mean_color, weight_func=_weight_mean_color) -g2 = graph.rag_mean_color(img, labels2) - out = color.label2rgb(labels2, img, kind='avg') out = segmentation.mark_boundaries(out, labels2, (0, 0, 0)) io.imshow(out) diff -Nru skimage-0.13.1/doc/examples/segmentation/plot_random_walker_segmentation.py skimage-0.14.0/doc/examples/segmentation/plot_random_walker_segmentation.py --- skimage-0.13.1/doc/examples/segmentation/plot_random_walker_segmentation.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_random_walker_segmentation.py 2018-05-29 01:27:44.000000000 +0000 @@ -17,7 +17,7 @@ values, and use the random walker for the segmentation. .. [1] *Random walks for image segmentation*, Leo Grady, IEEE Trans. Pattern - Anal. Mach. Intell. 2006 Nov; 28(11):1768-83 + Anal. Mach. Intell. 2006 Nov; 28(11):1768-83 DOI:10.1109/TPAMI.2006.233 """ import numpy as np @@ -25,14 +25,21 @@ from skimage.segmentation import random_walker from skimage.data import binary_blobs +from skimage.exposure import rescale_intensity import skimage # Generate noisy synthetic data data = skimage.img_as_float(binary_blobs(length=128, seed=1)) -data += 0.35 * np.random.randn(*data.shape) +sigma = 0.35 +data += np.random.normal(loc=0, scale=sigma, size=data.shape) +data = rescale_intensity(data, in_range=(-sigma, 1 + sigma), + out_range=(-1, 1)) + +# The range of the binary image spans over (-1, 1). +# We choose the hottest and the coldest pixels as markers. 
markers = np.zeros(data.shape, dtype=np.uint) -markers[data < -0.3] = 1 -markers[data > 1.3] = 2 +markers[data < -0.95] = 1 +markers[data > 0.95] = 2 # Run random walker algorithm labels = random_walker(data, markers, beta=10, mode='bf') @@ -42,15 +49,12 @@ sharex=True, sharey=True) ax1.imshow(data, cmap='gray', interpolation='nearest') ax1.axis('off') -ax1.set_adjustable('box-forced') ax1.set_title('Noisy data') -ax2.imshow(markers, cmap='hot', interpolation='nearest') +ax2.imshow(markers, cmap='magma', interpolation='nearest') ax2.axis('off') -ax2.set_adjustable('box-forced') ax2.set_title('Markers') ax3.imshow(labels, cmap='gray', interpolation='nearest') ax3.axis('off') -ax3.set_adjustable('box-forced') ax3.set_title('Segmentation') fig.tight_layout() diff -Nru skimage-0.13.1/doc/examples/segmentation/plot_segmentations.py skimage-0.14.0/doc/examples/segmentation/plot_segmentations.py --- skimage-0.13.1/doc/examples/segmentation/plot_segmentations.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_segmentations.py 2018-05-29 01:27:44.000000000 +0000 @@ -106,8 +106,7 @@ print('SLIC number of segments: {}'.format(len(np.unique(segments_slic)))) print('Quickshift number of segments: {}'.format(len(np.unique(segments_quick)))) -fig, ax = plt.subplots(2, 2, figsize=(10, 10), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, ax = plt.subplots(2, 2, figsize=(10, 10), sharex=True, sharey=True) ax[0, 0].imshow(mark_boundaries(img, segments_fz)) ax[0, 0].set_title("Felzenszwalbs's method") diff -Nru skimage-0.13.1/doc/examples/segmentation/plot_thresholding.py skimage-0.14.0/doc/examples/segmentation/plot_thresholding.py --- skimage-0.13.1/doc/examples/segmentation/plot_thresholding.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_thresholding.py 2018-05-29 01:27:44.000000000 +0000 @@ -34,9 +34,9 @@ fig, axes = plt.subplots(ncols=3, figsize=(8, 2.5)) ax = axes.ravel() 
-ax[0] = plt.subplot(1, 3, 1, adjustable='box-forced') +ax[0] = plt.subplot(1, 3, 1) ax[1] = plt.subplot(1, 3, 2) -ax[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0], adjustable='box-forced') +ax[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0]) ax[0].imshow(image, cmap=plt.cm.gray) ax[0].set_title('Original') diff -Nru skimage-0.13.1/doc/examples/segmentation/plot_watershed.py skimage-0.14.0/doc/examples/segmentation/plot_watershed.py --- skimage-0.13.1/doc/examples/segmentation/plot_watershed.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/segmentation/plot_watershed.py 2018-05-29 01:27:44.000000000 +0000 @@ -48,15 +48,14 @@ markers = ndi.label(local_maxi)[0] labels = watershed(-distance, markers, mask=image) -fig, axes = plt.subplots(ncols=3, figsize=(9, 3), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(ncols=3, figsize=(9, 3), sharex=True, sharey=True) ax = axes.ravel() ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest') ax[0].set_title('Overlapping objects') ax[1].imshow(-distance, cmap=plt.cm.gray, interpolation='nearest') ax[1].set_title('Distances') -ax[2].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest') +ax[2].imshow(labels, cmap=plt.cm.nipy_spectral, interpolation='nearest') ax[2].set_title('Separated objects') for a in ax: diff -Nru skimage-0.13.1/doc/examples/transform/plot_pyramid.py skimage-0.14.0/doc/examples/transform/plot_pyramid.py --- skimage-0.13.1/doc/examples/transform/plot_pyramid.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/transform/plot_pyramid.py 2018-05-29 01:27:44.000000000 +0000 @@ -5,8 +5,8 @@ The ``pyramid_gaussian`` function takes an image and yields successive images shrunk by a constant scale factor. Image pyramids are often used, e.g., to -implement algorithms for denoising, texture discrimination, and scale- -invariant detection. 
+implement algorithms for denoising, texture discrimination, and scale-invariant +detection. """ import numpy as np @@ -18,7 +18,7 @@ image = data.astronaut() rows, cols, dim = image.shape -pyramid = tuple(pyramid_gaussian(image, downscale=2)) +pyramid = tuple(pyramid_gaussian(image, downscale=2, multichannel=True)) composite_image = np.zeros((rows, cols + cols // 2, 3), dtype=np.double) diff -Nru skimage-0.13.1/doc/examples/transform/plot_radon_transform.py skimage-0.14.0/doc/examples/transform/plot_radon_transform.py --- skimage-0.13.1/doc/examples/transform/plot_radon_transform.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/transform/plot_radon_transform.py 2018-05-29 01:27:44.000000000 +0000 @@ -67,8 +67,8 @@ from skimage import data_dir from skimage.transform import radon, rescale -image = imread(data_dir + "/phantom.png", as_grey=True) -image = rescale(image, scale=0.4, mode='reflect') +image = imread(data_dir + "/phantom.png", as_gray=True) +image = rescale(image, scale=0.4, mode='reflect', multichannel=False) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5)) @@ -109,8 +109,7 @@ imkwargs = dict(vmin=-0.2, vmax=0.2) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5), - sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) + sharex=True, sharey=True) ax1.set_title("Reconstruction\nFiltered back projection") ax1.imshow(reconstruction_fbp, cmap=plt.cm.Greys_r) ax2.set_title("Reconstruction error\nFiltered back projection") @@ -158,8 +157,7 @@ print('SART (1 iteration) rms reconstruction error: %.3g' % np.sqrt(np.mean(error**2))) -fig, axes = plt.subplots(2, 2, figsize=(8, 8.5), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(2, 2, figsize=(8, 8.5), sharex=True, sharey=True) ax = axes.ravel() ax[0].set_title("Reconstruction\nSART") diff -Nru skimage-0.13.1/doc/examples/transform/plot_register_translation.py 
skimage-0.14.0/doc/examples/transform/plot_register_translation.py --- skimage-0.13.1/doc/examples/transform/plot_register_translation.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/transform/plot_register_translation.py 2018-05-29 01:27:44.000000000 +0000 @@ -34,8 +34,8 @@ shift, error, diffphase = register_translation(image, offset_image) fig = plt.figure(figsize=(8, 3)) -ax1 = plt.subplot(1, 3, 1, adjustable='box-forced') -ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced') +ax1 = plt.subplot(1, 3, 1) +ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1) ax3 = plt.subplot(1, 3, 3) ax1.imshow(image, cmap='gray') @@ -62,8 +62,8 @@ shift, error, diffphase = register_translation(image, offset_image, 100) fig = plt.figure(figsize=(8, 3)) -ax1 = plt.subplot(1, 3, 1, adjustable='box-forced') -ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced') +ax1 = plt.subplot(1, 3, 1) +ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1) ax3 = plt.subplot(1, 3, 3) ax1.imshow(image, cmap='gray') diff -Nru skimage-0.13.1/doc/examples/transform/plot_rescale.py skimage-0.14.0/doc/examples/transform/plot_rescale.py --- skimage-0.13.1/doc/examples/transform/plot_rescale.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/doc/examples/transform/plot_rescale.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,54 @@ +""" +============================== +Rescale, resize, and downscale +============================== + +`Rescale` operation resizes an image by a given scaling factor. The scaling +factor can either be a single floating point value, or multiple values - one +along each axis. + +`Resize` serves the same purpose, but allows to specify an output image shape +instead of a scaling factor. + +Note that when down-sampling an image, `resize` and `rescale` should perform +Gaussian smoothing to avoid aliasing artifacts. See the `anti_aliasing` and +`anti_aliasing_sigma` arguments to these functions. 
+ +`Downscale` serves the purpose of down-sampling an n-dimensional image by +integer factors using the local mean on the elements of each block of the size +factors given as a parameter to the function. + +""" + +import matplotlib.pyplot as plt + +from skimage import data, color +from skimage.transform import rescale, resize, downscale_local_mean + +image = color.rgb2gray(data.astronaut()) + +image_rescaled = rescale(image, 1.0 / 4.0, anti_aliasing=False) +image_resized = resize(image, (image.shape[0] / 4, image.shape[1] / 4), + anti_aliasing=True) +image_downscaled = downscale_local_mean(image, (4, 3)) + +fig, axes = plt.subplots(nrows=2, ncols=2) + +ax = axes.ravel() + +ax[0].imshow(image, cmap='gray') +ax[0].set_title("Original image") + +ax[1].imshow(image_rescaled, cmap='gray') +ax[1].set_title("Rescaled image (aliasing)") + +ax[2].imshow(image_resized, cmap='gray') +ax[2].set_title("Resized image (no aliasing)") + +ax[3].imshow(image_downscaled, cmap='gray') +ax[3].set_title("Downscaled image (no aliasing)") + +ax[0].set_xlim(0, 512) +ax[0].set_ylim(512, 0) +plt.tight_layout() +plt.show() diff -Nru skimage-0.13.1/doc/examples/transform/plot_ssim.py skimage-0.14.0/doc/examples/transform/plot_ssim.py --- skimage-0.13.1/doc/examples/transform/plot_ssim.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/transform/plot_ssim.py 2018-05-29 01:27:44.000000000 +0000 @@ -41,8 +41,7 @@ img_const = img + abs(noise) fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(10, 4), - sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) + sharex=True, sharey=True) ax = axes.ravel() mse_none = mse(img, img) diff -Nru skimage-0.13.1/doc/examples/transform/plot_swirl.py skimage-0.14.0/doc/examples/transform/plot_swirl.py --- skimage-0.13.1/doc/examples/transform/plot_swirl.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/transform/plot_swirl.py 2018-05-29 01:27:44.000000000 +0000 @@ -75,8 +75,7 @@ swirled = swirl(image, 
rotation=0, strength=10, radius=120) fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(8, 3), - sharex=True, sharey=True, - subplot_kw={'adjustable':'box-forced'}) + sharex=True, sharey=True) ax0.imshow(image, cmap=plt.cm.gray, interpolation='none') ax0.axis('off') diff -Nru skimage-0.13.1/doc/examples/xx_applications/plot_coins_segmentation.py skimage-0.14.0/doc/examples/xx_applications/plot_coins_segmentation.py --- skimage-0.13.1/doc/examples/xx_applications/plot_coins_segmentation.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/xx_applications/plot_coins_segmentation.py 2018-05-29 01:27:44.000000000 +0000 @@ -20,7 +20,7 @@ axes[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest') axes[0].axis('off') axes[1].plot(hist[1][:-1], hist[0], lw=2) -axes[1].set_title('histogram of grey values') +axes[1].set_title('histogram of gray values') ###################################################################### # @@ -28,7 +28,7 @@ # ============ # # A simple way to segment the coins is to choose a threshold based on the -# histogram of grey values. Unfortunately, thresholding this image gives a +# histogram of gray values. Unfortunately, thresholding this image gives a # binary image that either misses significant parts of the coins or merges # parts of the background with the coins: @@ -42,7 +42,6 @@ for a in axes: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -62,7 +61,6 @@ ax.imshow(edges, cmap=plt.cm.gray, interpolation='nearest') ax.set_title('Canny detector') ax.axis('off') -ax.set_adjustable('box-forced') ###################################################################### # These contours are then filled using mathematical morphology. 
@@ -89,7 +87,6 @@ ax.imshow(coins_cleaned, cmap=plt.cm.gray, interpolation='nearest') ax.set_title('removing small objects') ax.axis('off') -ax.set_adjustable('box-forced') ###################################################################### # However, this method is not very robust, since contours that are not @@ -110,21 +107,19 @@ ax.imshow(elevation_map, cmap=plt.cm.gray, interpolation='nearest') ax.set_title('elevation map') ax.axis('off') -ax.set_adjustable('box-forced') ###################################################################### # Next we find markers of the background and the coins based on the extreme -# parts of the histogram of grey values. +# parts of the histogram of gray values. markers = np.zeros_like(coins) markers[coins < 30] = 1 markers[coins > 150] = 2 fig, ax = plt.subplots(figsize=(4, 3)) -ax.imshow(markers, cmap=plt.cm.spectral, interpolation='nearest') +ax.imshow(markers, cmap=plt.cm.nipy_spectral, interpolation='nearest') ax.set_title('markers') ax.axis('off') -ax.set_adjustable('box-forced') ###################################################################### # Finally, we use the watershed transform to fill regions of the elevation @@ -136,7 +131,6 @@ ax.imshow(segmentation, cmap=plt.cm.gray, interpolation='nearest') ax.set_title('segmentation') ax.axis('off') -ax.set_adjustable('box-forced') ###################################################################### # This last method works even better, and the coins can be segmented and @@ -155,6 +149,7 @@ for a in axes: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() + +plt.show() diff -Nru skimage-0.13.1/doc/examples/xx_applications/plot_geometric.py skimage-0.14.0/doc/examples/xx_applications/plot_geometric.py --- skimage-0.13.1/doc/examples/xx_applications/plot_geometric.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/xx_applications/plot_geometric.py 2018-05-29 01:27:44.000000000 +0000 @@ -113,3 +113,5 @@ a.axis('off') 
plt.tight_layout() + +plt.show() diff -Nru skimage-0.13.1/doc/examples/xx_applications/plot_haar_extraction_selection_classification.py skimage-0.14.0/doc/examples/xx_applications/plot_haar_extraction_selection_classification.py --- skimage-0.13.1/doc/examples/xx_applications/plot_haar_extraction_selection_classification.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/doc/examples/xx_applications/plot_haar_extraction_selection_classification.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,176 @@ +""" +====================================================== +Face classification using Haar-like feature descriptor +====================================================== + +Haar-like feature descriptors were successfully used to implement the first +real-time face detector [1]_. Inspired by this application, we propose an +example illustrating the extraction, selection, and classification of Haar-like +features to detect faces vs. non-faces. + +Notes +----- + +This example relies on scikit-learn to select and classify features. + +References +---------- + +.. [1] Viola, Paul, and Michael J. Jones. "Robust real-time face + detection." International journal of computer vision 57.2 + (2004): 137-154. 
+ http://www.merl.com/publications/docs/TR2004-043.pdf + DOI: 10.1109/CVPR.2001.990517 + +""" +from __future__ import division, print_function +from time import time + +import numpy as np +import matplotlib.pyplot as plt + +from dask import delayed, threaded, multiprocessing + +from sklearn.ensemble import RandomForestClassifier +from sklearn.model_selection import train_test_split +from sklearn.metrics import roc_auc_score + +from skimage.data import lfw_subset +from skimage.transform import integral_image +from skimage.feature import haar_like_feature +from skimage.feature import haar_like_feature_coord +from skimage.feature import draw_haar_like_feature + +############################################################################### +# The usual feature extraction scheme +############################################################################### +# The procedure to extract the Haar-like feature for an image is quite easy: a +# region of interest (ROI) is defined for which all possible feature will be +# extracted. The integral image of this ROI will be computed and all possible +# features will be computed. + + +@delayed +def extract_feature_image(img, feature_type, feature_coord=None): + """Extract the haar feature for the current image""" + ii = integral_image(img) + return haar_like_feature(ii, 0, 0, ii.shape[0], ii.shape[1], + feature_type=feature_type, + feature_coord=feature_coord) + + +############################################################################### +# We will use a subset of the CBCL which is composed of 100 face images and 100 +# non-face images. Each image has been resized to a ROI of 19 by 19 pixels. We +# will keep 75 images from each group to train a classifier and check which +# extracted features are the most salient, and use the remaining 25 from each +# class to check the performance of the classifier. 
+ +images = lfw_subset() +# For speed, only extract the two first types of features +feature_types = ['type-2-x', 'type-2-y'] + +# Build a computation graph using dask. This allows using multiple CPUs for +# the computation step +X = delayed(extract_feature_image(img, feature_types) + for img in images) +# Compute the result using the "multiprocessing" dask backend +t_start = time() +X = np.array(X.compute(get=multiprocessing.get)) +time_full_feature_comp = time() - t_start +y = np.array([1] * 100 + [0] * 100) +X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=150, + random_state=0, + stratify=y) + +# Extract all possible features to be able to select the most salient. +feature_coord, feature_type = \ + haar_like_feature_coord(width=images.shape[2], height=images.shape[1], + feature_type=feature_types) + +############################################################################### +# A random forest classifier can be trained in order to select the most salient +# features, specifically for face classification. The idea is to check which +# features are the most often used by the ensemble of trees. By using only +# the most salient features in subsequent steps, we can dramatically speed up +# computation, while retaining accuracy. 
+ +# Train a random forest classifier and check performance +clf = RandomForestClassifier(n_estimators=1000, max_depth=None, + max_features=100, n_jobs=-1, random_state=0) +t_start = time() +clf.fit(X_train, y_train) +time_full_train = time() - t_start +auc_full_features = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]) + +# Sort features in order of importance, plot six most significant +idx_sorted = np.argsort(clf.feature_importances_)[::-1] + +fig, axes = plt.subplots(3, 2) +for idx, ax in enumerate(axes.ravel()): + image = images[0] + image = draw_haar_like_feature(image, 0, 0, + images.shape[2], + images.shape[1], + [feature_coord[idx_sorted[idx]]]) + ax.imshow(image) + ax.set_xticks([]) + ax.set_yticks([]) + +fig.suptitle('The most important features') + +############################################################################### +# We can select the most important features by checking the cumulative sum of +# the feature importance index; below, we keep features representing 70% of the +# cumulative value which represent only 3% of the total number of features. + +cdf_feature_importances = np.cumsum(clf.feature_importances_[idx_sorted]) +cdf_feature_importances /= np.max(cdf_feature_importances) +sig_feature_count = np.count_nonzero(cdf_feature_importances < 0.7) +sig_feature_percent = round(sig_feature_count / + len(cdf_feature_importances) * 100, 1) +print(('{} features, or {}%, account for 70% of branch points in the random ' + 'forest.').format(sig_feature_count, sig_feature_percent)) + +# Select the most informative features +selected_feature_coord = feature_coord[idx_sorted[:sig_feature_count]] +selected_feature_type = feature_type[idx_sorted[:sig_feature_count]] +# Note: we could select those features from the +# original matrix X but we would like to emphasize the usage of `feature_coord` +# and `feature_type` to recompute a subset of desired features. 
+ +# Delay the computation and build the graph using dask +X = delayed(extract_feature_image(img, selected_feature_type, + selected_feature_coord) + for img in images) +# Compute the result using the *threaded* backend: +# When computing all features, the Python GIL is acquired to process each ROI, +# and this is where most of the time is spent, so multiprocessing is faster. +# For this small subset, most of the time is spent on the feature computation +# rather than the ROI scanning, and using threaded is *much* faster, because +# we avoid the overhead of launching a new process. +t_start = time() +X = np.array(X.compute(get=threaded.get)) +time_subs_feature_comp = time() - t_start +y = np.array([1] * 100 + [0] * 100) +X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=150, + random_state=0, + stratify=y) + +############################################################################### +# Once the features are extracted, we can train and test the a new classifier. + +t_start = time() +clf.fit(X_train, y_train) +time_subs_train = time() - t_start + +auc_subs_features = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]) + +summary = (('Computing the full feature set took {:.3f}s, plus {:.3f}s ' + 'training, for an AUC of {:.2f}. Computing the restricted feature ' + 'set took {:.3f}s, plus {:.3f}s training, for an AUC of {:.2f}.') + .format(time_full_feature_comp, time_full_train, auc_full_features, + time_subs_feature_comp, time_subs_train, auc_subs_features)) + +print(summary) +plt.show() diff -Nru skimage-0.13.1/doc/examples/xx_applications/plot_morphology.py skimage-0.14.0/doc/examples/xx_applications/plot_morphology.py --- skimage-0.13.1/doc/examples/xx_applications/plot_morphology.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/xx_applications/plot_morphology.py 2018-05-29 01:27:44.000000000 +0000 @@ -22,7 +22,7 @@ To get started, let's load an image using ``io.imread``. 
Note that morphology -functions only work on gray-scale or binary images, so we set ``as_grey=True``. +functions only work on gray-scale or binary images, so we set ``as_gray=True``. """ import os @@ -32,7 +32,7 @@ from skimage import io orig_phantom = img_as_ubyte(io.imread(os.path.join(data_dir, "phantom.png"), - as_grey=True)) + as_gray=True)) fig, ax = plt.subplots() ax.imshow(orig_phantom, cmap=plt.cm.gray) @@ -47,11 +47,9 @@ ax1.imshow(original, cmap=plt.cm.gray) ax1.set_title('original') ax1.axis('off') - ax1.set_adjustable('box-forced') ax2.imshow(filtered, cmap=plt.cm.gray) ax2.set_title(filter_name) ax2.axis('off') - ax2.set_adjustable('box-forced') ###################################################################### # Erosion @@ -193,7 +191,7 @@ #*single-pixel wide skeleton*. It is important to note that this is #performed on binary images only. -horse = io.imread(os.path.join(data_dir, "horse.png"), as_grey=True) +horse = io.imread(os.path.join(data_dir, "horse.png"), as_gray=True) sk = skeletonize(horse == 0) plot_comparison(horse, sk, 'skeletonize') @@ -242,3 +240,5 @@ # /ImageProcessing-html/topic4.htm>`_ # # 3. 
http://en.wikipedia.org/wiki/Mathematical_morphology + +plt.show() diff -Nru skimage-0.13.1/doc/examples/xx_applications/plot_rank_filters.py skimage-0.14.0/doc/examples/xx_applications/plot_rank_filters.py --- skimage-0.13.1/doc/examples/xx_applications/plot_rank_filters.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/xx_applications/plot_rank_filters.py 2018-05-29 01:27:44.000000000 +0000 @@ -87,7 +87,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -120,7 +119,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -158,7 +156,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -239,7 +236,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -284,7 +280,6 @@ ax[i].imshow(image_list[i], cmap=plt.cm.gray, vmin=0, vmax=255) ax[i].set_title(title_list[i]) ax[i].axis('off') - ax[i].set_adjustable('box-forced') plt.tight_layout() @@ -315,7 +310,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -345,7 +339,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -404,7 +397,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -431,7 +423,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -472,7 +463,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -513,7 +503,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -661,7 +650,6 @@ for a in ax: a.axis('off') - a.set_adjustable('box-forced') plt.tight_layout() @@ -689,3 +677,5 @@ ax.set_xlabel('Image size') plt.tight_layout() + +plt.show() diff -Nru skimage-0.13.1/doc/examples/xx_applications/plot_thresholding.py skimage-0.14.0/doc/examples/xx_applications/plot_thresholding.py --- skimage-0.13.1/doc/examples/xx_applications/plot_thresholding.py 2017-09-26 
23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/examples/xx_applications/plot_thresholding.py 2018-05-29 01:27:44.000000000 +0000 @@ -121,9 +121,9 @@ fig, axes = plt.subplots(ncols=3, figsize=(8, 2.5)) ax = axes.ravel() -ax[0] = plt.subplot(1, 3, 1, adjustable='box-forced') +ax[0] = plt.subplot(1, 3, 1) ax[1] = plt.subplot(1, 3, 2) -ax[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0], adjustable='box-forced') +ax[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0]) ax[0].imshow(image, cmap=plt.cm.gray) ax[0].set_title('Original') @@ -208,8 +208,7 @@ threshold_global_otsu = threshold_otsu(img) global_otsu = img >= threshold_global_otsu -fig, axes = plt.subplots(2, 2, figsize=(8, 5), sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) +fig, axes = plt.subplots(2, 2, figsize=(8, 5), sharex=True, sharey=True) ax = axes.ravel() plt.tight_layout() diff -Nru skimage-0.13.1/doc/release/release_0.13.rst skimage-0.14.0/doc/release/release_0.13.rst --- skimage-0.13.1/doc/release/release_0.13.rst 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/release/release_0.13.rst 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,3 @@ -Announcement: scikit-image 0.13.1 -================================= - scikit-image 0.13.1 is a bug-fix and compatibility update. See below for the many new features in 0.13.0. diff -Nru skimage-0.13.1/doc/release/release_0.14.rst skimage-0.14.0/doc/release/release_0.14.rst --- skimage-0.13.1/doc/release/release_0.14.rst 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/doc/release/release_0.14.rst 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,412 @@ +Announcement: scikit-image 0.14.0 +================================= + +We're happy to announce the release of scikit-image v0.14.0! + +scikit-image is an image processing toolbox for SciPy that includes algorithms +for segmentation, geometric transformations, color space manipulation, +analysis, filtering, morphology, feature detection, and more. 
+ +This is the last major release with official support for Python 2.7. Future +releases will be developed using Python 3-only syntax. + +However, 0.14 is a long-time support (LTS) release and will receive bug fixes +and backported features deemed important (by community demand) for two years +(till the end of maintenance of Python 2.7; see PEP 373 for the details). + +For more information, examples, and documentation, please visit our website: + +http://scikit-image.org + + +New Features +------------ +- Lookfor function to search across the library: ``skimage.lookfor``. (#2713) +- nD support for ``skimage.transform.rescale``, ``skimage.transform.resize``, + and ``skimage.transform.pyramid_*`` transforms. (#1522) +- Chan-Vese segmentation algorithm. (#1957) +- Manual segmentation with matplotlib for fast data annotation: + ``skimage.future.manual_polygon_segmentation``, + ``skimage.future.manual_lasso_segmentation``. (#2584) +- Hysteresis thresholding: + ``skimage.filters.apply_hysteresis_threshold``. (#2665) +- Segmentation with morphological snakes: + ``skimage.segmentation.morphological_chan_vese`` (2D), + ``skimage.segmentation.morphological_geodesic_active_contour`` (2D and 3D). (#2791) +- nD support for image moments: ``skimage.measure.moments_central``, + ``skimage.measure.moments_central``, ``skimage.measure.moments_normalized``, + ``skimage.measure.moments_hu``. This change leads to 3D/nD compatibility for + many regionprops. (#2603) +- Image moments from coordinate input: ``skimage.measure.moments_coords``, + ``skimage.measure.moments_coords_central``. (#2859) +- Added 3D support to ``blob_dog`` and ``blob_log``. (#2854) +- Inertia tensor and its eigenvalues can now be computed outside of + regionprops; available in ``skimage.measure.inertia_tensor``. (#2603) +- Cycle-spinning function for approximating shift-invariance by averaging + results from a series of spatial shifts: + ``skimage.restoration.cycle_spin``. 
(#2647) +- Haar-like feature: ``skimage.feature.haar_like_feature``, + ``skimage.feature.haar_like_feature_coord``, + ``skimage.feature.draw_haar_like_feature``. (#2848) +- Data generation with random_shapes function: + ``skimage.draw.random_shapes``. (#2773) +- Subset of LFW (Labeled Faces in the Wild) database: + ``skimage.data.cbcl_face_database``. (#2905) +- Fully reworked montage function (now with a better padding behavior): + ``skimage.util.montage``. (#2626) +- YDbDr colorspace conversion routines: ``skimage.color.rgb2ydbdr``, + ``skimage.color.ydbdr2rgb``. (#3018) + + +Improvements +------------ +- ``VisuShrink`` method for ``skimage.restoration.denoise_wavelet``. (#2470) +- New ``max_ratio`` parameter for ``skimage.feature.match_descriptors``. (#2472) +- ``skimage.transform.resize`` and ``skimage.transform.rescale`` have a new + ``anti_aliasing`` option to avoid aliasing artifacts when down-sampling + images. (#2802) +- Support for multichannel images for ``skimage.feature.hog``. (#2870) +- Non-local means denoising (``skimage.restoration.denoise_nl_means``) has + a new optional parameter, ``sigma``, that can be used to specify the noise + standard deviation. This enables noise-robust patch distance estimation. (#2890) +- Mixed dtypes support for ``skimage.measure.compare_ssim``, + ``skimage.measure.compare_psnr``, etc. (#2893) +- New ``alignment`` parameter in ``skimage.feature.plot_matches``. (#2955) +- New ``seed`` parameter in ``skimage.transform.probabilistic_hough_line``. (#2960) +- Various performance improvements. (#2821, #2878, #2967, #3035, #3056, #3100) + + +Bugfixes +-------- +- Fixed ``skimage.measure.regionprops.bbox_area`` returning incorrect value. (#2837) +- Changed gradient and L2-Hys norm computation in ``skimage.feature.hog`` + to closely follow the paper. (#2864) +- Fixed ``skimage.color.convert_colorspace`` not working for YCbCr, YPbPr. (#2780) +- Fixed incorrect composition of projective transformation with inverse transformation. 
(#2826) +- Fixed bug in random walker appearing when seed pixels are isolated inside pruned zones. (#2946) +- Fixed ``rescale`` not working properly with different rescale factors in multichannel case. (#2959) +- Fixed float and integer dtype support in ``skimage.util.invert``. (#3030) +- Fixed ``skimage.measure.find_contours`` raising StopIteration on Python 3.7. (#3038) +- Fixed platform-specific issues appearing in Windows and/or 32-bit environments. (#2867, #3033) + + +API Changes +----------- +- ``skimage.util.montage.`` namespace has been removed, and + ``skimage.util.montage.montage2d`` function is now available as + ``skimage.util.montage2d``. +- ``skimage.morphology.binary_erosion`` now uses ``True`` as border + value, and is now consistent with ``skimage.morphology.erosion``. + + +Deprecations +------------ +- ``freeimage`` plugin has been removed from ``skimage.io``. +- ``skimage.util.montage2d`` is deprecated and will be removed in 0.15. + Use ``skimage.util.montage`` function instead. +- ``skimage.novice`` is deprecated and will be removed in 0.16. +- ``skimage.transform.resize`` and ``skimage.transform.rescale`` have a new + ``anti_aliasing`` option that avoids aliasing artifacts when down-sampling + images. This option will be enabled by default in 0.15. +- ``regionprops`` will use row-column coordinates in 0.16. You can start + using them now with ``regionprops(..., coordinates='rc')``. You can silence + warning messages, and retain the old behavior, with + ``regionprops(..., coordinates='xy')``. However, that option will go away + in 0.16 and result in an error. This change has a number of consequences. + Specifically, the "orientation" region property will measure the + anticlockwise angle from a *vertical* line, i.e. from the vector (1, 0) in + row-column coordinates. +- ``skimage.morphology.remove_small_holes`` ``min_size`` argument is deprecated + and will be removed in 0.16. Use ``area_threshold`` instead. 
+ + +Contributors to this release +---------------------------- + +- Alvin +- Norman Barker +- Brad Bazemore +- Leonid Bloch +- Benedikt Boecking +- Jirka Borovec +- François Boulogne +- Larry Bradley +- Robert Bradshaw +- Matthew Brett +- Floris van Breugel +- Alex Chum +- Yannick Copin +- Nethanel Elzas +- Kira Evans +- Christoph Gohlke +- GGoussar +- Jens Glaser +- Peter Goldsborough +- Emmanuelle Gouillart +- Ben Hadfield +- Mark Harfouche +- Scott Heatwole +- Gregory R. Lee +- Guillaume Lemaitre +- Theodore Lindsay +- Kevin Mader +- Jarrod Millman +- Vinicius Monego +- Pradyumna Narayana +- Juan Nunez-Iglesias +- Kesavan PS +- Egor Panfilov +- Oleksandr Pavlyk +- Justin Pinkney +- Robert Pollak +- Jonathan Reich +- Émile Robitaille +- RoseZhao +- Alex Rothberg +- Arka Sadhu +- Max Schambach +- Johannes Schönberger +- Sourav Singh +- Kesavan Subburam +- Matt Swain +- Saurav R. Tuladhar +- Nelle Varoquaux +- Viraj +- David Volgyes +- Stefan van der Walt +- Thomas Walter +- Scott Warchal +- Josh Warner +- Nicholas Weir +- Sera Yang +- Chiang, Yi-Yo +- corrado9999 +- ed1d1a8d +- eepaillard +- leaprovenzano +- mikigom +- mrastgoo +- mutterer +- pmneila +- timhok +- zhongzyd + + +We'd also like to thank all the people who contributed their time to perform the reviews: + +- Leonid Bloch +- Jirka Borovec +- François Boulogne +- Matthew Brett +- Thomas A Caswell +- Kira Evans +- Peter Goldsborough +- Emmanuelle Gouillart +- Almar Klein +- Gregory R. Lee +- Joan Massich +- Juan Nunez-Iglesias +- Faraz Oloumi +- Daniil Pakhomov +- Egor Panfilov +- Dan Schult +- Johannes Schönberger +- Steven Silvester +- Alexandre de Siqueira +- Nelle Varoquaux +- Stefan van der Walt +- Josh Warner +- Eric Wieser + + +Full list of changes +-------------------- +This release is the result of 14 months of work. 
+It contains the following 186 merged pull requests by 67 committers: + +- n-dimensional rescale, resize, and pyramid transforms (#1522) +- Segmentation: Implemention of a simple Chan-Vese Algorithm (#1957) +- JPEG quality argument in imsave (#2063) +- improve geometric models fitting (line, circle) using LSM (#2433) +- Improve input parameter handling in `_sift_read` (#2452) +- Remove broken test in `_shared/tests/test_interpolation.py` (#2454) +- [MRG] Pytest migration (#2468) +- Add VisuShrink method for `denoise_wavelet` (#2470) +- Ratio test for descriptor matching (#2472) +- Make HOG visualization use midpoints of orientation bins (#2525) +- DOC: Add example for rescaling/resizing/downscaling (#2560) +- Gallery random walker: Rescale image range to -1, 1 (#2575) +- Update conditional requirement for PySide (#2578) +- Add configuration file for `pep8_speaks` (#2579) +- Manual segmentation tool with matplotlib (#2584) +- Website updates (documentation build) (#2585) +- Update the release process notes (#2593) +- Defer matplotlib imports (#2596) +- Spelling: replaces colour by color (#2598) +- Add nD support to image moments computation (#2603) +- Set xlim and ylim in rescale gallery example (#2606) +- Reduce runtime of local_maxima gallery example (#2608) +- MAINT _shared.testing now contains pytest's useful functions (#2614) +- error message misspelled, integral to integer (#2615) +- Respect standard notations for images in functions arguments (#2617) +- MAINT: remove unused argument in private inpainting function (#2618) +- MAINT: some minor edits on Chan Vese segmentation (#2619) +- Fix UserWarning: Unknown section Example (#2620) +- Eliminate some TODOs for 0.14 (#2621) +- Clean up and fix bug in ssim tests (#2622) +- Add padding_width to montage2d and add montage_rgb (#2626) +- Add tests covering erroneous input to morphology.watershed (#2631) +- Fix name of code coverage tool (#2638) +- MAINT: Remove undefined attributes in skimage.filters (#2643) +- 
Improve the support for 1D images in `color.gray2rgb` (#2645) +- ENH: add cycle spinning routine (#2647) +- as_gray replaces as_grey in imread() and load() (#2652) +- Fix AppVeyor pytest execution (#2658) +- More TODOs for 0.14 (#2659) +- pin sphinx to <1.6 (#2662) +- MAINT: use relative imports instead of absolute ones (#2664) +- Add hysteresis thresholding function (#2665) +- Improve hysteresis docstring (#2669) +- Add helper functions img_as_float32 and img_as_float64 (#2673) +- Remove unnecessary assignment in pxd file. (#2683) +- Unused var and function call in documentation example (#2684) +- Make `imshow_collection` to plot images on a grid of convenient aspect ratio (#2689) +- Fix typo in Chan-Vese docstrings (#2692) +- Fix data type error with marching_cubes_lewiner(allow_degenerate=False) (#2694) +- Add handling for uniform arrays when finding local extrema. (#2699) +- Avoid uneccesary copies in skimage.morphology.label (#2701) +- Deprecate `visualise` in favor of `visualize` in `skimage.feature.hog` (#2705) +- Remove alpha channel when saving to jpg format (#2706) +- Tweak in-place installation instructions (#2712) +- Add `skimage.lookfor` function (#2713) +- Speedup image dtype conversion by switching to `asarray` (#2715) +- MAINT reorganizing CI-related scripts (#2718) +- added rect function to draw module (#2719) +- Remove duplicate parameter in `skimage.io.imread` docstring (#2725) +- Add support for 1D arrays for grey erosion (#2727) +- Build with Xcode 9 beta 3, MacOS 10.12 (#2730) +- Travis docs one platform (#2732) +- Install documentation build requirements on Travis-CI (#2737) +- Add reference papers for `restoration.inpaint_biharmonic` (#2738) +- Completely remove `freeimage` plugin from `skimage.io` (#2744) +- Implementation and test fix for shannon_entropy calculation. (#2749) +- Minor cleanup (#2750) +- Add notes on testing to CONTRIBUTING (#2751) +- Update OSX install script (#2752) +- fix bug in horizontal seam_carve and seam_carve test. 
issue :#2545 (#2754) +- Recommend merging instead of rebasing, to lower contribution barrier (#2757) +- updated second link, first link still has paywall (#2768) +- DOC: set_color docstring, in-place said explicitly (#2771) +- Add module for generating random, labeled shapes (#2773) +- Ignore known failures (#2774) +- Update testdoc (#2775) +- Remove bento support (#2776) +- AppVeyor supports dot-file-style (#2779) +- Fix bug in `color.convert_colorspace` for YCbCr, YPbPr (#2780) +- Reorganizing requirements (#2781) +- WIP: Deal with long running command on travis (#2782) +- Deprecate the novice module (#2742) (#2784) +- Document mentioning deprecations in the release notes (#2785) +- [WIP] FIX Swirl center coordinates are reversed (#2790) +- Implementation of the Morphological Snakes (#2791) +- Merge TASKS.txt with CONTRIBUTING.txt (#2800) +- Add Gaussian filter-based antialiasing to resize (#2802) +- Add morphological snakes to release notes (#2803) +- Return empty array if hough_line_peaks detects nothing (#2805) +- Add W503 to pep8speaks ignore. (#2816) +- Slice PIL palette correctly using extreme image value. 
(#2818) +- Move INSTALL to top-level (#2819) +- Make simple watershed fast again (#2821) +- The gallery now points to the stable docs (#2822) +- Adapt AppVeyor to use Python.org dist, and remove install script (#2823) +- Remove pytest yield (#2824) +- Bug fix in projective tranformation composition with inverse transformation (#2826) +- FIX: add estimate_sigma to __all__ in restoration module (#2829) +- Switch from LaTeX to MathJax in doc build (#2832) +- Docstring fixes for better formula formatting (#2834) +- Fix regionprops.bbox_area bug (#2837) +- MAINT: add Python 3.6 to appveyor, small edits (#2840) +- Allow convex area calculation in 3D for regionprops (#2847) +- [MRG] DOC fix documentation build (#2851) +- Change default args from list to tuple in `feature.draw_multiblock_lbp` (#2852) +- Add 3D support to `blob_dog` and `blob_log` (#2854) +- Update compare_nrmse docstring (#2855) +- Fix link order in example (#2858) +- Add Computation of Image Moments to Coordinates (#2859) +- Revert gradient formula, modify the deprecation warning, and fix L2-Hys norm in `skimage.feature.hog` (#2864) +- OverflowError: Python int too large to convert to C long on win-amd64-py2.7 (#2867) +- Fix `skimage.measure.centroid` and add test coverage (#2869) +- Add multichannel support to `feature.hog` (#2870) +- Remove scipy version check in `active_contour` (#2871) +- Update DOI reference in `measure.compare_ssim` (#2872) +- Fix randomness and expected ranges for RGB in `test_random_shapes`. (#2877) +- Nl means fixes for large datasets (#2878) +- Make `test_random_shapes` use internally shipped testing tools (#2879) +- DOC: Update docstring for is_low_constrast to match function signature (#2883) +- Update URL in RAG docstring (#2885) +- Fix spelling typo in NL means docstring (#2887) +- noise-robust patch distance estimation for non-local means (#2890) +- Allow mixed dtypes in compare_ssim, compare_psnr, etc. 
(#2893) +- EHN add Haar-like feature (#2896) +- Add CBCL face database subset to `skimage.data` (#2897) +- EXA example for haar like features (#2898) +- Install documentation dependencies on all builds (#2900) +- Improve LineModelND doc strings (#2903) +- Add a subset of LFW dataset to `skimage.data` (#2905) +- Update default parameter values in the docstring of `skimage.restoration.unsupervised_wiener` (#2906) +- Revert "Add CBCL face database subset to `skimage.data`" (#2907) +- remove unused parameter 'n_segments' in `_enforce_label_connectivity_cython()` (#2908) +- Update six version to make pytest_cov work (#2909) +- Fix typos in `draw._random_shapes._generate_triangle_mask` docstring (#2914) +- do not assume 3 channels during non-local means denoising (#2922) +- add missing cdef in _integral_image_3d (non-local means) (#2923) +- Replace `morphology.remove_small_holes` argument `min_size` with `area_threshold` (#2924) +- Ensure warning to provide bool array is warranted (#2930) +- Remove copyright notice with permission of the author (Thomas Lewiner) (#2932) +- Fix link to Windows binaries in README. 
(#2934) +- Handle NumPy 1.14 API changes (#2935) +- Specify `gradient` parameter docstring in `compare_ssim` (#2937) +- Fixed broken link on LBP documentation (#2941) +- Corrected bug related to border value of morphology.binary_erosion (#2945) +- Correct bug in random walker when seed pixels are isolated inside pruned zones (#2946) +- Fix Cython compilation warnings in NL Means and Watershed (#2947) +- Add `alignment` parameter to `feature.plot_matches` (#2955) +- Raise warning when attempting to save boolean image (#2957) +- Allow different rescale factors in multichannel warp (#2959) +- Add seed parameter to probabilistic_hough_line (#2960) +- Minor style fixes for #2946 (#2961) +- Build on fewer AppVeyor platforms to avoid timeout (#2962) +- Watershed segmentation: make usable for large arrays (#2967) +- Mark data_range as being a float (#2971) +- Use correct NumPy version comparison in pytest configuration (#2975) +- Handle matplotlib 2.2 pre-release deprecations (#2977) +- Bugfix LineModelND.residuals does not use the optional parameter `params` (#2979) +- Return empty list on flat images with hough_ellipse #2820 (#2996) +- Add release notes for 0.13.1 (#2999) +- MAINT: PIL removed saving RGBA images as jpeg files (#3004) +- Ensure stdev is always nonnegative in _mean_std (#3008) +- Add citation information to README (#3013) +- Add YDbDr colorspace conversion routines (#3018) +- Minor style and documentation updates for #2859 (#3023) +- `draw.random_shapes` API improvements (#3029) +- Type dependent inversion (#3030) +- Fix ValueError: Buffer dtype mismatch, expected 'int64_t' but got 'int' on win_amd64 (#3033) +- Replace pow function calls in Cython modules to fix performance issues on Windows (#3035) +- Add __pycache__ and .cache to .gitignore. 
(#3037) +- Fix RuntimeError: generator raised StopIteration on Python 3.7 (#3038) +- Fix invert tests (#3039) +- Fix examples not displaying figures (#3040) +- Correct reference for the coins sample image (#3042) +- Switch to basis numpy int dtypes in dtype_range (#3050) +- speedup img_as_float by making division multiplication and avoiding unecessary allocation (#3056) +- For sparse CG solver, provide atol=0 keyword for SciPy >= 1.1 (#3063) +- Update dependencies and deprecations to fix Travis builds (#3072) +- Sanitizing marching_cubes_lewiner spacing input argument (#3074) +- Allow convex_hull_image on empty images (#3076) +- v0.13.x: Backport NumPy 1.14 compatibility (#3085) +- Force Appveyor to fail on failed tests (#3093) +- Add `threshold_local` to `filters` module namespace (#3096) +- Replace grey by gray where no deprecation is needed (#3098) +- Optimize _probabilistic_hough_line function (#3100) +- Rebuild docs upon deploy to ensure Javascript is generated (#3104) +- Fix random gallery script generation (#3106) diff -Nru skimage-0.13.1/doc/release/release_dev.rst skimage-0.14.0/doc/release/release_dev.rst --- skimage-0.13.1/doc/release/release_dev.rst 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/release/release_dev.rst 2018-05-29 01:27:44.000000000 +0000 @@ -33,6 +33,23 @@ Deprecations ------------ +- Python 2 support has been dropped in the development version. Users of the + development version should have Python >= 3.5. +- ``skimage.util.montage2d`` has been removed. Use ``skimage.util.montage`` instead. +- ``skimage.novice`` is deprecated and will be removed in 0.16. +- ``skimage.transform.resize`` and ``skimage.transform.rescale`` option + ``anti_aliasing`` has been enabled by default. +- ``regionprops`` will use row-column coordinates in 0.16. You can start + using them now with ``regionprops(..., coordinates='rc')``. You can silence + warning messages, and retain the old behavior, with + ``regionprops(..., coordinates='xy')``. 
However, that option will go away + in 0.16 and result in an error. This change has a number of consequences. + Specifically, the "orientation" region property will measure the + anticlockwise angle from a *vertical* line, i.e. from the vector (1, 0) in + row-column coordinates. +- ``skimage.morphology.remove_small_holes`` ``min_size`` argument is deprecated + and will be removed in 0.16. Use ``area_threshold`` instead. + Contributors to this release ---------------------------- diff -Nru skimage-0.13.1/doc/release/release_template.rst skimage-0.14.0/doc/release/release_template.rst --- skimage-0.13.1/doc/release/release_template.rst 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/release/release_template.rst 2018-05-29 01:27:44.000000000 +0000 @@ -33,7 +33,7 @@ Deprecations ------------ -- Python 2.6 support has been dropped. Minimal required Python version is 2.7. + Contributors to this release diff -Nru skimage-0.13.1/doc/source/api_changes.rst skimage-0.14.0/doc/source/api_changes.rst --- skimage-0.13.1/doc/source/api_changes.rst 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/source/api_changes.rst 2018-05-29 01:27:44.000000000 +0000 @@ -1,3 +1,31 @@ +Version 0.14 +------------ +- ``skimage.filters.gaussian_filter`` has been removed. Use + ``skimage.filters.gaussian`` instead. +- ``skimage.filters.gabor_filter`` has been removed. Use + ``skimage.filters.gabor`` instead. +- The old syntax support for ``skimage.transform.integrate`` has been removed. +- The ``normalise`` parameter of ``skimage.feature.hog`` was removed due to + incorrect behavior: it only applied a square root instead of a true + normalization. If you wish to duplicate the old behavior, set + ``transform_sqrt=True``. +- ``skimage.measure.structural_similarity`` has been removed. Use + ``skimage.measure.compare_ssim`` instead. +- In ``skimage.measure.compare_ssim``, the `dynamic_range` has been removed in + favor of '`data_range`. 
+- In ``skimage.restoration.denoise_bilateral``, the `sigma_range` kwarg has + been removed in favor of `sigma_color`. +- ``skimage.measure.marching_cubes`` has been removed in favor of + ``skimage.measure.marching_cubes_lewiner``. +- ``ntiles_*`` parameters have been removed from + ``skimage.exposure.equalize_adapthist``. Use ``kernel_size`` instead. +- ``skimage.restoration.nl_means_denoising`` has been removed in + favor of ``skimage.restoration.denoise_nl_means``. +- ``skimage.measure.LineModel`` has been removed in favor of + ``skimage.measure.LineModelND``. +- In ``skimage.feature.hog`` visualise has been changed to visualize. +- `freeimage` plugin of ``skimage.io`` has been removed. + Version 0.13 ------------ - `skimage.filter` has been removed. Use `skimage.filters` instead. diff -Nru skimage-0.13.1/doc/source/conf.py skimage-0.14.0/doc/source/conf.py --- skimage-0.13.1/doc/source/conf.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/source/conf.py 2018-05-29 01:27:44.000000000 +0000 @@ -28,7 +28,7 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.imgmath', + 'sphinx.ext.mathjax', 'numpydoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', @@ -50,10 +50,7 @@ 'gallery_dirs' : 'auto_examples', 'backreferences_dir': 'api', 'reference_url' : { - 'skimage': None, - 'matplotlib': 'http://matplotlib.org', - 'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0', - 'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',} + 'skimage': None,} } # Determine if the matplotlib has a recent enough version of the @@ -388,5 +385,3 @@ else: return ("http://github.com/scikit-image/scikit-image/blob/" "v%s/skimage/%s%s" % (skimage.__version__, fn, linespec)) - - diff -Nru skimage-0.13.1/doc/source/contribute.rst skimage-0.14.0/doc/source/contribute.rst --- skimage-0.13.1/doc/source/contribute.rst 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/source/contribute.rst 2018-05-29 01:27:44.000000000 +0000 @@ -1,2 +1,8 @@ -.. include:: ../../TASKS.txt -.. include:: ../../.github/CONTRIBUTING.txt +.. toctree:: + :hidden: + + gitwash/index + gsoc2011 + cell_profiler + +.. include:: ../../CONTRIBUTING.txt diff -Nru skimage-0.13.1/doc/source/install.rst skimage-0.14.0/doc/source/install.rst --- skimage-0.13.1/doc/source/install.rst 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/source/install.rst 2018-05-29 01:27:44.000000000 +0000 @@ -1,160 +1 @@ -Installing scikit-image -======================= - -We are assuming that you have default Python environment already configured on your computer -and you intend to install ``scikit-image`` inside of it. If you want to create and work with Python -virtual environments, please follow instructions on `venv`_ and `virtual environments`_. - -There are two ways you can install ``scikit-image`` on your preferred Python environment. - -1. Standard Installation -2. Development Installation - -1. 
Standard Installation: -------------------------- - -``scikit-image`` comes pre-installed with several Python distributions, including Anaconda_, `Enthought Canopy`_, -`Python(x,y)`_ and `WinPython`_. However, you can install or upgrade existing ``scikit-image`` via -shell/command prompt. - -a. Windows -`````````` - -On Windows, you can install ``scikit-image`` using:: - - pip install scikit-image - -For Conda-based distributions (Anaconda, Miniconda), execute:: - - conda install scikit-image - -If you are using pure Python i.e. the distribution from python.org_, you'll need to -manually download packages (such as numpy, scipy and scikit-image) using Python wheels available from -`Christoph Gohlke's`_ website. You can install Python wheels using:: - - pip install SomePackage-1.0-py2.py3-none-any.whl - -.. _Anaconda: https://store.continuum.io/cshop/anaconda/ -.. _Enthought Canopy: https://www.enthought.com/products/canopy/ -.. _Python(x,y): http://python-xy.github.io/ -.. _WinPython: https://winpython.github.io/ - -b. Debian and Ubuntu -```````````````````` - -On Debian and Ubuntu, install ``scikit-image`` with:: - - sudo apt-get install python-skimage - -2. Development Installation: ----------------------------- - -You can install ``scikit-image`` development version if either your distribution ships an outdated version -or you want to develop and work on new features before the package is released officially. - -a. 
Windows -`````````` - -Before installing the development version, uninstall the standard version of ``scikit-image`` using pip as:: - - pip uninstall scikit-image - -or using conda (for Anaconda users) as:: - - conda uninstall scikit-image - -Now clone scikit-image on your local computer:: - - git clone https://github.com/scikit-image/scikit-image.git - -Change the directory and build from source code:: - - cd scikit-image - python setup.py develop - -If you experience the error ``Error:unable to find vcvarsall.bat`` it means that -your computer does not have recommended compilers for python. You can either download and -install Windows compilers from `here`_ or use `MinGW compilers`_ . If using `MinGW`, make sure to correctly -configure distutils by modifying (or create, if not existing) the configuration file -``distutils.cfg`` (located for example at ``C:\Python26\Lib\distutils\distutils.cfg``) -to contain:: - - [build] - compiler=mingw32 - -Once the build process is complete, run:: - - pip install -U -e . - -Make sure to give space after ``-e`` and add dot at the end. This will install scikit-image development version -and upgrade (or install) all the required dependencies. Otherwise, you can run the following command -to skip installation of dependencies:: - - pip install -U[--no-deps] -e . - -You can install or upgrade dependencies required for scikit-image anytime after installation using:: - - pip install -r requirements.txt --upgrade - -For more details on compiling in Windows, there is a lot of knowledge iterated -into the `setup of appveyor`_ (a continuous integration service). - -.. _miniconda: http://conda.pydata.org/miniconda.html -.. _python.org: http://python.org/ -.. _Christoph Gohlke's: http://www.lfd.uci.edu/~gohlke/pythonlibs/ -.. _setup of appveyor: https://github.com/scikit-image/scikit-image/blob/master/appveyor.yml -.. 
_here: https://wiki.python.org/moin/WindowsCompilers#Microsoft_Visual_C.2B-.2B-_14.0_standalone:_Visual_C.2B-.2B-_Build_Tools_2015_.28x86.2C_x64.2C_ARM.29 -.. _venv: https://docs.python.org/3/library/venv.html -.. _virtual environments: http://docs.python-guide.org/en/latest/dev/virtualenvs/ -.. _MinGW compilers: http://www.mingw.org/wiki/howto_install_the_mingw_gcc_compiler_suite - -b. Debian and Ubuntu -```````````````````` - -Install all the required dependencies:: - - sudo apt-get install python-matplotlib python-numpy python-pil python-scipy - -Get suitable compilers for successful installation:: - - sudo apt-get install build-essential cython - -Obtain the source from the git repository at -``http://github.com/scikit-image/scikit-image`` by running:: - - git clone https://github.com/scikit-image/scikit-image.git - -After unpacking, change into the source directory and execute:: - - pip install -e . - -To update:: - - git pull # Grab latest source - python setup.py build_ext -i # Compile any modified extensions - -c. Building with bento -`````````````````````` - -Alternatively, ``scikit-image`` can also be built using `bento -`__. Bento depends on `WAF -`__ for compilation. - -Follow the `Bento installation instructions -`__ and `download the WAF -source `__. - -Tell Bento where to find WAF by setting the ``WAFDIR`` environment variable:: - - export WAFDIR= - -From the ``scikit-image`` source directory:: - - bentomaker configure - bentomaker build -j # (add -i for in-place build) - bentomaker install # (when not building in-place) - -Depending on file permissions, the install commands may need to be run as -sudo. - -.. include:: ../../DEPENDS.txt +.. 
include:: ../../INSTALL.rst diff -Nru skimage-0.13.1/doc/source/random_gallery.py skimage-0.14.0/doc/source/random_gallery.py --- skimage-0.13.1/doc/source/random_gallery.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/source/random_gallery.py 2018-05-29 01:27:44.000000000 +0000 @@ -29,23 +29,28 @@ \ ''' -examples = glob.glob(os.path.join(example_dir, 'plot_*.py')) +examples = glob.glob(os.path.join(example_dir, '**/plot_*.py'), recursive=True) images, links = [], [] -image_url = 'http://scikit-image.org/docs/dev/_images/%s.png' -link_url = 'http://scikit-image.org/docs/dev/auto_examples/%s.html' +image_url = 'http://scikit-image.org/docs/dev/_images/' +link_url = 'http://scikit-image.org/docs/dev/auto_examples/' -for e in examples: - e = os.path.basename(e) - e = e[:-len('.py')] +for example_path in examples: + example_path = os.path.relpath(example_path, example_dir) + example_path, ext = os.path.splitext(example_path) - images.append(image_url % e) - links.append(link_url % e) + image_path, image_file = example_path.rsplit('/', maxsplit=1) + image_file = 'sphx_glr_' + image_file + '_001.png' + + images.append(image_url + image_file) + links.append(link_url + example_path + '.html') javascript = javascript.replace('{{IMAGES}}', str(images)) javascript = javascript.replace('{{LINKS}}', str(links)) javascript = javascript.replace('{{GALLERY_DIV}}', ''.join(gallery_div.split('\n'))) +print(images) + f = open(js_fn, 'w') f.write(javascript) f.close() diff -Nru skimage-0.13.1/doc/source/_static/docversions.js skimage-0.14.0/doc/source/_static/docversions.js --- skimage-0.13.1/doc/source/_static/docversions.js 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/source/_static/docversions.js 2018-05-29 01:27:44.000000000 +0000 @@ -1,4 +1,4 @@ -var versions = ['dev', '0.13.x', '0.12.x', '0.11.x', '0.10.x', '0.9.x', '0.8.0', '0.7.0', '0.6', '0.5', '0.4', '0.3']; +var versions = ['dev', '0.14.x', '0.13.x', '0.12.x', '0.11.x', '0.10.x', '0.9.x', 
'0.8.0', '0.7.0', '0.6', '0.5', '0.4', '0.3']; function insert_version_links() { for (i = 0; i < versions.length; i++){ diff -Nru skimage-0.13.1/doc/source/_templates/navbar.html skimage-0.14.0/doc/source/_templates/navbar.html --- skimage-0.13.1/doc/source/_templates/navbar.html 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/source/_templates/navbar.html 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,6 @@
  • Download
  • -
  • Gallery
  • -
  • Documentation
  • +
  • Gallery
  • +
  • Documentation
  • Community Guidelines
  • diff -Nru skimage-0.13.1/doc/source/user_guide/tutorial_parallelization.rst skimage-0.14.0/doc/source/user_guide/tutorial_parallelization.rst --- skimage-0.13.1/doc/source/user_guide/tutorial_parallelization.rst 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/source/user_guide/tutorial_parallelization.rst 2018-05-29 01:27:44.000000000 +0000 @@ -20,7 +20,7 @@ image = denoise_tv_chambolle(image[0][0], weight=0.1, multichannel=True) fd, hog_image = hog(color.rgb2gray(image), orientations=8, pixels_per_cell=(16, 16), cells_per_block=(1, 1), - visualise=True) + visualize=True) return hog_image diff -Nru skimage-0.13.1/doc/tools/apigen.py skimage-0.14.0/doc/tools/apigen.py --- skimage-0.13.1/doc/tools/apigen.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/doc/tools/apigen.py 2018-05-29 01:27:44.000000000 +0000 @@ -204,7 +204,8 @@ """ mod = __import__(uri, fromlist=[uri.split('.')[-1]]) # find all public objects in the module. - obj_strs = [obj for obj in dir(mod) if not obj.startswith('_')] + obj_strs = getattr(mod, '__all__', + [obj for obj in dir(mod) if not obj.startswith('_')]) functions = [] classes = [] submodules = [] @@ -217,7 +218,7 @@ # figure out if obj is a function or class if isinstance(obj, (FunctionType, BuiltinFunctionType)): functions.append(obj_str) - elif isinstance(obj, ModuleType): + elif isinstance(obj, ModuleType) and 'skimage' in mod.__name__: submodules.append(obj_str) else: try: diff -Nru skimage-0.13.1/.github/CONTRIBUTING.txt skimage-0.14.0/.github/CONTRIBUTING.txt --- skimage-0.13.1/.github/CONTRIBUTING.txt 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/.github/CONTRIBUTING.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,375 +0,0 @@ -Development process -------------------- - -Here's the long and short of it: - -1. If you are a first-time contributor: - - * Go to `https://github.com/scikit-image/scikit-image - `_ and click the - "fork" button to create your own copy of the project. 
- - * Clone the project to your local computer:: - - git clone https://github.com/your-username/scikit-image.git - - * Change the directory:: - - cd scikit-image - - * Add the upstream repository:: - - git remote add upstream https://github.com/scikit-image/scikit-image.git - - * Now, you have remote repositories named: - - - ``upstream``, which refers to the ``scikit-image`` repository - - ``origin``, which refers to your personal fork - -2. Develop your contribution: - - * Pull the latest changes from upstream:: - - git checkout master - git pull upstream master - - * Create a branch for the feature you want to work on. Since the - branch name will appear in the merge message, use a sensible name - such as 'transform-speedups':: - - git checkout -b transform-speedups - - * Commit locally as you progress (``git add`` and ``git commit``) - -3. To submit your contribution: - - * Push your changes back to your fork on GitHub:: - - git push origin transform-speedups - - * Enter your GitHub username and password (repeat contributors or advanced - users can remove this step by connecting to GitHub with SSH. See detailed - instructions below if desired). - - * Go to GitHub. The new branch will show up with a green Pull Request - button - click it. - - * If you want, post on the `mailing list - `_ to explain your changes or - to ask for review. - -For a more detailed discussion, read these :doc:`detailed documents -` on how to use Git with ``scikit-image`` -(``_). - -4. Review process: - - * Reviewers (the other developers and interested community members) will - write inline and/or general comments on your Pull Request (PR) to help - you improve its implementation, documentation and style. Every single - developer working on the project has their code reviewed, and we've come - to see it as friendly conversation from which we all learn and the - overall code quality benefits. 
Therefore, please don't let the review - discourage you from contributing: its only aim is to improve the quality - of project, not to criticize (we are, after all, very grateful for the - time you're donating!). - - * To update your pull request, make your changes on your local repository - and commit. As soon as those changes are pushed up (to the same branch as - before) the pull request will update automatically. - - * `Travis-CI `__, a continuous integration service, - is triggered after each Pull Request update to build the code, run unit - tests, measure code coverage and check coding style (PEP8) of your - branch. The Travis tests must pass before your PR can be merged. If - Travis fails, you can find out why by clicking on the "failed" icon (red - cross) and inspecting the build and test log. - - * A pull request must be approved by two core team members before merging. - -5. Document changes - - If your change introduces any API modifications, please update - ``doc/source/api_changes.txt``. - - If your change introduces a deprecation, add a reminder to ``TODO.txt`` - for the team to remove the deprecated functionality in the future. - -.. note:: - - To reviewers: if it is not obvious from the PR description, add a short - explanation of what a branch did to the merge message and, if closing a - bug, also add "Closes #123" where 123 is the issue number. - - -Divergence between ``upstream master`` and your feature branch --------------------------------------------------------------- - -Do *not* ever merge the main branch into yours. 
If GitHub indicates that the -branch of your Pull Request can no longer be merged automatically, rebase -onto master:: - - git checkout master - git pull upstream master - git checkout transform-speedups - git rebase master - -If any conflicts occur, fix the according files and continue:: - - git add conflict-file1 conflict-file2 - git rebase --continue - -However, you should only rebase your own branches and must generally not -rebase any branch which you collaborate on with someone else. - -Finally, you must push your rebased branch:: - - git push --force origin transform-speedups - -(If you are curious, here's a further discussion on the -`dangers of rebasing `__. -Also see this `LWN article `__.) - -Guidelines ----------- - -* All code should have tests (see `test coverage`_ below for more details). -* All code should be documented, to the same - `standard `_ as NumPy and SciPy. -* For new functionality, always add an example to the gallery. -* No changes are ever committed without review and approval by two core - team members. Ask on the - `mailing list `_ if - you get no response to your pull request. - **Never merge your own pull request.** -* Examples in the gallery should have a maximum figure width of 8 inches. - -Stylistic Guidelines --------------------- - -* Set up your editor to remove trailing whitespace. Follow `PEP08 - `__. Check code with pyflakes / flake8. - -* Use numpy data types instead of strings (``np.uint8`` instead of - ``"uint8"``). - -* Use the following import conventions:: - - import numpy as np - import matplotlib.pyplot as plt - from scipy import ndimage as ndi - - cimport numpy as cnp # in Cython code - -* When documenting array parameters, use ``image : (M, N) ndarray`` - and then refer to ``M`` and ``N`` in the docstring, if necessary. - -* Refer to array dimensions as (plane), row, column, not as x, y, z. See - :ref:`Coordinate conventions ` - in the user guide for more information. 
- -* Functions should support all input image dtypes. Use utility functions such - as ``img_as_float`` to help convert to an appropriate type. The output - format can be whatever is most efficient. This allows us to string together - several functions into a pipeline, e.g.:: - - hough(canny(my_image)) - -* Use ``Py_ssize_t`` as data type for all indexing, shape and size variables - in C/C++ and Cython code. - -* Use relative module imports, i.e. ``from .._shared import xyz`` rather than - ``from skimage._shared import xyz``. - -* Wrap Cython code in a pure Python function, which defines the API. This - improves compatibility with code introspection tools, which are often not - aware of Cython code. - -* For Cython functions, release the GIL whenever possible, using - ``with nogil:``. - - -Test coverage -------------- - -Tests for a module should ideally cover all code in that module, -i.e., statement coverage should be at 100%. - -To measure the test coverage, install -`coverage.py `__ -(using ``easy_install coverage``) and then run:: - - $ make coverage - -This will print a report with one line for each file in `skimage`, -detailing the test coverage:: - - Name Stmts Exec Cover Missing - ------------------------------------------------------------------------------ - skimage/color/colorconv 77 77 100% - skimage/filter/__init__ 1 1 100% - ... - - -Activate Travis-CI for your fork (optional) -------------------------------------------- - -Travis-CI checks all unittests in the project to prevent breakage. - -Before sending a pull request, you may want to check that Travis-CI -successfully passes all tests. To do so, - -* Go to `Travis-CI `__ and follow the Sign In link at - the top - -* Go to your `profile page `__ and switch on - your scikit-image fork - -It corresponds to steps one and two in -`Travis-CI documentation `__ -(Step three is already done in scikit-image). 
- -Thus, as soon as you push your code to your fork, it will trigger Travis-CI, -and you will receive an email notification when the process is done. - -Every time Travis is triggered, it also calls on `Coveralls -`_ to inspect the current test overage. - - -Building docs -------------- - -To build docs, run ``make`` from the ``doc`` directory. ``make help`` lists -all targets. - -Requirements -~~~~~~~~~~~~ - -Sphinx (>= 1.3) and Latex is needed to build doc. - -**Sphinx:** - -.. code:: sh - - pip install sphinx - -**Latex Ubuntu:** - -.. code:: sh - - sudo apt-get install -qq texlive texlive-latex-extra dvipng - -**Latex Mac:** - -Install the full `MacTex `__ installation or -install the smaller -`BasicTex `__ and add *ucs* -and *dvipng* packages: - -.. code:: sh - - sudo tlmgr install ucs dvipng - -Fixing Warnings -~~~~~~~~~~~~~~~ - -- "citation not found: R###" There is probably an underscore after a - reference in the first line of a docstring (e.g. [1]\_). Use this - method to find the source file: $ cd doc/build; grep -rin R#### - -- "Duplicate citation R###, other instance in..."" There is probably a - [2] without a [1] in one of the docstrings - -- Make sure to use pre-sphinxification paths to images (not the - \_images directory) - -Auto-generating dev docs -~~~~~~~~~~~~~~~~~~~~~~~~ - -This set of instructions was used to create -scikit-image/tools/deploy-docs.sh - -- Go to Github account settings -> personal access tokens -- Create a new token with access rights ``public_repo`` and - ``user:email only`` -- Install the travis command line tool: ``gem install travis``. On OSX, - you can get gem via ``brew install ruby``. -- Take then token generated by Github and run - ``travis encrypt GH_TOKEN=`` from inside a scikit-image repo -- Paste the output into the secure: field of ``.travis.yml``. 
-- The decrypted GH\_TOKEN env var will be available for travis scripts - -https://help.github.com/articles/creating-an-access-token-for-command-line-use/ -http://docs.travis-ci.com/user/encryption-keys/ - -Deprecation cycle ------------------ - -If the behavior of the library has to be changed, a deprecation cycle must be -followed to warn users. - -- a deprecation cycle is *not* necessary when - * adding a new function, or - * adding a new keyword argument to the *end* of a function signature, or - * fixing what was buggy behaviour - -- a deprecation cycle is necessary for *any breaking API change*, meaning a - change where the function, invoked with the same arguments, would return a - different result after the change. This includes: - * changing the order of arguments or keyword arguments, or - * adding arguments or keyword arguments to a function, or - * changing a function's name or submodule, or - * changing the default value of a function's arguments. - -Usually, our policy is to put in place a deprecation cycle over two releases. - -For the sake of illustration, we consider the modification of a default value in -a function signature. In version N (therefore, next release will be N+1), we -have - -.. code-block:: python - - def a_function(image, rescale=True): - out = do_something(image, rescale=rescale) - return out - - -that has to be changed to - -.. code-block:: python - - def a_function(image, rescale=None): - if rescale is None: - warn('The default value of rescale will change to `False` in version N+3') - rescale = True - out = do_something(image, rescale=rescale) - return out - - -and in version N+3 - -.. code-block:: python - - def a_function(image, rescale=False): - out = do_something(image, rescale=rescale) - return out - - -Here is the process for a 2-release deprecation cycle: - -- In the signature, set default to `None`, and modify the docstring to specify - that it's `True`. 
-- In the function, _if_ rescale is set to `None`, set to `True` and warn that the - default will change to `False` in version N+3. -- In ``TODO.txt``, create an item in the section related to version N+3 and write - "change rescale default to False in a_function". - -Note that the 2-release deprecation cycle is not a strict rule and in some -cases, the developers can agree on a different procedure upon justification -(like when we can't detect the change, or it involves moving or deleting an -entire function for example). - -Bugs ----- - -Please `report bugs on GitHub `_. diff -Nru skimage-0.13.1/.github/PULL_REQUEST_TEMPLATE.md skimage-0.14.0/.github/PULL_REQUEST_TEMPLATE.md --- skimage-0.13.1/.github/PULL_REQUEST_TEMPLATE.md 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/.github/PULL_REQUEST_TEMPLATE.md 2018-05-29 01:27:44.000000000 +0000 @@ -18,6 +18,10 @@ ## For reviewers +(Don't remove the checklist below.) + - [ ] Check that the PR title is short, concise, and will make sense 1 year later. -- [ ] Check that new features are mentioned in `doc/release/release_dev.rst`. +- [ ] Check that new functions are imported in corresponding `__init__.py`. +- [ ] Check that new features, API changes, and deprecations are mentioned in + `doc/release/release_dev.rst`. 
diff -Nru skimage-0.13.1/.gitignore skimage-0.14.0/.gitignore --- skimage-0.13.1/.gitignore 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/.gitignore 2018-05-29 01:27:44.000000000 +0000 @@ -1,3 +1,5 @@ +.cache +__pycache__ *.pyc *~ *# @@ -7,9 +9,13 @@ *.pyd *.bak *.c +skimage/feature/_haar.cpp *.new *.md5 *.old +.pytest_cache +temp.tif +.ropeproject doc/source/api doc/build source/api diff -Nru skimage-0.13.1/INSTALL.rst skimage-0.14.0/INSTALL.rst --- skimage-0.13.1/INSTALL.rst 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/INSTALL.rst 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,209 @@ +Installing scikit-image +======================= + +We are assuming that you have default Python environment already configured on +your computer and you intend to install ``scikit-image`` inside of it. If you +want to create and work with Python virtual environments, please follow the +instructions on `venv`_ and `virtual environments`_. + +There are two ways you can install ``scikit-image`` on your preferred Python +environment. + +1. Standard Installation +2. Development Installation + +1. Standard Installation: +------------------------- + +``scikit-image`` comes pre-installed with several Python distributions, +including Anaconda_, `Enthought Canopy`_, `Python(x,y)`_ and `WinPython`_. +However, you can install or upgrade existing ``scikit-image`` via +shell/command prompt. + +a. Windows +`````````` + +On Windows, you can install ``scikit-image`` using:: + + pip install scikit-image + +For Conda-based distributions (Anaconda, Miniconda), execute:: + + conda install scikit-image + +If you are using pure Python i.e. the distribution from python.org_, you'll +need to manually download packages (such as numpy, scipy and scikit-image) +using Python wheels available from `Christoph Gohlke's`_ website. +You can install Python wheels using:: + + pip install SomePackage-1.0-py2.py3-none-any.whl + +.. _Anaconda: https://store.continuum.io/cshop/anaconda/ +.. 
_Enthought Canopy: https://www.enthought.com/products/canopy/ +.. _Python(x,y): http://python-xy.github.io/ +.. _WinPython: https://winpython.github.io/ + +b. Debian and Ubuntu +```````````````````` + +On Debian and Ubuntu, install ``scikit-image`` with:: + + sudo apt-get install python-skimage + +2. Development Installation: +---------------------------- + +You can install ``scikit-image`` development version if either your +distribution ships an outdated version or you want to develop and work on new +features before the package is released officially. + +a. Windows +`````````` + +Before installing the development version, uninstall the standard version of +``scikit-image`` using pip as:: + + pip uninstall scikit-image + +or using conda (for Anaconda users) as:: + + conda uninstall scikit-image + +Now clone scikit-image on your local computer:: + + git clone https://github.com/scikit-image/scikit-image.git + +Change the directory and build from source code:: + + cd scikit-image + python setup.py develop + +If you experience the error ``Error:unable to find vcvarsall.bat`` it means +that your computer does not have recommended compilers for Python. You can +either download and install Windows compilers from `here`_ or use +`MinGW compilers`_ . If using `MinGW`, make sure to correctly configure +``distutils`` by modifying (or create, if not existing) the configuration file +``distutils.cfg`` (located for example at +``C:\Python26\Lib\distutils\distutils.cfg``) to contain:: + + [build] + compiler=mingw32 + +Once the build process is complete, run:: + + pip install -U -e . + +Make sure to give space after ``-e`` and add dot at the end. This will install +``scikit-image`` development version and upgrade (or install) all the required +dependencies. Otherwise, you can run the following command to skip installation +of dependencies:: + + pip install -U[--no-deps] -e . 
+ +You can install or upgrade dependencies required for scikit-image anytime after +installation using:: + + pip install -r requirements.txt --upgrade + +For more details on compiling in Windows, there is a lot of knowledge iterated +into the `setup of appveyor`_ (a continuous integration service). + +.. _miniconda: http://conda.pydata.org/miniconda.html +.. _python.org: http://python.org/ +.. _Christoph Gohlke's: http://www.lfd.uci.edu/~gohlke/pythonlibs/ +.. _setup of appveyor: https://github.com/scikit-image/scikit-image/blob/master/.appveyor.yml +.. _here: https://wiki.python.org/moin/WindowsCompilers#Microsoft_Visual_C.2B-.2B-_14.0_standalone:_Visual_C.2B-.2B-_Build_Tools_2015_.28x86.2C_x64.2C_ARM.29 +.. _venv: https://docs.python.org/3/library/venv.html +.. _virtual environments: http://docs.python-guide.org/en/latest/dev/virtualenvs/ +.. _MinGW compilers: http://www.mingw.org/wiki/howto_install_the_mingw_gcc_compiler_suite + +b. Debian and Ubuntu +```````````````````` + +Install all the required dependencies:: + + sudo apt-get install python-matplotlib python-numpy python-pil python-scipy + +Get suitable compilers for successful installation:: + + sudo apt-get install build-essential cython + +Obtain the source from the git repository at +``http://github.com/scikit-image/scikit-image`` by running:: + + git clone https://github.com/scikit-image/scikit-image.git + +After unpacking, change into the source directory and execute:: + + pip install -e . 
+ +To update:: + + git pull # Grab latest source + python setup.py build_ext -i # Compile any modified extensions + +Build Requirements +------------------ + +* `Python >= 2.7 `__ +* `Numpy >= 1.11 `__ +* `Cython >= 0.23 `__ +* `Six >=1.7.3 `__ +* `SciPy >=0.17.0 `__ +* `numpydoc >=0.6 `__ + +Runtime requirements +-------------------- + +* `Python >= 2.7 `__ +* `Numpy >= 1.11 `__ +* `SciPy >= 0.17.0 `__ +* `Matplotlib >= 1.3.1 `__ +* `NetworkX >= 1.8 `__ +* `Six >=1.7.3 `__ +* `Pillow >= 2.1.0 `__ + (or `PIL `__) +* `PyWavelets>=0.4.0 `__ +* `dask[array] >= 0.9.0 `__. + For parallel computation using `skimage.util.apply_parallel`. + +You can use pip to automatically install the runtime dependencies as follows:: + + $ pip install -r requirements.txt + +Optional Requirements +--------------------- + +You can use ``scikit-image`` with the basic requirements listed above, but some +functionality is only available with the following installed: + +* `PyQt4 `__ + The ``qt`` plugin that provides ``imshow(x, fancy=True)`` and `skivi`. + +* `PyAMG `__ + The ``pyamg`` module is used for the fast `cg_mg` mode of random + walker segmentation. + +* `Astropy `__ + Provides FITS I/O capability. + +* `SimpleITK `__ + Optional I/O plugin providing a wide variety of `formats `__. + including specialized formats using in medical imaging. + +* `imread `__ + Optional I/O plugin providing most standard `formats `__. + +Testing requirements +-------------------- + +* `pytest `__ + A Python Unit Testing Framework. Required to execute the tests. +* `pytest-cov `__ + A tool that generates a unit test code coverage report. + +Documentation requirements +-------------------------- + +* `sphinx >= 1.3 `_ + Required to build the documentation. 
diff -Nru skimage-0.13.1/Makefile skimage-0.14.0/Makefile --- skimage-0.13.1/Makefile 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/Makefile 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,6 @@ .PHONY: all clean test PYTHON=python -NOSETESTS=nosetests +PYTESTS=pytest all: $(PYTHON) setup.py build_ext --inplace @@ -10,13 +10,13 @@ find . -name "*.pyx" -exec ./tools/rm_pyx_c_file.sh {} \; test: - $(PYTHON) -c "import skimage, sys, io; sys.exit(skimage.test_verbose())" + $(PYTESTS) skimage --doctest-modules doctest: $(PYTHON) -c "import skimage, sys, io; sys.exit(skimage.doctest_verbose())" coverage: - $(NOSETESTS) skimage --with-coverage --cover-package=skimage + $(PYTESTS) skimage --cov=skimage html: pip install -q sphinx pytest-runner sphinx-gallery diff -Nru skimage-0.13.1/MANIFEST.in skimage-0.14.0/MANIFEST.in --- skimage-0.13.1/MANIFEST.in 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/MANIFEST.in 2018-05-29 01:27:44.000000000 +0000 @@ -1,8 +1,11 @@ include setup*.py +include conftest.py include MANIFEST.in include *.txt +include *.rst +recursive-include requirements *.txt +include requirements/README.md include Makefile -include bento.info include skimage/scripts/skivi recursive-include skimage *.pyx *.pxd *.pxi *.py *.c *.h *.ini *.md5 *.npy *.txt *.in *.cpp *.md recursive-include skimage/data * diff -Nru skimage-0.13.1/optional_requirements.txt skimage-0.14.0/optional_requirements.txt --- skimage-0.13.1/optional_requirements.txt 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/optional_requirements.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -PySide; python_version != '2.7' -imread; python_version != '2.7' -SimpleITK; python_version != '3.4' and python_version != '3.5' -astropy -tifffile -imageio -dask[array]>=0.5.0 diff -Nru skimage-0.13.1/.pep8speaks.yml skimage-0.14.0/.pep8speaks.yml --- skimage-0.13.1/.pep8speaks.yml 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/.pep8speaks.yml 2018-05-29 01:27:44.000000000 +0000 
@@ -0,0 +1,11 @@ +scanner: + diff_only: True # Errors caused by only the patch are shown, not the whole file + +pycodestyle: + ignore: # Errors and warnings to ignore + - W391 # blank line at the end of file + - E203 # whitespace before ,;: + - W503 # newline before binary operator + +no_blank_comment: True # If True, no comment is made when the bot does not find any pep8 errors + diff -Nru skimage-0.13.1/README.md skimage-0.14.0/README.md --- skimage-0.13.1/README.md 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/README.md 2018-05-29 01:27:44.000000000 +0000 @@ -12,14 +12,13 @@ - **Debian/Ubuntu:** ``sudo apt-get install python-skimage`` - **OSX:** ``pip install scikit-image`` - **Anaconda:** ``conda install scikit-image`` -- **Windows:** Download [Windows binaries](http://www.lfd.uci.edu/~gohlke/pythonlibs/#scikits.image) +- **Windows:** Download [Windows binaries](http://www.lfd.uci.edu/~gohlke/pythonlibs/#scikit-image) -Also see -[http://scikit-image.org/docs/dev/install.html](http://scikit-image.org/docs/dev/install.html) +Also see [installing ``scikit-image``](INSTALL.rst). ## Installation from source -Install [dependencies](DEPENDS.txt) using: +Install dependencies using: ``` pip install -r requirements.txt @@ -34,7 +33,12 @@ If you plan to develop the package, you may run it directly from source: ``` -$ python setup.py develop # Do this once to add pkg to Python path +$ pip install -e . # Do this once to add package to Python path +``` + +Every time you modify Cython files, also run: + +``` $ python setup.py build_ext -i # Build binary extensions ``` @@ -69,3 +73,12 @@ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +## Citation + +If you find this project useful, please cite: + +> Stéfan van der Walt, Johannes L. Schönberger, Juan Nunez-Iglesias, +> François Boulogne, Joshua D. Warner, Neil Yager, Emmanuelle +> Gouillart, Tony Yu, and the scikit-image contributors. 
+> *scikit-image: Image processing in Python*. PeerJ 2:e453 (2014) +> http://dx.doi.org/10.7717/peerj.453 diff -Nru skimage-0.13.1/RELEASE.txt skimage-0.14.0/RELEASE.txt --- skimage-0.13.1/RELEASE.txt 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/RELEASE.txt 2018-05-29 01:27:44.000000000 +0000 @@ -12,7 +12,7 @@ appropriate to master. - In the master branch, update the version number in ``skimage/__init__.py`` - and ``bento.info`` to the next ``-dev`` version, commit, and push. + to the next ``-dev`` version, commit, and push. - Back on the release branch, update the release notes: @@ -30,8 +30,10 @@ 5. Copy ``doc/release/release_template.txt`` to ``doc/release/release_dev.txt`` for the next release. -- Update the version number in ``skimage/__init__.py`` and ``bento.info`` and - commit. + 6. Copy relevant deprecations from ``release__.txt`` + to ``release_dev.txt``. + +- Update the version number in ``skimage/__init__.py`` and commit. - Update the docs: diff -Nru skimage-0.13.1/requirements/build.txt skimage-0.14.0/requirements/build.txt --- skimage-0.13.1/requirements/build.txt 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/requirements/build.txt 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,2 @@ +Cython>=0.23 +wheel diff -Nru skimage-0.13.1/requirements/default.txt skimage-0.14.0/requirements/default.txt --- skimage-0.13.1/requirements/default.txt 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/requirements/default.txt 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,9 @@ +numpy>=1.11 +scipy>=0.17.0 +matplotlib>=2.0.0 +networkx>=1.8 +six>=1.10.0 +pillow>=4.3.0 +PyWavelets>=0.4.0 +dask[array]>=0.9.0 +cloudpickle>=0.2.1 diff -Nru skimage-0.13.1/requirements/docs.txt skimage-0.14.0/requirements/docs.txt --- skimage-0.13.1/requirements/docs.txt 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/requirements/docs.txt 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,4 @@ +sphinx>=1.3 +numpydoc +sphinx-gallery +scikit-learn diff -Nru 
skimage-0.13.1/requirements/optional.txt skimage-0.14.0/requirements/optional.txt --- skimage-0.13.1/requirements/optional.txt 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/requirements/optional.txt 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,6 @@ +PySide; python_version <= '3.4' +imread +SimpleITK +astropy +tifffile +imageio diff -Nru skimage-0.13.1/requirements/README.md skimage-0.14.0/requirements/README.md --- skimage-0.13.1/requirements/README.md 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/requirements/README.md 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,29 @@ +# pip requirements files + +## Index + +- [default.txt](default.txt) + Default requirements +- [docs.txt](docs.txt) + Documentation requirements +- [optional.txt](optional.txt) + Optional requirements +- [test.txt](test.txt) + Requirements for running test suite +- [build.txt](build.txt) + Requirements for building from the source repository + +## Examples + +### Installing requirements + +```bash +$ pip install -U -r requirements/default.txt +``` + +### Running the tests + +```bash +$ pip install -U -r requirements/default.txt +$ pip install -U -r requirements/test.txt +``` diff -Nru skimage-0.13.1/requirements/test.txt skimage-0.14.0/requirements/test.txt --- skimage-0.13.1/requirements/test.txt 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/requirements/test.txt 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,2 @@ +pytest +pytest-cov diff -Nru skimage-0.13.1/requirements.txt skimage-0.14.0/requirements.txt --- skimage-0.13.1/requirements.txt 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/requirements.txt 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,2 @@ -matplotlib>=1.3.1 -numpy>=1.11 -scipy>=0.17.0 -six>=1.7.3 -networkx>=1.8 -pillow>=2.1.0 -PyWavelets>=0.4.0 +-r requirements/default.txt +-r requirements/test.txt diff -Nru skimage-0.13.1/setup.py skimage-0.14.0/setup.py --- skimage-0.13.1/setup.py 2017-09-26 23:38:27.000000000 +0000 +++ 
skimage-0.14.0/setup.py 2018-05-29 01:27:44.000000000 +0000 @@ -45,7 +45,7 @@ VERSION = line.strip().split()[-1][1:-1] break -with open('requirements.txt') as fid: +with open('requirements/default.txt') as fid: INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l] # requirements for those browsing PyPI diff -Nru skimage-0.13.1/skimage/color/colorconv.py skimage-0.14.0/skimage/color/colorconv.py --- skimage-0.13.1/skimage/color/colorconv.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/color/colorconv.py 2018-05-29 01:27:44.000000000 +0000 @@ -94,50 +94,51 @@ def convert_colorspace(arr, fromspace, tospace): """Convert an image array to a new color space. + Valid color spaces are: + 'RGB', 'HSV', 'RGB CIE', 'XYZ', 'YUV', 'YIQ', 'YPbPr', 'YCbCr', 'YDbDr' + Parameters ---------- arr : array_like The image to convert. - fromspace : str - The color space to convert from. Valid color space strings are - ``['RGB', 'HSV', 'RGB CIE', 'XYZ', 'YUV', 'YIQ', 'YPbPr', 'YCbCr']``. - Value may also be specified as lower case. - - tospace : str - The color space to convert to. Valid color space strings are - ``['RGB', 'HSV', 'RGB CIE', 'XYZ', 'YUV', 'YIQ', 'YPbPr', 'YCbCr']``. - Value may also be specified as lower case. + fromspace : valid color space + The color space to convert from. Can be specified in lower case. + tospace : valid color space + The color space to convert to. Can be specified in lower case. Returns ------- - newarr : ndarray + out : ndarray The converted image. Notes ----- - Conversion occurs through the "central" RGB color space, i.e. conversion - from XYZ to HSV is implemented as ``XYZ -> RGB -> HSV`` instead of - directly. + Conversion is performed through the "central" RGB color space, + i.e. conversion from XYZ to HSV is implemented as ``XYZ -> RGB -> HSV`` + instead of directly. 
Examples -------- >>> from skimage import data >>> img = data.astronaut() >>> img_hsv = convert_colorspace(img, 'RGB', 'HSV') + """ - fromdict = {'RGB': lambda im: im, 'HSV': hsv2rgb, 'RGB CIE': rgbcie2rgb, - 'XYZ': xyz2rgb, 'YUV': yuv2rgb, 'YIQ': yiq2rgb, - 'YPbPr': ypbpr2rgb, 'YCbCr': ycbcr2rgb} - todict = {'RGB': lambda im: im, 'HSV': rgb2hsv, 'RGB CIE': rgb2rgbcie, - 'XYZ': rgb2xyz, 'YUV': rgb2yuv, 'YIQ': rgb2yiq, - 'YPbPr': rgb2ypbpr, 'YCbCr': rgb2ycbcr} - - fromspace = fromspace.upper() - tospace = tospace.upper() - if fromspace not in fromdict.keys(): - raise ValueError('fromspace needs to be one of %s' % fromdict.keys()) - if tospace not in todict.keys(): - raise ValueError('tospace needs to be one of %s' % todict.keys()) + fromdict = {'rgb': lambda im: im, 'hsv': hsv2rgb, 'rgb cie': rgbcie2rgb, + 'xyz': xyz2rgb, 'yuv': yuv2rgb, 'yiq': yiq2rgb, + 'ypbpr': ypbpr2rgb, 'ycbcr': ycbcr2rgb, 'ydbdr': ydbdr2rgb} + todict = {'rgb': lambda im: im, 'hsv': rgb2hsv, 'rgb cie': rgb2rgbcie, + 'xyz': rgb2xyz, 'yuv': rgb2yuv, 'yiq': rgb2yiq, + 'ypbpr': rgb2ypbpr, 'ycbcr': rgb2ycbcr, 'ydbdr': rgb2ydbdr} + + fromspace = fromspace.lower() + tospace = tospace.lower() + if fromspace not in fromdict: + msg = '`fromspace` has to be one of {}'.format(fromdict.keys()) + raise ValueError(msg) + if tospace not in todict: + msg = '`tospace` has to be one of {}'.format(todict.keys()) + raise ValueError(msg) return todict[tospace](fromdict[fromspace](arr)) @@ -408,6 +409,13 @@ rgb_from_ycbcr = linalg.inv(ycbcr_from_rgb) +ydbdr_from_rgb = np.array([[ 0.299, 0.587, 0.114], + [ -0.45 , -0.883, 1.333], + [ -1.333, 1.116, 0.217]]) + +rgb_from_ydbdr = linalg.inv(ydbdr_from_rgb) + + # CIE LAB constants for Observer=2A, Illuminant=D65 # NOTE: this is actually the XYZ values for the illuminant above. lab_ref_white = np.array([0.95047, 1., 1.08883]) @@ -811,7 +819,7 @@ Parameters ---------- image : array_like - Input image of shape ``(M, N [, P])``. + Input image of shape ``(M[, N][, P])``. 
alpha : bool, optional Ensure that the output image has an alpha layer. If None, alpha layers are passed through but not created. @@ -819,13 +827,17 @@ Returns ------- rgb : ndarray - RGB image of shape ``(M, N, [, P], 3)``. + RGB image of shape ``(M[, N][, P], 3)``. Raises ------ ValueError - If the input is not a 2- or 3-dimensional image. + If the input is not a 1-, 2- or 3-dimensional image. + Notes + ----- + If the input is a 1-dimensional image of shape ``(M, )``, the output + will be shape ``(M, 3)``. """ is_rgb = False is_alpha = False @@ -849,7 +861,7 @@ return image - elif image.ndim != 1 and dims in (1, 2, 3): + elif dims in (1, 2, 3): image = image[..., np.newaxis] if alpha: @@ -1691,6 +1703,40 @@ return arr +def rgb2ydbdr(rgb): + """RGB to YDbDr color space conversion. + + Parameters + ---------- + rgb : array_like + The image in RGB format, in a 3- or 4-D array of shape + ``(M, N, [P,] 3)``. + + Returns + ------- + out : ndarray + The image in YDbDr format, in a 3- or 4-D array of shape + ``(M, N, [P,] 3)``. + + Raises + ------ + ValueError + If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``. + + Notes + ----- + This is the color space which is commonly used + by video codecs, it is also the reversible color transform in JPEG2000. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/YDbDr + + """ + arr = _convert(ydbdr_from_rgb, rgb) + return arr + + def yuv2rgb(yuv): """YUV to RGB color space conversion. @@ -1805,3 +1851,37 @@ arr[..., 1] -= 128 arr[..., 2] -= 128 return _convert(rgb_from_ycbcr, arr) + + +def ydbdr2rgb(ydbdr): + """YDbDr to RGB color space conversion. + + Parameters + ---------- + ydbdr : array_like + The image in YDbDr format, in a 3- or 4-D array of shape + ``(M, N, [P,] 3)``. + + Returns + ------- + out : ndarray + The image in RGB format, in a 3- or 4-D array of shape + ``(M, N, [P,] 3)``. + + Raises + ------ + ValueError + If `ydbdr` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``. 
+ + Notes + ----- + This is the color space which is commonly used + by video codecs, it is also the reversible color transform in JPEG2000. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/YDbDr + + """ + arr = ydbdr.copy() + return _convert(rgb_from_ydbdr, arr) diff -Nru skimage-0.13.1/skimage/color/colorlabel.py skimage-0.14.0/skimage/color/colorlabel.py --- skimage-0.13.1/skimage/color/colorlabel.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/color/colorlabel.py 2018-05-29 01:27:44.000000000 +0000 @@ -3,7 +3,7 @@ import numpy as np from .._shared.utils import warn -from .. import img_as_float +from ..util import img_as_float from . import rgb_colors from .colorconv import rgb2gray, gray2rgb diff -Nru skimage-0.13.1/skimage/color/__init__.py skimage-0.14.0/skimage/color/__init__.py --- skimage-0.13.1/skimage/color/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/color/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -31,6 +31,8 @@ ypbpr2rgb, rgb2ycbcr, ycbcr2rgb, + rgb2ydbdr, + ydbdr2rgb, separate_stains, combine_stains, rgb_from_hed, @@ -93,6 +95,8 @@ 'ypbpr2rgb', 'rgb2ycbcr', 'ycbcr2rgb', + 'rgb2ydbdr', + 'ydbdr2rgb', 'separate_stains', 'combine_stains', 'rgb_from_hed', diff -Nru skimage-0.13.1/skimage/color/tests/test_colorconv.py skimage-0.14.0/skimage/color/tests/test_colorconv.py --- skimage-0.13.1/skimage/color/tests/test_colorconv.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/color/tests/test_colorconv.py 2018-05-29 01:27:44.000000000 +0000 @@ -15,12 +15,9 @@ import os.path import numpy as np -from numpy.testing import (assert_equal, - assert_almost_equal, - assert_array_almost_equal, - assert_raises, - TestCase, - ) +from skimage._shared.testing import assert_equal, assert_almost_equal +from skimage._shared.testing import assert_array_almost_equal +from skimage._shared.testing import TestCase from skimage import img_as_float, img_as_ubyte from skimage.io import imread 
@@ -41,13 +38,13 @@ rgb2yiq, yiq2rgb, rgb2ypbpr, ypbpr2rgb, rgb2ycbcr, ycbcr2rgb, + rgb2ydbdr, ydbdr2rgb, rgba2rgb, - guess_spatial_dimensions - ) + guess_spatial_dimensions) from skimage import data_dir from skimage._shared._warnings import expected_warnings - +from skimage._shared import testing import colorsys @@ -61,7 +58,8 @@ assert_equal(guess_spatial_dimensions(im2), 3) assert_equal(guess_spatial_dimensions(im3), None) assert_equal(guess_spatial_dimensions(im4), 3) - assert_raises(ValueError, guess_spatial_dimensions, im5) + with testing.raises(ValueError): + guess_spatial_dimensions(im5) class TestColorconv(TestCase): @@ -224,24 +222,32 @@ self.colbars_array) def test_convert_colorspace(self): - colspaces = ['HSV', 'RGB CIE', 'XYZ'] - colfuncs_from = [hsv2rgb, rgbcie2rgb, xyz2rgb] - colfuncs_to = [rgb2hsv, rgb2rgbcie, rgb2xyz] + colspaces = ['HSV', 'RGB CIE', 'XYZ', 'YCbCr', 'YPbPr', 'YDbDr'] + colfuncs_from = [ + hsv2rgb, rgbcie2rgb, xyz2rgb, + ycbcr2rgb, ypbpr2rgb, ydbdr2rgb + ] + colfuncs_to = [ + rgb2hsv, rgb2rgbcie, rgb2xyz, + rgb2ycbcr, rgb2ypbpr, rgb2ydbdr + ] + + assert_almost_equal( + convert_colorspace(self.colbars_array, 'RGB', 'RGB'), + self.colbars_array) - assert_almost_equal(convert_colorspace(self.colbars_array, 'RGB', - 'RGB'), self.colbars_array) for i, space in enumerate(colspaces): gt = colfuncs_from[i](self.colbars_array) - assert_almost_equal(convert_colorspace(self.colbars_array, space, - 'RGB'), gt) + assert_almost_equal( + convert_colorspace(self.colbars_array, space, 'RGB'), gt) gt = colfuncs_to[i](self.colbars_array) - assert_almost_equal(convert_colorspace(self.colbars_array, 'RGB', - space), gt) + assert_almost_equal( + convert_colorspace(self.colbars_array, 'RGB', space), gt) - self.assertRaises(ValueError, convert_colorspace, self.colbars_array, - 'nokey', 'XYZ') - self.assertRaises(ValueError, convert_colorspace, self.colbars_array, - 'RGB', 'nokey') + self.assertRaises(ValueError, convert_colorspace, + self.colbars_array, 
'nokey', 'XYZ') + self.assertRaises(ValueError, convert_colorspace, + self.colbars_array, 'RGB', 'nokey') def test_rgb2grey(self): x = np.array([1, 1, 1]).reshape((1, 1, 3)).astype(np.float) @@ -468,11 +474,13 @@ assert_array_almost_equal(rgb2yiq(rgb), np.array([[[1, 0, 0]]])) assert_array_almost_equal(rgb2ypbpr(rgb), np.array([[[1, 0, 0]]])) assert_array_almost_equal(rgb2ycbcr(rgb), np.array([[[235, 128, 128]]])) + assert_array_almost_equal(rgb2ydbdr(rgb), np.array([[[1, 0, 0]]])) rgb = np.array([[[0.0, 1.0, 0.0]]]) assert_array_almost_equal(rgb2yuv(rgb), np.array([[[0.587, -0.28886916, -0.51496512]]])) assert_array_almost_equal(rgb2yiq(rgb), np.array([[[0.587, -0.27455667, -0.52273617]]])) assert_array_almost_equal(rgb2ypbpr(rgb), np.array([[[0.587, -0.331264, -0.418688]]])) assert_array_almost_equal(rgb2ycbcr(rgb), np.array([[[144.553, 53.797, 34.214]]])) + assert_array_almost_equal(rgb2ydbdr(rgb), np.array([[[0.587, -0.883, 1.116]]])) def test_yuv_roundtrip(self): img_rgb = img_as_float(self.img_rgb)[::16, ::16] @@ -480,6 +488,7 @@ assert_array_almost_equal(yiq2rgb(rgb2yiq(img_rgb)), img_rgb) assert_array_almost_equal(ypbpr2rgb(rgb2ypbpr(img_rgb)), img_rgb) assert_array_almost_equal(ycbcr2rgb(rgb2ycbcr(img_rgb)), img_rgb) + assert_array_almost_equal(ydbdr2rgb(rgb2ydbdr(img_rgb)), img_rgb) def test_rgb2yiq_conversion(self): rgb = img_as_float(self.img_rgb)[::16, ::16] @@ -492,7 +501,12 @@ def test_gray2rgb(): x = np.array([0, 0.5, 1]) - assert_raises(ValueError, gray2rgb, x) + w = gray2rgb(x) + expected_output = np.array([[ 0, 0, 0 ], + [ 0.5, 0.5, 0.5, ], + [ 1, 1, 1 ]]) + + assert_equal(w, expected_output) x = x.reshape((3, 1)) y = gray2rgb(x) @@ -531,8 +545,3 @@ alpha=True)[0, 0, 3], 1) assert_equal(gray2rgb(np.array([[1, 2], [3, 4]], dtype=np.uint8), alpha=True)[0, 0, 3], 255) - - -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() diff -Nru skimage-0.13.1/skimage/color/tests/test_colorlabel.py 
skimage-0.14.0/skimage/color/tests/test_colorlabel.py --- skimage-0.13.1/skimage/color/tests/test_colorlabel.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/color/tests/test_colorlabel.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,16 +1,18 @@ import itertools import numpy as np -from numpy import testing from skimage.color.colorlabel import label2rgb -from numpy.testing import (assert_array_almost_equal as assert_close, - assert_array_equal, assert_warns) + +from skimage._shared import testing +from skimage._shared.testing import (assert_array_almost_equal, + assert_array_equal, assert_warns) def test_shape_mismatch(): image = np.ones((3, 3)) label = np.ones((2, 2)) - testing.assert_raises(ValueError, label2rgb, image, label) + with testing.raises(ValueError): + label2rgb(image, label) def test_rgb(): @@ -19,7 +21,7 @@ colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # Set alphas just in case the defaults change rgb = label2rgb(label, image=image, colors=colors, alpha=1, image_alpha=1) - assert_close(rgb, [colors]) + assert_array_almost_equal(rgb, [colors]) def test_alpha(): @@ -27,16 +29,16 @@ label = np.random.randint(0, 9, size=(3, 3)) # If we set `alpha = 0`, then rgb should match image exactly. rgb = label2rgb(label, image=image, alpha=0, image_alpha=1) - assert_close(rgb[..., 0], image) - assert_close(rgb[..., 1], image) - assert_close(rgb[..., 2], image) + assert_array_almost_equal(rgb[..., 0], image) + assert_array_almost_equal(rgb[..., 1], image) + assert_array_almost_equal(rgb[..., 2], image) def test_no_input_image(): label = np.arange(3).reshape(1, -1) colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] rgb = label2rgb(label, colors=colors) - assert_close(rgb, [colors]) + assert_array_almost_equal(rgb, [colors]) def test_image_alpha(): @@ -45,7 +47,7 @@ colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # If we set `image_alpha = 0`, then rgb should match label colors exactly. 
rgb = label2rgb(label, image=image, colors=colors, alpha=1, image_alpha=0) - assert_close(rgb, [colors]) + assert_array_almost_equal(rgb, [colors]) def test_color_names(): @@ -55,7 +57,7 @@ colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # Set alphas just in case the defaults change rgb = label2rgb(label, image=image, colors=cnames, alpha=1, image_alpha=1) - assert_close(rgb, [colors]) + assert_array_almost_equal(rgb, [colors]) def test_bg_and_color_cycle(): @@ -65,20 +67,25 @@ bg_color = (0, 0, 0) rgb = label2rgb(label, image=image, bg_label=0, bg_color=bg_color, colors=colors, alpha=1) - assert_close(rgb[0, 0], bg_color) + assert_array_almost_equal(rgb[0, 0], bg_color) for pixel, color in zip(rgb[0, 1:], itertools.cycle(colors)): - assert_close(pixel, color) + assert_array_almost_equal(pixel, color) + def test_negative_labels(): labels = np.array([0, -1, -2, 0]) rout = np.array([(0., 0., 0.), (0., 0., 1.), (1., 0., 0.), (0., 0., 0.)]) - assert_close(rout, label2rgb(labels, bg_label=0, alpha=1, image_alpha=1)) + assert_array_almost_equal( + rout, label2rgb(labels, bg_label=0, alpha=1, image_alpha=1)) + def test_nonconsecutive(): labels = np.array([0, 2, 4, 0]) - colors=[(1, 0, 0), (0, 0, 1)] + colors = [(1, 0, 0), (0, 0, 1)] rout = np.array([(1., 0., 0.), (0., 0., 1.), (1., 0., 0.), (1., 0., 0.)]) - assert_close(rout, label2rgb(labels, colors=colors, alpha=1, image_alpha=1)) + assert_array_almost_equal( + rout, label2rgb(labels, colors=colors, alpha=1, image_alpha=1)) + def test_label_consistency(): """Assert that the same labels map to the same colors.""" @@ -89,7 +96,9 @@ rgb_1 = label2rgb(label_1, colors=colors) rgb_2 = label2rgb(label_2, colors=colors) for label_id in label_2.flat: - assert_close(rgb_1[label_1 == label_id], rgb_2[label_2 == label_id]) + assert_array_almost_equal(rgb_1[label_1 == label_id], + rgb_2[label_2 == label_id]) + def test_leave_labels_alone(): labels = np.array([-1, 0, 1]) @@ -99,6 +108,7 @@ label2rgb(labels, bg_label=1) 
assert_array_equal(labels, labels_saved) + def test_avg(): # label image label_field = np.array([[1, 1, 1, 2], @@ -149,8 +159,3 @@ labels = np.arange(100).reshape(10, 10) image = -1 * np.ones((10, 10)) assert_warns(UserWarning, label2rgb, labels, image) - - -if __name__ == '__main__': - testing.run_module_suite() - diff -Nru skimage-0.13.1/skimage/color/tests/test_delta_e.py skimage-0.14.0/skimage/color/tests/test_delta_e.py --- skimage-0.13.1/skimage/color/tests/test_delta_e.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/color/tests/test_delta_e.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,7 +2,7 @@ from os.path import abspath, dirname, join as pjoin import numpy as np -from numpy.testing import assert_allclose +from skimage._shared.testing import assert_allclose from skimage.color import (deltaE_cie76, deltaE_ciede94, @@ -160,8 +160,3 @@ lab1 = (0.5, 0.5, 0.5) lab2 = (0.4, 0.4, 0.4) deltaE_cmc(lab1, lab2) - - -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() Binary files /tmp/tmp4mRx1F/_QVxokqXnc/skimage-0.13.1/skimage/data/astronaut_GRAY_hog_L1.npy and /tmp/tmp4mRx1F/45Vghig4fP/skimage-0.14.0/skimage/data/astronaut_GRAY_hog_L1.npy differ Binary files /tmp/tmp4mRx1F/_QVxokqXnc/skimage-0.13.1/skimage/data/astronaut_GRAY_hog_L2-Hys.npy and /tmp/tmp4mRx1F/45Vghig4fP/skimage-0.14.0/skimage/data/astronaut_GRAY_hog_L2-Hys.npy differ Binary files /tmp/tmp4mRx1F/_QVxokqXnc/skimage-0.13.1/skimage/data/green_palette.png and /tmp/tmp4mRx1F/45Vghig4fP/skimage-0.14.0/skimage/data/green_palette.png differ diff -Nru skimage-0.13.1/skimage/data/__init__.py skimage-0.14.0/skimage/data/__init__.py --- skimage-0.13.1/skimage/data/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/data/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -10,16 +10,17 @@ import os as _os -import numpy as np +import numpy as _np from .. 
import data_dir from ..io import imread, use_plugin -from .._shared._warnings import expected_warnings -from ._binary_blobs import binary_blobs +from .._shared._warnings import expected_warnings, warn from .. import img_as_bool +from ._binary_blobs import binary_blobs __all__ = ['load', 'astronaut', + 'binary_blobs', 'camera', 'checkerboard', 'chelsea', @@ -29,6 +30,7 @@ 'horse', 'hubble_deep_field', 'immunohistochemistry', + 'lfw_subset', 'logo', 'moon', 'page', @@ -37,23 +39,32 @@ 'stereo_motorcycle'] -def load(f, as_grey=False): +def load(f, as_gray=False, as_grey=None): """Load an image file located in the data directory. Parameters ---------- f : string File name. - as_grey : bool, optional - Convert to greyscale. + as_gray : bool, optional + Convert to grayscale. + as_grey : bool or None, optional + Deprecated keyword argument. Use `as_gray` instead. + If None, `as_gray` is used. + Convert to grayscale. Returns ------- img : ndarray Image loaded from ``skimage.data_dir``. """ + if as_grey is not None: + as_gray = as_grey + warn('`as_grey` has been deprecated in favor of `as_gray`' + ' and will be removed in v0.16.') + use_plugin('pil') - return imread(_os.path.join(data_dir, f), as_grey=as_grey) + return imread(_os.path.join(data_dir, f), as_gray=as_gray) def camera(): @@ -70,7 +81,7 @@ def astronaut(): - """Colour image of the astronaut Eileen Collins. + """Color image of the astronaut Eileen Collins. Photograph of Eileen Collins, an American astronaut. She was selected as an astronaut in 1992 and first piloted the space shuttle STS-63 in @@ -138,7 +149,7 @@ ----- This image was downloaded from the `Brooklyn Museum Collection - `__. + `__. No known copyright restrictions. @@ -204,7 +215,7 @@ Horse image. 
""" with expected_warnings(['Possible precision loss', 'Possible sign loss']): - return img_as_bool(load("horse.png", as_grey=True)) + return img_as_bool(load("horse.png", as_gray=True)) def clock(): @@ -331,15 +342,15 @@ def stereo_motorcycle(): """Rectified stereo image pair with ground-truth disparities. - The two images are rectified such that every pixel in the left image has its - corresponding pixel on the same scanline in the right image. That means that - both images are warped such that they have the same orientation but a + The two images are rectified such that every pixel in the left image has + its corresponding pixel on the same scanline in the right image. That means + that both images are warped such that they have the same orientation but a horizontal spatial offset (baseline). The ground-truth pixel offset in column direction is specified by the included disparity map. - The two images are part of the Middlebury 2014 stereo benchmark. The dataset - was created by Nera Nesic, Porter Westling, Xi Wang, York Kitajima, Greg - Krathwohl, and Daniel Scharstein at Middlebury College. A detailed + The two images are part of the Middlebury 2014 stereo benchmark. The + dataset was created by Nera Nesic, Porter Westling, Xi Wang, York Kitajima, + Greg Krathwohl, and Daniel Scharstein at Middlebury College. A detailed description of the acquisition process can be found in [1]_. The images included here are down-sampled versions of the default exposure @@ -369,18 +380,51 @@ Notes ----- - The original resolution images, images with different exposure and lighting, - and ground-truth depth maps can be found at the Middlebury website [2]_. + The original resolution images, images with different exposure and + lighting, and ground-truth depth maps can be found at the Middlebury + website [2]_. References ---------- - .. [1] D. Scharstein, H. Hirschmueller, Y. Kitajima, G. Krathwohl, N. Nesic, - X. Wang, and P. Westling. 
High-resolution stereo datasets with - subpixel-accurate ground truth. In German Conference on Pattern + .. [1] D. Scharstein, H. Hirschmueller, Y. Kitajima, G. Krathwohl, N. + Nesic, X. Wang, and P. Westling. High-resolution stereo datasets + with subpixel-accurate ground truth. In German Conference on Pattern Recognition (GCPR 2014), Muenster, Germany, September 2014. .. [2] http://vision.middlebury.edu/stereo/data/scenes2014/ """ return (load("motorcycle_left.png"), load("motorcycle_right.png"), - np.load(_os.path.join(data_dir, "motorcycle_disp.npz"))["arr_0"]) + _np.load(_os.path.join(data_dir, "motorcycle_disp.npz"))["arr_0"]) + + +def lfw_subset(): + """Subset of data from the LFW dataset. + + This database is a subset of the LFW database containing: + + * 100 faces + * 100 non-faces + + The full dataset is available at [2]_. + + Returns + ------- + images : (200, 25, 25) uint8 ndarray + 100 first images are faces and subsequent 100 are non-faces. + + Notes + ----- + The faces were randomly selected from the LFW dataset and the non-faces + were extracted from the background of the same dataset. The cropped ROIs + have been resized to a 25 x 25 pixels. + + References + ---------- + .. [1] Huang, G., Mattar, M., Lee, H., & Learned-Miller, E. G. (2012). + Learning to align from scratch. In Advances in Neural Information + Processing Systems (pp. 764-772). + .. 
[2] http://vis-www.cs.umass.edu/lfw/ + + """ + return _np.load(_os.path.join(data_dir, 'lfw_subset.npy')) Binary files /tmp/tmp4mRx1F/_QVxokqXnc/skimage-0.13.1/skimage/data/lfw_subset.npy and /tmp/tmp4mRx1F/45Vghig4fP/skimage-0.14.0/skimage/data/lfw_subset.npy differ diff -Nru skimage-0.13.1/skimage/data/tests/test_data.py skimage-0.14.0/skimage/data/tests/test_data.py --- skimage-0.13.1/skimage/data/tests/test_data.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/data/tests/test_data.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,6 @@ import numpy as np import skimage.data as data -from numpy.testing import assert_equal, assert_almost_equal +from skimage._shared.testing import assert_equal, assert_almost_equal def test_astronaut(): @@ -96,6 +96,6 @@ assert not np.all(blobs == other_realization) -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() +def test_lfw_subset(): + """ Test that "lfw_subset" can be loaded.""" + data.lfw_subset() diff -Nru skimage-0.13.1/skimage/draw/draw.py skimage-0.14.0/skimage/draw/draw.py --- skimage-0.13.1/skimage/draw/draw.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/draw/draw.py 2018-05-29 01:27:44.000000000 +0000 @@ -260,14 +260,15 @@ return _coords_inside_image(rr, cc, shape) -def set_color(img, coords, color, alpha=1): +def set_color(image, coords, color, alpha=1): """Set pixel color in the image at the given coordinates. + Note that this function modifies the color of the image in-place. Coordinates that exceed the shape of the image will be ignored. Parameters ---------- - img : (M, N, D) ndarray + image : (M, N, D) ndarray Image coords : tuple of ((P,) ndarray, (P,) ndarray) Row and column coordinates of pixels to be colored. @@ -277,11 +278,6 @@ Alpha values used to blend color with image. 0 is transparent, 1 is opaque. - Returns - ------- - img : (M, N, D) ndarray - The updated image. 
- Examples -------- >>> from skimage.draw import line, set_color @@ -303,29 +299,29 @@ """ rr, cc = coords - if img.ndim == 2: - img = img[..., np.newaxis] + if image.ndim == 2: + image = image[..., np.newaxis] color = np.array(color, ndmin=1, copy=False) - if img.shape[-1] != color.shape[-1]: + if image.shape[-1] != color.shape[-1]: raise ValueError('Color shape ({}) must match last ' 'image dimension ({}).'.format(color.shape[0], - img.shape[-1])) + image.shape[-1])) if np.isscalar(alpha): # Can be replaced by ``full_like`` when numpy 1.8 becomes # minimum dependency alpha = np.ones_like(rr) * alpha - rr, cc, alpha = _coords_inside_image(rr, cc, img.shape, val=alpha) + rr, cc, alpha = _coords_inside_image(rr, cc, image.shape, val=alpha) alpha = alpha[..., np.newaxis] color = color * alpha - vals = img[rr, cc] * (1 - alpha) + vals = image[rr, cc] * (1 - alpha) - img[rr, cc] = vals + color + image[rr, cc] = vals + color def line(r0, c0, r1, c1): @@ -693,3 +689,78 @@ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8) """ return _bezier_curve(r0, c0, r1, c1, r2, c2, weight, shape) + + +def rectangle(start, end=None, extent=None, shape=None): + """Generate coordinates of pixels within a rectangle. + + Parameters + ---------- + start : tuple + Origin point of the rectangle, e.g., ``([plane,] row, column)``. + end : tuple + End point of the rectangle ``([plane,] row, column)``. + Either `end` or `extent` must be specified. + extent : tuple + The extent (size) of the drawn rectangle. E.g., + ``([num_planes,] num_rows, num_cols)``. + Either `end` or `extent` must be specified. + shape : tuple, optional + Image shape used to determine the maximum bounds of the output + coordinates. This is useful for clipping rectangles that exceed + the image size. By default, no clipping is done. + + Returns + ------- + coords : array of int, shape (Ndim, Npoints) + The coordinates of all pixels in the rectangle. 
+ + Notes + ----- + This function can be applied to N-dimensional images, by passing `start` and + `end` or `extent` as tuples of length N. + + Examples + -------- + >>> import numpy as np + >>> from skimage.draw import rectangle + >>> img = np.zeros((5, 5), dtype=np.uint8) + >>> start = (1, 1) + >>> extent = (3, 3) + >>> rr, cc = rectangle(start, extent=extent, shape=img.shape) + >>> img[rr, cc] = 1 + >>> img + array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]], dtype=uint8) + + + >>> img = np.zeros((5, 5), dtype=np.uint8) + >>> start = (0, 1) + >>> end = (3, 3) + >>> rr, cc = rectangle(start, end=end, shape=img.shape) + >>> img[rr, cc] = 1 + >>> img + array([[0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]], dtype=uint8) + + """ + if extent is not None: + end = np.array(start) + np.array(extent) + elif end is None: + raise ValueError("Either `end` or `extent` must be given") + tl = np.minimum(start, end) + br = np.maximum(start, end) + if extent is None: + br += 1 + if shape is not None: + br = np.minimum(shape, br) + tl = np.maximum(np.zeros_like(shape), tl) + coords = np.meshgrid(*[np.arange(st, en) for st, en in zip(tuple(tl), + tuple(br))]) + return coords diff -Nru skimage-0.13.1/skimage/draw/_draw.pyx skimage-0.14.0/skimage/draw/_draw.pyx --- skimage-0.13.1/skimage/draw/_draw.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/draw/_draw.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -385,7 +385,7 @@ while r > c + 1: c += 1 - dceil = sqrt(radius**2 - c**2) + dceil = sqrt(radius * radius - c * c) dceil = ceil(dceil) - dceil if dceil < dceil_prev: r -= 1 @@ -447,8 +447,8 @@ cdef list cc = list() # Compute useful values - cdef Py_ssize_t rd = r_radius**2 - cdef Py_ssize_t cd = c_radius**2 + cdef Py_ssize_t rd = r_radius * r_radius + cdef Py_ssize_t cd = c_radius * c_radius cdef Py_ssize_t r, c, e2, err diff -Nru skimage-0.13.1/skimage/draw/__init__.py 
skimage-0.14.0/skimage/draw/__init__.py --- skimage-0.13.1/skimage/draw/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/draw/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,10 @@ from .draw import (circle, ellipse, set_color, polygon_perimeter, line, line_aa, polygon, ellipse_perimeter, circle_perimeter, circle_perimeter_aa, - bezier_curve) + bezier_curve,rectangle) from .draw3d import ellipsoid, ellipsoid_stats from ._draw import _bezier_segment +from ._random_shapes import random_shapes __all__ = ['line', 'line_aa', @@ -17,4 +18,6 @@ 'circle', 'circle_perimeter', 'circle_perimeter_aa', - 'set_color'] + 'set_color', + 'random_shapes', + 'rectangle'] diff -Nru skimage-0.13.1/skimage/draw/_random_shapes.py skimage-0.14.0/skimage/draw/_random_shapes.py --- skimage-0.13.1/skimage/draw/_random_shapes.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/draw/_random_shapes.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,354 @@ +import numpy as np + +from . import polygon as draw_polygon, circle as draw_circle +from .._shared.utils import warn + + +def _generate_rectangle_mask(point, image, shape, random): + """Generate a mask for a filled rectangle shape. + + The height and width of the rectangle are generated randomly. + + Parameters + ---------- + point : tuple + The row and column of the top left corner of the rectangle. + image : tuple + The height, width and depth of the image into which the shape is placed. + shape : tuple + The minimum and maximum size of the shape to fit. + random : np.random.RandomState + The random state to use for random sampling. + + Raises + ------ + ArithmeticError + When a shape cannot be fit into the image with the given starting + coordinates. This usually means the image dimensions are too small or + shape dimensions too large. + + Returns + ------- + label : tuple + A (category, ((r0, r1), (c0, c1))) tuple specifying the category and + bounding box coordinates of the shape. 
+ indices : 2-D array + A mask of indices that the shape fills. + """ + available_width = min(image[1] - point[1], shape[1]) + if available_width < shape[0]: + raise ArithmeticError('cannot fit shape to image') + available_height = min(image[0] - point[0], shape[1]) + if available_height < shape[0]: + raise ArithmeticError('cannot fit shape to image') + # Pick random widths and heights. + r = random.randint(shape[0], available_height + 1) + c = random.randint(shape[0], available_width + 1) + rectangle = draw_polygon([ + point[0], + point[0] + r, + point[0] + r, + point[0], + ], [ + point[1], + point[1], + point[1] + c, + point[1] + c, + ]) + label = ('rectangle', ((point[0], point[0] + r), (point[1], point[1] + c))) + + return rectangle, label + + +def _generate_circle_mask(point, image, shape, random): + """Generate a mask for a filled circle shape. + + The radius of the circle is generated randomly. + + Parameters + ---------- + point : tuple + The row and column of the top left corner of the rectangle. + image : tuple + The height, width and depth of the image into which the shape is placed. + shape : tuple + The minimum and maximum size and color of the shape to fit. + random : np.random.RandomState + The random state to use for random sampling. + + Raises + ------ + ArithmeticError + When a shape cannot be fit into the image with the given starting + coordinates. This usually means the image dimensions are too small or + shape dimensions too large. + + Returns + ------- + label : tuple + A (category, ((r0, r1), (c0, c1))) tuple specifying the category and + bounding box coordinates of the shape. + indices : 2-D array + A mask of indices that the shape fills. 
+ """ + if shape[0] == 1 or shape[1] == 1: + raise ValueError('size must be > 1 for circles') + min_radius = shape[0] / 2.0 + max_radius = shape[1] / 2.0 + left = point[1] + right = image[1] - point[1] + top = point[0] + bottom = image[0] - point[0] + available_radius = min(left, right, top, bottom, max_radius) + if available_radius < min_radius: + raise ArithmeticError('cannot fit shape to image') + radius = random.randint(min_radius, available_radius + 1) + circle = draw_circle(point[0], point[1], radius) + label = ('circle', ((point[0] - radius + 1, point[0] + radius), + (point[1] - radius + 1, point[1] + radius))) + + return circle, label + + +def _generate_triangle_mask(point, image, shape, random): + """Generate a mask for a filled equilateral triangle shape. + + The length of the sides of the triangle is generated randomly. + + Parameters + ---------- + point : tuple + The row and column of the top left corner of a down-pointing triangle. + image : tuple + The height, width and depth of the image into which the shape is placed. + shape : tuple + The minimum and maximum size and color of the shape to fit. + random : np.random.RandomState + The random state to use for random sampling. + + Raises + ------ + ArithmeticError + When a shape cannot be fit into the image with the given starting + coordinates. This usually means the image dimensions are too small or + shape dimensions too large. + + Returns + ------- + label : tuple + A (category, ((r0, r1), (c0, c1))) tuple specifying the category and + bounding box coordinates of the shape. + indices : 2-D array + A mask of indices that the shape fills. 
+ """ + if shape[0] == 1 or shape[1] == 1: + raise ValueError('dimension must be > 1 for triangles') + available_side = min(image[1] - point[1], point[0] + 1, shape[1]) + if available_side < shape[0]: + raise ArithmeticError('cannot fit shape to image') + side = random.randint(shape[0], available_side + 1) + triangle_height = int(np.ceil(np.sqrt(3 / 4.0) * side)) + triangle = draw_polygon([ + point[0], + point[0] - triangle_height, + point[0], + ], [ + point[1], + point[1] + side // 2, + point[1] + side, + ]) + label = ('triangle', ((point[0] - triangle_height, point[0]), + (point[1], point[1] + side))) + + return triangle, label + + +# Allows lookup by key as well as random selection. +SHAPE_GENERATORS = dict( + rectangle=_generate_rectangle_mask, + circle=_generate_circle_mask, + triangle=_generate_triangle_mask) +SHAPE_CHOICES = list(SHAPE_GENERATORS.values()) + + +def _generate_random_colors(num_colors, num_channels, intensity_range, random): + """Generate an array of random colors. + + Parameters + ---------- + num_colors : int + Number of colors to generate. + num_channels : int + Number of elements representing color. + intensity_range : {tuple of tuples of ints, tuple of ints}, optional + The range of values to sample pixel values from. For grayscale images + the format is (min, max). For multichannel - ((min, max),) if the + ranges are equal across the channels, and + ((min_0, max_0), ... (min_N, max_N)) if they differ. + random : np.random.RandomState + The random state to use for random sampling. + + Raises + ------ + ValueError + When the `intensity_range` is not in the interval (0, 255). + + Returns + ------- + colors : array + An array of shape (num_colors, num_channels), where the values for + each channel are drawn from the corresponding `intensity_range`. 
+ + """ + if num_channels == 1: + intensity_range = (intensity_range, ) + elif len(intensity_range) == 1: + intensity_range = intensity_range * num_channels + colors = [random.randint(r[0], r[1]+1, size=num_colors) + for r in intensity_range] + return np.transpose(colors) + + +def random_shapes(image_shape, + max_shapes, + min_shapes=1, + min_size=2, + max_size=None, + multichannel=True, + num_channels=3, + shape=None, + intensity_range=None, + allow_overlap=False, + num_trials=100, + random_seed=None): + """Generate an image with random shapes, labeled with bounding boxes. + + The image is populated with random shapes with random sizes, random + locations, and random colors, with or without overlap. + + Shapes have random (row, col) starting coordinates and random sizes bounded + by `min_size` and `max_size`. It can occur that a randomly generated shape + will not fit the image at all. In that case, the algorithm will try again + with new starting coordinates a certain number of times. However, it also + means that some shapes may be skipped altogether. In that case, this + function will generate fewer shapes than requested. + + Parameters + ---------- + image_shape : tuple + The number of rows and columns of the image to generate. + max_shapes : int + The maximum number of shapes to (attempt to) fit into the shape. + min_shapes : int, optional + The minimum number of shapes to (attempt to) fit into the shape. + min_size : int, optional + The minimum dimension of each shape to fit into the image. + max_size : int, optional + The maximum dimension of each shape to fit into the image. + multichannel : bool, optional + If True, the generated image has ``num_channels`` color channels, + otherwise generates grayscale image. + num_channels : int, optional + Number of channels in the generated image. If 1, generate monochrome + images, else color images with multiple channels. Ignored if + ``multichannel`` is set to False. 
+ shape : {rectangle, circle, triangle, None} str, optional + The name of the shape to generate or `None` to pick random ones. + intensity_range : {tuple of tuples of uint8, tuple of uint8}, optional + The range of values to sample pixel values from. For grayscale images + the format is (min, max). For multichannel - ((min, max),) if the + ranges are equal across the channels, and ((min_0, max_0), ... (min_N, max_N)) + if they differ. As the function supports generation of uint8 arrays only, + the maximum range is (0, 255). If None, set to (0, 254) for each + channel reserving color of intensity = 255 for background. + allow_overlap : bool, optional + If `True`, allow shapes to overlap. + num_trials : int, optional + How often to attempt to fit a shape into the image before skipping it. + seed : int, optional + Seed to initialize the random number generator. + If `None`, a random seed from the operating system is used. + + Returns + ------- + image : uint8 array + An image with the fitted shapes. + labels : list + A list of labels, one per shape in the image. Each label is a + (category, ((r0, r1), (c0, c1))) tuple specifying the category and + bounding box coordinates of the shape. 
+ + Examples + -------- + >>> import skimage.draw + >>> image, labels = skimage.draw.random_shapes((32, 32), max_shapes=3) + >>> image # doctest: +SKIP + array([ + [[255, 255, 255], + [255, 255, 255], + [255, 255, 255], + ..., + [255, 255, 255], + [255, 255, 255], + [255, 255, 255]]], dtype=uint8) + >>> labels # doctest: +SKIP + [('circle', ((22, 18), (25, 21))), + ('triangle', ((5, 6), (13, 13)))] + """ + if min_size > image_shape[0] or min_size > image_shape[1]: + raise ValueError('Minimum dimension must be less than ncols and nrows') + max_size = max_size or max(image_shape[0], image_shape[1]) + + if not multichannel: + num_channels = 1 + + if intensity_range is None: + intensity_range = (0, 254) if num_channels == 1 else ((0, 254), ) + else: + tmp = (intensity_range, ) if num_channels == 1 else intensity_range + for intensity_pair in tmp: + for intensity in intensity_pair: + if not (0 <= intensity <= 255): + msg = 'Intensity range must lie within (0, 255) interval' + raise ValueError(msg) + + random = np.random.RandomState(random_seed) + user_shape = shape + image_shape = (image_shape[0], image_shape[1], num_channels) + image = np.ones(image_shape, dtype=np.uint8) * 255 + filled = np.zeros(image_shape, dtype=bool) + labels = [] + + num_shapes = random.randint(min_shapes, max_shapes + 1) + colors = _generate_random_colors(num_shapes, num_channels, + intensity_range, random) + for shape_idx in range(num_shapes): + if user_shape is None: + shape_generator = random.choice(SHAPE_CHOICES) + else: + shape_generator = SHAPE_GENERATORS[user_shape] + shape = (min_size, max_size) + for _ in range(num_trials): + # Pick start coordinates. + column = random.randint(image_shape[1]) + row = random.randint(image_shape[0]) + point = (row, column) + try: + indices, label = shape_generator(point, image_shape, shape, + random) + except ArithmeticError: + # Couldn't fit the shape, skip it. + continue + # Check if there is an overlap where the mask is nonzero. 
+ if allow_overlap or not filled[indices].any(): + image[indices] = colors[shape_idx] + filled[indices] = True + labels.append(label) + break + else: + warn('Could not fit any shapes to image, ' + 'consider reducing the minimum dimension') + + if not multichannel: + image = np.squeeze(image, axis=2) + return image, labels diff -Nru skimage-0.13.1/skimage/draw/tests/test_draw3d.py skimage-0.14.0/skimage/draw/tests/test_draw3d.py --- skimage-0.13.1/skimage/draw/tests/test_draw3d.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/draw/tests/test_draw3d.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,23 +1,23 @@ import numpy as np -from numpy.testing import assert_array_equal, assert_allclose -from nose.tools import raises +from skimage._shared.testing import assert_array_equal, assert_allclose -from skimage.draw import ellipsoid, ellipsoid_stats +from skimage.draw import ellipsoid, ellipsoid_stats, rectangle +from skimage._shared import testing -@raises(ValueError) def test_ellipsoid_sign_parameters1(): - ellipsoid(-1, 2, 2) + with testing.raises(ValueError): + ellipsoid(-1, 2, 2) -@raises(ValueError) def test_ellipsoid_sign_parameters2(): - ellipsoid(0, 2, 2) + with testing.raises(ValueError): + ellipsoid(0, 2, 2) -@raises(ValueError) def test_ellipsoid_sign_parameters3(): - ellipsoid(-3, -2, 2) + with testing.raises(ValueError): + ellipsoid(-3, -2, 2) def test_ellipsoid_bool(): @@ -116,5 +116,59 @@ assert_allclose(37426.3, surf, atol=1e-1) -if __name__ == "__main__": - np.testing.run_module_suite() +def test_rect_3d_extent(): + expected = np.array([[[0, 0, 1, 1, 1], + [0, 0, 1, 1, 1], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], + [[0, 0, 1, 1, 1], + [0, 0, 1, 1, 1], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], + [[0, 0, 1, 1, 1], + [0, 0, 1, 1, 1], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], + [[0, 0, 1, 1, 1], + [0, 0, 1, 1, 1], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]]], dtype=np.uint8) + img = 
np.zeros((4, 5, 5), dtype=np.uint8) + start = (0, 0, 2) + extent = (5, 2, 3) + pp, rr, cc = rectangle(start, extent=extent, shape=img.shape) + img[pp, rr, cc] = 1 + assert_array_equal(img, expected) + + +def test_rect_3d_end(): + expected = np.array([[[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], + [[0, 0, 1, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], + [[0, 0, 1, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], + [[0, 0, 1, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 1, 1, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]]], dtype=np.uint8) + img = np.zeros((4, 5, 5), dtype=np.uint8) + start = (1, 0, 2) + end = (3, 2, 3) + pp, rr, cc = rectangle(start, end=end, shape=img.shape) + img[pp, rr, cc] = 1 + assert_array_equal(img, expected) diff -Nru skimage-0.13.1/skimage/draw/tests/test_draw.py skimage-0.14.0/skimage/draw/tests/test_draw.py --- skimage-0.13.1/skimage/draw/tests/test_draw.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/draw/tests/test_draw.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,12 +1,13 @@ -from numpy.testing import assert_array_equal, assert_equal, assert_raises, \ - assert_almost_equal import numpy as np from skimage._shared.testing import test_parallel +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal, assert_equal +from skimage._shared.testing import assert_almost_equal from skimage.draw import (set_color, line, line_aa, polygon, polygon_perimeter, circle, circle_perimeter, circle_perimeter_aa, ellipse, ellipse_perimeter, - _bezier_segment, bezier_curve) + _bezier_segment, bezier_curve, rectangle) from skimage.measure import regionprops @@ -29,7 +30,8 @@ set_color(img, (rr, cc), 1, alpha=alpha) # Wrong dimensionality color - assert_raises(ValueError, set_color, img, (rr, cc), (255, 0, 0), alpha=alpha) + with testing.raises(ValueError): + set_color(img, (rr, cc), (255, 0, 0), 
alpha=alpha) img = np.zeros((10, 10, 3)) @@ -548,7 +550,8 @@ rr, cc = ellipse(500, 600, 200, 400, rotation=angle) img[rr, cc] = 1 # estimate orientation of ellipse - angle_estim = np.round(regionprops(img)[0].orientation, 3) % (np.pi / 2) + angle_estim_raw = regionprops(img, coordinates='xy')[0].orientation + angle_estim = np.round(angle_estim_raw, 3) % (np.pi / 2) assert_almost_equal(angle_estim, angle % (np.pi / 2), 2) @@ -851,7 +854,8 @@ out[rr, cc] = 1 assert_array_equal(out, expected) - assert_raises(ValueError, polygon_perimeter, [0], [1], clip=True) + with testing.raises(ValueError): + polygon_perimeter([0], [1], clip=True) def test_polygon_perimeter_outside_image(): @@ -861,6 +865,29 @@ assert_equal(len(cc), 0) -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() +def test_rectangle_end(): + expected = np.array([[0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]], dtype=np.uint8) + img = np.zeros((5, 5), dtype=np.uint8) + start = (0, 1) + end = (3, 3) + rr, cc = rectangle(start, end=end, shape=img.shape) + img[rr, cc] = 1 + assert_array_equal(img, expected) + + +def test_rectangle_extent(): + expected = np.array([[0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 0]], dtype=np.uint8) + img = np.zeros((5, 5), dtype=np.uint8) + start = (1, 1) + extent = (3, 3) + rr, cc = rectangle(start, extent=extent, shape=img.shape) + img[rr, cc] = 1 + assert_array_equal(img, expected) diff -Nru skimage-0.13.1/skimage/draw/tests/test_random_shapes.py skimage-0.14.0/skimage/draw/tests/test_random_shapes.py --- skimage-0.13.1/skimage/draw/tests/test_random_shapes.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/draw/tests/test_random_shapes.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,143 @@ +import numpy as np + +from skimage.draw import random_shapes + +from skimage._shared import testing +from skimage._shared.testing import 
expected_warnings + + +def test_generates_color_images_with_correct_shape(): + image, _ = random_shapes((128, 128), max_shapes=10) + assert image.shape == (128, 128, 3) + + +def test_generates_gray_images_with_correct_shape(): + image, _ = random_shapes( + (4567, 123), min_shapes=3, max_shapes=20, multichannel=False) + assert image.shape == (4567, 123) + + +def test_generates_correct_bounding_boxes_for_rectangles(): + image, labels = random_shapes( + (128, 128), + max_shapes=1, + shape='rectangle', + random_seed=42) + assert len(labels) == 1 + label, bbox = labels[0] + assert label == 'rectangle', label + + crop = image[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1]] + + # The crop is filled. + assert (crop >= 0).all() and (crop < 255).all() + + # The crop is complete. + image[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1]] = 255 + assert (image == 255).all() + + +def test_generates_correct_bounding_boxes_for_triangles(): + image, labels = random_shapes( + (128, 128), + max_shapes=1, + shape='triangle', + random_seed=42) + assert len(labels) == 1 + label, bbox = labels[0] + assert label == 'triangle', label + + crop = image[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1]] + + # The crop is filled. + assert (crop >= 0).any() and (crop < 255).any() + + # The crop is complete. + image[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1]] = 255 + assert (image == 255).all() + + +def test_generates_correct_bounding_boxes_for_circles(): + image, labels = random_shapes( + (43, 44), + max_shapes=1, + min_size=20, + max_size=20, + shape='circle', + random_seed=42) + assert len(labels) == 1 + label, bbox = labels[0] + assert label == 'circle', label + + crop = image[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1]] + + # The crop is filled. + assert (crop >= 0).any() and (crop < 255).any() + + # The crop is complete. 
+ image[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1]] = 255 + assert (image == 255).all() + + +def test_generate_circle_throws_when_size_too_small(): + with testing.raises(ValueError): + random_shapes( + (64, 128), max_shapes=1, min_size=1, max_size=1, shape='circle') + + +def test_generate_triangle_throws_when_size_too_small(): + with testing.raises(ValueError): + random_shapes( + (128, 64), max_shapes=1, min_size=1, max_size=1, shape='triangle') + + +def test_can_generate_one_by_one_rectangle(): + image, labels = random_shapes( + (50, 128), + max_shapes=1, + min_size=1, + max_size=1, + shape='rectangle') + assert len(labels) == 1 + _, bbox = labels[0] + crop = image[bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1]] + + # rgb + assert (np.shape(crop) == (1, 1, 3) and np.any(crop >= 1) + and np.any(crop < 255)) + + +def test_throws_when_intensity_range_out_of_range(): + with testing.raises(ValueError): + random_shapes((1000, 1234), max_shapes=1, multichannel=False, + intensity_range=(0, 256)) + with testing.raises(ValueError): + random_shapes((2, 2), max_shapes=1, + intensity_range=((-1, 255),)) + + +def test_returns_empty_labels_and_white_image_when_cannot_fit_shape(): + # The circle will never fit this. 
+ with expected_warnings(['Could not fit']): + image, labels = random_shapes( + (10000, 10000), max_shapes=1, min_size=10000, shape='circle') + assert len(labels) == 0 + assert (image == 255).all() + + +def test_random_shapes_is_reproducible_with_seed(): + random_seed = 42 + labels = [] + for _ in range(5): + _, label = random_shapes((128, 128), max_shapes=5, + random_seed=random_seed) + labels.append(label) + assert all(other == labels[0] for other in labels[1:]) + + +def test_generates_white_image_when_intensity_range_255(): + image, labels = random_shapes((128, 128), max_shapes=3, + intensity_range=((255, 255),), + random_seed=42) + assert len(labels) > 0 + assert (image == 255).all() diff -Nru skimage-0.13.1/skimage/exposure/_adapthist.py skimage-0.14.0/skimage/exposure/_adapthist.py --- skimage-0.13.1/skimage/exposure/_adapthist.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/exposure/_adapthist.py 2018-05-29 01:27:44.000000000 +0000 @@ -16,7 +16,7 @@ from __future__ import division import numbers import numpy as np -from .. import img_as_float, img_as_uint +from ..util import img_as_float, img_as_uint from ..color.adapt_rgb import adapt_rgb, hsv_value from ..exposure import rescale_intensity @@ -26,7 +26,7 @@ @adapt_rgb(hsv_value) def equalize_adapthist(image, kernel_size=None, - clip_limit=0.01, nbins=256, **kwargs): + clip_limit=0.01, nbins=256): """Contrast Limited Adaptive Histogram Equalization (CLAHE). 
An algorithm for local contrast enhancement, that uses histograms computed @@ -74,11 +74,6 @@ image = img_as_uint(image) image = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1)) - if kwargs: - if 'ntiles_x' in kwargs or 'ntiles_y' in kwargs: - msg = '`ntiles_*` have been deprecated in favor of `kernel_size`' - raise ValueError(msg) - if kernel_size is None: kernel_size = (image.shape[0] // 8, image.shape[1] // 8) elif isinstance(kernel_size, numbers.Number): diff -Nru skimage-0.13.1/skimage/exposure/exposure.py skimage-0.14.0/skimage/exposure/exposure.py --- skimage-0.13.1/skimage/exposure/exposure.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/exposure/exposure.py 2018-05-29 01:27:44.000000000 +0000 @@ -468,9 +468,9 @@ The low contrast fraction threshold. An image is considered low- contrast when its range of brightness spans less than this fraction of its data type's full range. [1]_ - lower_bound : float, optional + lower_percentile : float, optional Disregard values below this percentile when computing image contrast. - upper_bound : float, optional + upper_percentile : float, optional Disregard values above this percentile when computing image contrast. method : str, optional The contrast determination method. 
Right now the only available diff -Nru skimage-0.13.1/skimage/exposure/tests/test_exposure.py skimage-0.14.0/skimage/exposure/tests/test_exposure.py --- skimage-0.13.1/skimage/exposure/tests/test_exposure.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/exposure/tests/test_exposure.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,16 +1,18 @@ import warnings import numpy as np -from numpy.testing import (assert_array_equal, assert_array_almost_equal, - assert_raises, assert_almost_equal) - import skimage from skimage import data from skimage import exposure from skimage.exposure.exposure import intensity_range from skimage.color import rgb2gray from skimage.util.dtype import dtype_range + from skimage._shared._warnings import expected_warnings +from skimage._shared import testing +from skimage._shared.testing import (assert_array_equal, + assert_array_almost_equal, + assert_almost_equal) # Test integer histograms @@ -93,24 +95,26 @@ # ==================== -def test_intensity_range_uint8(): +@testing.parametrize("test_input,expected", [ + ('image', [0, 1]), + ('dtype', [0, 255]), + ((10, 20), [10, 20]) +]) +def test_intensity_range_uint8(test_input, expected): image = np.array([0, 1], dtype=np.uint8) - input_and_expected = [('image', [0, 1]), - ('dtype', [0, 255]), - ((10, 20), [10, 20])] - for range_values, expected_values in input_and_expected: - out = intensity_range(image, range_values=range_values) - yield assert_array_equal, out, expected_values + out = intensity_range(image, range_values=test_input) + assert_array_equal(out, expected) -def test_intensity_range_float(): +@testing.parametrize("test_input,expected", [ + ('image', [0.1, 0.2]), + ('dtype', [-1, 1]), + ((0.3, 0.4), [0.3, 0.4]) +]) +def test_intensity_range_float(test_input, expected): image = np.array([0.1, 0.2], dtype=np.float64) - input_and_expected = [('image', [0.1, 0.2]), - ('dtype', [-1, 1]), - ((0.3, 0.4), [0.3, 0.4])] - for range_values, expected_values in input_and_expected: - 
out = intensity_range(image, range_values=range_values) - yield assert_array_equal, out, expected_values + out = intensity_range(image, range_values=test_input) + assert_array_equal(out, expected) def test_intensity_range_clipped_float(): @@ -187,21 +191,6 @@ # Test adaptive histogram equalization # ==================================== -def test_adapthist_scalar(): - """Test a scalar uint8 image - """ - img = skimage.img_as_ubyte(data.moon()) - adapted = exposure.equalize_adapthist(img, kernel_size=64, clip_limit=0.02) - assert adapted.min() == 0.0 - assert adapted.max() == 1.0 - assert img.shape == adapted.shape - full_scale = skimage.exposure.rescale_intensity(skimage.img_as_float(img)) - - assert_almost_equal(peak_snr(full_scale, adapted), 102.066, 3) - assert_almost_equal(norm_brightness_err(full_scale, adapted), - 0.038, 3) - - def test_adapthist_grayscale(): """Test a grayscale float image """ @@ -252,14 +241,6 @@ assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.0248, 3) -def test_adapthist_ntiles_raises(): - img = skimage.img_as_ubyte(data.moon()) - assert_raises(ValueError, exposure.equalize_adapthist, img, ntiles_x=8) - assert_raises(ValueError, exposure.equalize_adapthist, img, ntiles_y=8) - assert_raises(ValueError, exposure.equalize_adapthist, img, - ntiles_x=8, ntiles_y=8) - - def peak_snr(img1, img2): """Peak signal to noise ratio of two images @@ -359,7 +340,8 @@ def test_adjust_gamma_neggative(): image = np.arange(0, 255, 4, np.uint8).reshape((8, 8)) - assert_raises(ValueError, exposure.adjust_gamma, image, -1) + with testing.raises(ValueError): + exposure.adjust_gamma(image, -1) # Test Logarithmic Correction @@ -478,7 +460,8 @@ def test_negative(): image = np.arange(-10, 245, 4).reshape((8, 8)).astype(np.double) - assert_raises(ValueError, exposure.adjust_gamma, image) + with testing.raises(ValueError): + exposure.adjust_gamma(image) def test_is_low_contrast(): @@ -495,8 +478,3 @@ image = (image.astype(np.uint16)) * 2**8 assert 
exposure.is_low_contrast(image) assert not exposure.is_low_contrast(image, upper_percentile=100) - - -if __name__ == '__main__': - from numpy import testing - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/external/test_tifffile.py skimage-0.14.0/skimage/external/test_tifffile.py --- skimage-0.13.1/skimage/external/test_tifffile.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/external/test_tifffile.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,23 +1,22 @@ import os -import numpy as np +import itertools +from tempfile import NamedTemporaryFile +from .tifffile import imread, imsave +import numpy as np try: import skimage as si except Exception: si = None - -from numpy.testing import ( - assert_array_equal, assert_array_almost_equal, run_module_suite) -from numpy.testing.decorators import skipif - -from tempfile import NamedTemporaryFile -from .tifffile import imread, imsave +from skimage._shared import testing +from skimage._shared.testing import (assert_array_equal, + assert_array_almost_equal) np.random.seed(0) -@skipif(si is None) +@testing.skipif(si is None, reason="skimage not installed") def test_imread_uint16(): expected = np.load(os.path.join(si.data_dir, 'chessboard_GRAY_U8.npy')) img = imread(os.path.join(si.data_dir, 'chessboard_GRAY_U16.tif')) @@ -25,7 +24,7 @@ assert_array_almost_equal(img, expected) -@skipif(si is None) +@testing.skipif(si is None, reason="skimage not installed") def test_imread_uint16_big_endian(): expected = np.load(os.path.join(si.data_dir, 'chessboard_GRAY_U8.npy')) img = imread(os.path.join(si.data_dir, 'chessboard_GRAY_U16B.tif')) @@ -59,7 +58,7 @@ assert_array_equal(x, y) f.close() - #input: byte stream + # input: byte stream from io import BytesIO b = BytesIO() imsave(b, x) @@ -67,18 +66,15 @@ y = imread(b) assert_array_equal(x, y) - def test_imsave_roundtrip(self): - for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]: - for dtype in (np.uint8, np.uint16, np.float32, np.int16, - np.float64): - x = 
np.random.rand(*shape) - - if not np.issubdtype(dtype, float): - x = (x * np.iinfo(dtype).max).astype(dtype) - else: - x = x.astype(dtype) - yield self.roundtrip, dtype, x - + shapes = ((10, 10), (10, 10, 3), (10, 10, 4)) + dtypes = (np.uint8, np.uint16, np.float32, np.int16, np.float64) -if __name__ == "__main__": - run_module_suite() + @testing.parametrize("shape, dtype", itertools.product(shapes, dtypes)) + def test_imsave_roundtrip(self, shape, dtype): + x = np.random.rand(*shape) + + if not np.issubdtype(dtype, np.floating): + x = (x * np.iinfo(dtype).max).astype(dtype) + else: + x = x.astype(dtype) + self.roundtrip(dtype, x) diff -Nru skimage-0.13.1/skimage/feature/blob.py skimage-0.14.0/skimage/feature/blob.py --- skimage-0.13.1/skimage/feature/blob.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/blob.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,9 @@ - +from __future__ import division import numpy as np from scipy.ndimage import gaussian_filter, gaussian_laplace -import itertools as itt import math -from math import sqrt, hypot, log -from numpy import arccos +from math import sqrt, log +from scipy import spatial from ..util import img_as_float from .peak import peak_local_max from ._hessian_det_appx import _hessian_matrix_det @@ -17,6 +16,72 @@ # Theory behind: http://en.wikipedia.org/wiki/Blob_detection (04.04.2013) +def _compute_disk_overlap(d, r1, r2): + """ + Compute surface overlap between two disks of radii ``r1`` and ``r2``, + with centers separated by a distance ``d``. + + Parameters + ---------- + d : float + Distance between centers. + r1 : float + Radius of the first disk. + r2 : float + Radius of the second disk. + + Returns + ------- + vol: float + Volume of the overlap between the two disks. 
+ """ + + ratio1 = (d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1) + ratio1 = np.clip(ratio1, -1, 1) + acos1 = math.acos(ratio1) + + ratio2 = (d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2) + ratio2 = np.clip(ratio2, -1, 1) + acos2 = math.acos(ratio2) + + a = -d + r2 + r1 + b = d - r2 + r1 + c = d + r2 - r1 + d = d + r2 + r1 + area = (r1 ** 2 * acos1 + r2 ** 2 * acos2 - + 0.5 * sqrt(abs(a * b * c * d))) + return area / (math.pi * (min(r1, r2) ** 2)) + + +def _compute_sphere_overlap(d, r1, r2): + """ + Compute volume overlap between two spheres of radii ``r1`` and ``r2``, + with centers separated by a distance ``d``. + + Parameters + ---------- + d : float + Distance between centers. + r1 : float + Radius of the first sphere. + r2 : float + Radius of the second sphere. + + Returns + ------- + vol: float + Volume of the overlap between the two spheres. + + Notes + ----- + See for example http://mathworld.wolfram.com/Sphere-SphereIntersection.html + for more details. + """ + vol = (math.pi / (12 * d) * (r1 + r2 - d)**2 * + (d**2 + 2 * d * (r1 + r2) - 3 * (r1**2 + r2**2) + 6 * r1 * r2)) + return vol / (4./3 * math.pi * min(r1, r2) ** 3) + + def _blob_overlap(blob1, blob2): """Finds the overlapping area fraction between two blobs. @@ -24,28 +89,30 @@ Parameters ---------- - blob1 : sequence - A sequence of ``(y,x,sigma)``, where ``x,y`` are coordinates of blob - and sigma is the standard deviation of the Gaussian kernel which - detected the blob. - blob2 : sequence - A sequence of ``(y,x,sigma)``, where ``x,y`` are coordinates of blob - and sigma is the standard deviation of the Gaussian kernel which - detected the blob. + blob1 : sequence of arrays + A sequence of ``(row, col, sigma)`` or ``(pln, row, col, sigma)``, + where ``row, col`` (or ``(pln, row, col)``) are coordinates + of blob and ``sigma`` is the standard deviation of the Gaussian kernel + which detected the blob. 
+ blob2 : sequence of arrays + A sequence of ``(row, col, sigma)`` or ``(pln, row, col, sigma)``, + where ``row, col`` (or ``(pln, row, col)``) are coordinates + of blob and ``sigma`` is the standard deviation of the Gaussian kernel + which detected the blob. Returns ------- f : float - Fraction of overlapped area. + Fraction of overlapped area (or volume in 3D). """ - root2 = sqrt(2) + n_dim = len(blob1) - 1 + root_ndim = sqrt(n_dim) # extent of the blob is given by sqrt(2)*scale - r1 = blob1[2] * root2 - r2 = blob2[2] * root2 - - d = hypot(blob1[0] - blob2[0], blob1[1] - blob2[1]) + r1 = blob1[-1] * root_ndim + r2 = blob2[-1] * root_ndim + d = sqrt(np.sum((blob1[:-1] - blob2[:-1])**2)) if d > r1 + r2: return 0 @@ -53,21 +120,11 @@ if d <= abs(r1 - r2): return 1 - ratio1 = (d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1) - ratio1 = np.clip(ratio1, -1, 1) - acos1 = arccos(ratio1) - - ratio2 = (d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2) - ratio2 = np.clip(ratio2, -1, 1) - acos2 = arccos(ratio2) - - a = -d + r2 + r1 - b = d - r2 + r1 - c = d + r2 - r1 - d = d + r2 + r1 - area = r1 ** 2 * acos1 + r2 ** 2 * acos2 - 0.5 * sqrt(abs(a * b * c * d)) + if n_dim == 2: + return _compute_disk_overlap(d, r1, r2) - return area / (math.pi * (min(r1, r2) ** 2)) + else: # http://mathworld.wolfram.com/Sphere-SphereIntersection.html + return _compute_sphere_overlap(d, r1, r2) def _prune_blobs(blobs_array, overlap): @@ -76,9 +133,12 @@ Parameters ---------- blobs_array : ndarray - A 2d array with each row representing 3 values, ``(y,x,sigma)`` - where ``(y,x)`` are coordinates of the blob and ``sigma`` is the - standard deviation of the Gaussian kernel which detected the blob. + A 2d array with each row representing 3 (or 4) values, + ``(row, col, sigma)`` or ``(pln, row, col, sigma)`` in 3D, + where ``(row, col)`` (``(pln, row, col)``) are coordinates of the blob + and ``sigma`` is the standard deviation of the Gaussian kernel which + detected the blob. 
+ This array must not have a dimension of size 0. overlap : float A value between 0 and 1. If the fraction of area overlapping for 2 blobs is greater than `overlap` the smaller blob is eliminated. @@ -88,18 +148,22 @@ A : ndarray `array` with overlapping blobs removed. """ + sigma = blobs_array[:, -1].max() + distance = 2 * sigma * sqrt(blobs_array.shape[1] - 1) + tree = spatial.cKDTree(blobs_array[:, :-1]) + pairs = np.array(list(tree.query_pairs(distance))) + if len(pairs) == 0: + return blobs_array + else: + for (i, j) in pairs: + blob1, blob2 = blobs_array[i], blobs_array[j] + if _blob_overlap(blob1, blob2) > overlap: + if blob1[-1] > blob2[-1]: + blob2[-1] = 0 + else: + blob1[-1] = 0 - # iterating again might eliminate more blobs, but one iteration suffices - # for most cases - for blob1, blob2 in itt.combinations(blobs_array, 2): - if _blob_overlap(blob1, blob2) > overlap: - if blob1[2] > blob2[2]: - blob2[2] = -1 - else: - blob1[2] = -1 - - # return blobs_array[blobs_array[:, 2] > 0] - return np.array([b for b in blobs_array if b[2] > 0]) + return np.array([b for b in blobs_array if b[-1] > 0]) def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0, @@ -112,7 +176,7 @@ Parameters ---------- - image : ndarray + image : 2D or 3D ndarray Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : float, optional @@ -134,10 +198,12 @@ Returns ------- - A : (n, 3) ndarray - A 2d array with each row representing 3 values, ``(y,x,sigma)`` - where ``(y,x)`` are coordinates of the blob and ``sigma`` is the - standard deviation of the Gaussian kernel which detected the blob. + A : (n, image.ndim + 1) ndarray + A 2d array with each row representing 3 values for a 2D image, + and 4 values for a 3D image: ``(r, c, sigma)`` or ``(p, r, c, sigma)`` + where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and + ``sigma`` is the standard deviation of the Gaussian kernel which + detected the blob. 
References ---------- @@ -174,10 +240,9 @@ Notes ----- - The radius of each blob is approximately :math:`\sqrt{2}sigma`. + The radius of each blob is approximately :math:`\sqrt{2}\sigma` for + a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. """ - assert_nD(image, 2) - image = img_as_float(image) # k such that min_sigma*(sigma_ratio**k) > max_sigma @@ -193,22 +258,22 @@ # multiplying with standard deviation provides scale invariance dog_images = [(gaussian_images[i] - gaussian_images[i + 1]) * sigma_list[i] for i in range(k)] - image_cube = np.dstack(dog_images) + + image_cube = np.stack(dog_images, axis=-1) # local_maxima = get_local_maxima(image_cube, threshold) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, - footprint=np.ones((3, 3, 3)), + footprint=np.ones((3,) * (image.ndim + 1)), threshold_rel=0.0, exclude_border=False) # Catch no peaks if local_maxima.size == 0: - return np.empty((0,3)) + return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # Convert the last index to its corresponding scale value - lm[:, 2] = sigma_list[local_maxima[:, 2]] - local_maxima = lm - return _prune_blobs(local_maxima, overlap) + lm[:, -1] = sigma_list[local_maxima[:, -1]] + return _prune_blobs(lm, overlap) def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2, @@ -221,7 +286,7 @@ Parameters ---------- - image : ndarray + image : 2D or 3D ndarray Input grayscale image, blobs are assumed to be light on dark background (white on black). min_sigma : float, optional @@ -247,10 +312,12 @@ Returns ------- - A : (n, 3) ndarray - A 2d array with each row representing 3 values, ``(y,x,sigma)`` - where ``(y,x)`` are coordinates of the blob and ``sigma`` is the - standard deviation of the Gaussian kernel which detected the blob. 
+ A : (n, image.ndim + 1) ndarray + A 2d array with each row representing 3 values for a 2D image, + and 4 values for a 3D image: ``(r, c, sigma)`` or ``(p, r, c, sigma)`` + where ``(r, c)`` or ``(p, r, c)`` are coordinates of the blob and + ``sigma`` is the standard deviation of the Gaussian kernel which + detected the blob. References ---------- @@ -282,11 +349,9 @@ Notes ----- - The radius of each blob is approximately :math:`\sqrt{2}sigma`. + The radius of each blob is approximately :math:`\sqrt{2}\sigma` for + a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image. """ - - assert_nD(image, 2) - image = img_as_float(image) if log_scale: @@ -298,22 +363,22 @@ # computing gaussian laplace # s**2 provides scale invariance gl_images = [-gaussian_laplace(image, s) * s ** 2 for s in sigma_list] - image_cube = np.dstack(gl_images) + + image_cube = np.stack(gl_images, axis=-1) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, - footprint=np.ones((3, 3, 3)), + footprint=np.ones((3,) * (image.ndim + 1)), threshold_rel=0.0, exclude_border=False) # Catch no peaks if local_maxima.size == 0: - return np.empty((0,3)) + return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # Convert the last index to its corresponding scale value - lm[:, 2] = sigma_list[local_maxima[:, 2]] - local_maxima = lm - return _prune_blobs(local_maxima, overlap) + lm[:, -1] = sigma_list[local_maxima[:, -1]] + return _prune_blobs(lm, overlap) def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01, @@ -327,7 +392,7 @@ Parameters ---------- - image : ndarray + image : 2D ndarray Input grayscale image.Blobs can either be light on dark or vice versa. min_sigma : float, optional The minimum standard deviation for Gaussian Kernel used to compute @@ -398,7 +463,6 @@ this method can't be used for detecting blobs of radius less than `3px` due to the box filters used in the approximation of Hessian Determinant. 
""" - assert_nD(image, 2) image = img_as_float(image) @@ -414,16 +478,15 @@ image_cube = np.dstack(hessian_images) local_maxima = peak_local_max(image_cube, threshold_abs=threshold, - footprint=np.ones((3, 3, 3)), + footprint=np.ones((3,) * image_cube.ndim), threshold_rel=0.0, exclude_border=False) # Catch no peaks if local_maxima.size == 0: - return np.empty((0,3)) + return np.empty((0, 3)) # Convert local_maxima to float64 lm = local_maxima.astype(np.float64) # Convert the last index to its corresponding scale value - lm[:, 2] = sigma_list[local_maxima[:, 2]] - local_maxima = lm - return _prune_blobs(local_maxima, overlap) + lm[:, -1] = sigma_list[local_maxima[:, -1]] + return _prune_blobs(lm, overlap) diff -Nru skimage-0.13.1/skimage/feature/_canny.py skimage-0.14.0/skimage/feature/_canny.py --- skimage-0.13.1/skimage/feature/_canny.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/_canny.py 2018-05-29 01:27:44.000000000 +0000 @@ -57,7 +57,7 @@ Parameters ----------- image : 2D array - Greyscale input image to detect edges on; can be of any dtype. + Grayscale input image to detect edges on; can be of any dtype. sigma : float Standard deviation of the Gaussian filter. low_threshold : float diff -Nru skimage-0.13.1/skimage/feature/censure.py skimage-0.14.0/skimage/feature/censure.py --- skimage-0.13.1/skimage/feature/censure.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/censure.py 2018-05-29 01:27:44.000000000 +0000 @@ -148,12 +148,14 @@ .. [1] Motilal Agrawal, Kurt Konolige and Morten Rufus Blas "CENSURE: Center Surround Extremas for Realtime Feature Detection and Matching", - http://link.springer.com/content/pdf/10.1007%2F978-3-540-88693-8_8.pdf + https://link.springer.com/chapter/10.1007/978-3-540-88693-8_8 + DOI:10.1007/978-3-540-88693-8_8 .. 
[2] Adam Schmidt, Marek Kraft, Michal Fularz and Zuzanna Domagala "Comparative Assessment of Point Feature Detectors and Descriptors in the Context of Robot Navigation" - http://www.jamris.org/01_2013/saveas.php?QUEST=JAMRIS_No01_2013_P_11-20.pdf + http://yadda.icm.edu.pl/yadda/element/bwmeta1.element.baztech-268aaf28-0faf-4872-a4df-7e2e61cb364c/c/Schmidt_comparative.pdf + DOI:10.1.1.465.1117 Examples -------- diff -Nru skimage-0.13.1/skimage/feature/corner_cy.pyx skimage-0.14.0/skimage/feature/corner_cy.pyx --- skimage-0.13.1/skimage/feature/corner_cy.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/corner_cy.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -65,7 +65,7 @@ cdef double[:, ::1] cimage = np.ascontiguousarray(img_as_float(image)) cdef double[:, ::1] out = np.zeros(image.shape, dtype=np.double) - cdef double msum, min_msum + cdef double msum, min_msum, t cdef Py_ssize_t r, c, br, bc, mr, mc, a, b with nogil: @@ -78,8 +78,8 @@ msum = 0 for mr in range(- window_size, window_size + 1): for mc in range(- window_size, window_size + 1): - msum += (cimage[r + mr, c + mc] - - cimage[br + mr, bc + mc]) ** 2 + t = cimage[r + mr, c + mc] - cimage[br + mr, bc + mc] + msum += t * t min_msum = min(msum, min_msum) out[r, c] = min_msum diff -Nru skimage-0.13.1/skimage/feature/corner.py skimage-0.14.0/skimage/feature/corner.py --- skimage-0.13.1/skimage/feature/corner.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/corner.py 2018-05-29 01:27:44.000000000 +0000 @@ -14,6 +14,7 @@ from .corner_cy import _corner_moravec, _corner_orientations from warnings import warn + def _compute_derivatives(image, mode='constant', cval=0): """Compute derivatives in x and y direction using the Sobel operator. @@ -182,11 +183,36 @@ return H_elems -def hessian_matrix_det(image, sigma=1): - """Computes the approximate Hessian Determinant over an image. 
- This method uses box filters over integral images to compute the - approximate Hessian Determinant as described in [1]_. +def _hessian_matrix_image(H_elems): + """Convert the upper-diagonal elements of the Hessian matrix to a matrix. + + Parameters + ---------- + H_elems : list of array + The upper-diagonal elements of the Hessian matrix, as returned by + `hessian_matrix`. + + Returns + ------- + hessian_image : array + An array of shape ``(M, N[, ...], image.ndim, image.ndim)``, + containing the Hessian matrix corresponding to each coordinate. + """ + image = H_elems[0] + hessian_image = np.zeros(image.shape + (image.ndim, image.ndim)) + for idx, (row, col) in \ + enumerate(combinations_with_replacement(range(image.ndim), 2)): + hessian_image[..., row, col] = H_elems[idx] + hessian_image[..., col, row] = H_elems[idx] + return hessian_image + + +def hessian_matrix_det(image, sigma=1, approximate=True): + """Compute the approximate Hessian Determinant over an image. + + The 2D approximate method uses box filters over integral images to + compute the approximate Hessian Determinant, as described in [1]_. Parameters ---------- @@ -195,6 +221,9 @@ sigma : float, optional Standard deviation used for the Gaussian kernel, used for the Hessian matrix. + approximate : bool, optional + If ``True`` and the image is 2D, use a much faster approximate + computation. This argument has no effect on 3D and higher images. Returns ------- @@ -209,16 +238,19 @@ Notes ----- - The running time of this method only depends on size of the image. It is - independent of `sigma` as one would expect. The downside is that the - result for `sigma` less than `3` is not accurate, i.e., not similar to - the result obtained if someone computed the Hessian and took it's - determinant. + For 2D images when ``approximate=True``, the running time of this method + only depends on size of the image. It is independent of `sigma` as one + would expect. 
The downside is that the result for `sigma` less than `3` + is not accurate, i.e., not similar to the result obtained if someone + computed the Hessian and took its determinant. """ - image = img_as_float(image) - image = integral_image(image) - return np.array(_hessian_matrix_det(image, sigma)) + if image.ndim == 2 and approximate: + integral = integral_image(image) + return np.array(_hessian_matrix_det(integral, sigma)) + else: # slower brute-force implementation for nD images + hessian_mat_array = _hessian_matrix_image(hessian_matrix(image, sigma)) + return np.linalg.det(hessian_mat_array) def _image_orthogonal_matrix22_eigvals(M00, M01, M11): @@ -264,41 +296,57 @@ return _image_orthogonal_matrix22_eigvals(Axx, Axy, Ayy) -def hessian_matrix_eigvals(Hxx, Hxy, Hyy): +def hessian_matrix_eigvals(H_elems, Hxy=None, Hyy=None, Hxx=None): """Compute Eigenvalues of Hessian matrix. Parameters ---------- - Hxx : ndarray + H_elems : list of ndarray + The upper-diagonal elements of the Hessian matrix, as returned + by `hessian_matrix`. + Hxy : ndarray, deprecated Element of the Hessian matrix for each pixel in the input image. - Hxy : ndarray + Hyy : ndarray, deprecated Element of the Hessian matrix for each pixel in the input image. - Hyy : ndarray + Hxx : ndarray, deprecated Element of the Hessian matrix for each pixel in the input image. Returns ------- - l1 : ndarray - Larger eigen value for each input matrix. - l2 : ndarray - Smaller eigen value for each input matrix. + eigs : ndarray + The eigenvalues of the Hessian matrix, in decreasing order. The + eigenvalues are the leading dimension. That is, ``eigs[i, j, k]`` + contains the ith-largest eigenvalue at position (j, k). 
Examples -------- >>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals >>> square = np.zeros((5, 5)) >>> square[2, 2] = 4 - >>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1, order='rc') - >>> hessian_matrix_eigvals(Hxx, Hxy, Hyy)[0] + >>> H_elems = hessian_matrix(square, sigma=0.1, order='rc') + >>> hessian_matrix_eigvals(H_elems)[0] array([[ 0., 0., 2., 0., 0.], [ 0., 1., 0., 1., 0.], [ 2., 0., -2., 0., 2.], [ 0., 1., 0., 1., 0.], [ 0., 0., 2., 0., 0.]]) - """ - - return _image_orthogonal_matrix22_eigvals(Hxx, Hxy, Hyy) + if Hxy is not None: + if Hxx is None: + Hxx = H_elems + H_elems = [Hxx, Hxy, Hyy] + warn('The API of `hessian_matrix_eigvals` has changed. Use a list of ' + 'elements instead of separate arguments. The old version of the ' + 'API will be removed in version 0.16.') + if len(H_elems) == 3: # Use fast Cython code for 2D + eigvals = np.array(_image_orthogonal_matrix22_eigvals(*H_elems)) + else: + matrices = _hessian_matrix_image(H_elems) + # eigvalsh returns eigenvalues in increasing order. 
We want decreasing + eigvals = np.linalg.eigvalsh(matrices)[..., ::-1] + leading_axes = tuple(range(eigvals.ndim - 1)) + eigvals = np.transpose(eigvals, (eigvals.ndim - 1,) + leading_axes) + return eigvals def shape_index(image, sigma=1, mode='constant', cval=0): @@ -367,8 +415,8 @@ [ nan, nan, -0.5, nan, nan]]) """ - Hxx, Hxy, Hyy = hessian_matrix(image, sigma=sigma, mode=mode, cval=cval, order='rc') - l1, l2 = hessian_matrix_eigvals(Hxx, Hxy, Hyy) + H = hessian_matrix(image, sigma=sigma, mode=mode, cval=cval, order='rc') + l1, l2 = hessian_matrix_eigvals(H) return (2.0 / np.pi) * np.arctan((l2 + l1) / (l2 - l1)) diff -Nru skimage-0.13.1/skimage/feature/_daisy.py skimage-0.14.0/skimage/feature/_daisy.py --- skimage-0.13.1/skimage/feature/_daisy.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/_daisy.py 2018-05-29 01:27:44.000000000 +0000 @@ -6,7 +6,7 @@ from .._shared.utils import assert_nD -def daisy(img, step=4, radius=15, rings=3, histograms=8, orientations=8, +def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8, normalization='l1', sigmas=None, ring_radii=None, visualize=False): '''Extract DAISY feature descriptors densely for the given image. @@ -27,8 +27,8 @@ Parameters ---------- - img : (M, N) array - Input image (greyscale). + image : (M, N) array + Input image (grayscale). step : int, optional Distance between descriptor sampling points. radius : int, optional @@ -94,9 +94,9 @@ .. [2] http://cvlab.epfl.ch/software/daisy ''' - assert_nD(img, 2, 'img') + assert_nD(image, 2, 'img') - img = img_as_float(img) + image = img_as_float(image) # Validate parameters. if sigmas is not None and ring_radii is not None \ @@ -115,10 +115,10 @@ raise ValueError('Invalid normalization method.') # Compute image derivatives. 
- dx = np.zeros(img.shape) - dy = np.zeros(img.shape) - dx[:, :-1] = np.diff(img, n=1, axis=1) - dy[:-1, :] = np.diff(img, n=1, axis=0) + dx = np.zeros(image.shape) + dy = np.zeros(image.shape) + dx[:, :-1] = np.diff(image, n=1, axis=1) + dy[:-1, :] = np.diff(image, n=1, axis=0) # Compute gradient orientation and magnitude and their contribution # to the histograms. @@ -127,7 +127,7 @@ orientation_kappa = orientations / pi orientation_angles = [2 * o * pi / orientations - pi for o in range(orientations)] - hist = np.empty((orientations,) + img.shape, dtype=float) + hist = np.empty((orientations,) + image.shape, dtype=float) for i, o in enumerate(orientation_angles): # Weigh bin contribution by the circular normal distribution hist[i, :, :] = exp(orientation_kappa * cos(grad_ori - o)) @@ -145,8 +145,8 @@ # Assemble descriptor grid. theta = [2 * pi * j / histograms for j in range(histograms)] desc_dims = (rings * histograms + 1) * orientations - descs = np.empty((desc_dims, img.shape[0] - 2 * radius, - img.shape[1] - 2 * radius)) + descs = np.empty((desc_dims, image.shape[0] - 2 * radius, + image.shape[1] - 2 * radius)) descs[:orientations, :, :] = hist_smooth[0, :, radius:-radius, radius:-radius] idx = orientations @@ -177,7 +177,7 @@ descs[:, :, i:i + orientations] /= norms[:, :, np.newaxis] if visualize: - descs_img = gray2rgb(img) + descs_img = gray2rgb(image) for i in range(descs.shape[0]): for j in range(descs.shape[1]): # Draw center histogram sigma diff -Nru skimage-0.13.1/skimage/feature/_haar.pxd skimage-0.14.0/skimage/feature/_haar.pxd --- skimage-0.13.1/skimage/feature/_haar.pxd 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/feature/_haar.pxd 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,57 @@ +import cython +from libcpp.vector cimport vector +cimport numpy as cnp + + +ctypedef fused integral_floating: + cnp.uint8_t + cnp.uint16_t + cnp.uint32_t + cnp.uint64_t + cnp.int8_t + cnp.int16_t + cnp.int32_t + cnp.int64_t + cython.floating + + 
+cdef struct Point2D: + Py_ssize_t row + Py_ssize_t col + + +cdef struct Rectangle: + Point2D top_left + Point2D bottom_right + + +cdef inline void set_rectangle_feature(Rectangle* rectangle, + Py_ssize_t top_y, + Py_ssize_t top_x, + Py_ssize_t bottom_y, + Py_ssize_t bottom_x) nogil: + rectangle[0].top_left.row = top_y + rectangle[0].top_left.col = top_x + rectangle[0].bottom_right.row = bottom_y + rectangle[0].bottom_right.col = bottom_x + + +cdef vector[vector[Rectangle]] _haar_like_feature_coord( + Py_ssize_t width, + Py_ssize_t height, + unsigned int feature_type) nogil + + +cpdef haar_like_feature_coord_wrapper(width, height, feature_type) + + +cdef integral_floating[:, ::1] _haar_like_feature( + integral_floating[:, ::1] int_image, + vector[vector[Rectangle]] coord, + Py_ssize_t n_rectangle, Py_ssize_t n_feature) + + +cpdef haar_like_feature_wrapper( + cnp.ndarray[integral_floating, ndim=2] int_image, + r, c, width, height, feature_type, + feature_coord) diff -Nru skimage-0.13.1/skimage/feature/haar.py skimage-0.14.0/skimage/feature/haar.py --- skimage-0.13.1/skimage/feature/haar.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/feature/haar.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,323 @@ +from __future__ import division + +from itertools import chain +from operator import add + +import six +import numpy as np + +from ._haar import haar_like_feature_coord_wrapper +from ._haar import haar_like_feature_wrapper +from ..color import gray2rgb +from ..draw import rectangle +from .._shared.utils import check_random_state +from ..util import img_as_float + +FEATURE_TYPE = ('type-2-x', 'type-2-y', + 'type-3-x', 'type-3-y', + 'type-4') + + +def _validate_feature_type(feature_type): + """Transform feature type to an iterable and check that it exists.""" + if feature_type is None: + feature_type_ = FEATURE_TYPE + else: + if isinstance(feature_type, six.string_types): + feature_type_ = [feature_type] + else: + feature_type_ = feature_type + for 
feat_t in feature_type_: + if feat_t not in FEATURE_TYPE: + raise ValueError( + 'The given feature type is unknown. Got {} instead of one' + ' of {}.'.format(feat_t, FEATURE_TYPE)) + return feature_type_ + + +def haar_like_feature_coord(width, height, feature_type=None): + """Compute the coordinates of Haar-like features. + + Parameters + ---------- + width : int + Width of the detection window. + height : int + Height of the detection window. + feature_type : str or list of str or None, optional + The type of feature to consider: + + - 'type-2-x': 2 rectangles varying along the x axis; + - 'type-2-y': 2 rectangles varying along the y axis; + - 'type-3-x': 3 rectangles varying along the x axis; + - 'type-3-y': 3 rectangles varying along the y axis; + - 'type-4': 4 rectangles varying along x and y axis. + + By default all features are extracted. + + Returns + ------- + feature_coord : (n_features, n_rectangles, 2, 2), ndarray of list of \ +tuple coord + Coordinates of the rectangles for each feature. + feature_type : (n_features,), ndarray of str + The corresponding type for each feature. + + Examples + -------- + >>> import numpy as np + >>> from skimage.transform import integral_image + >>> from skimage.feature import haar_like_feature_coord + >>> feat_coord, feat_type = haar_like_feature_coord(2, 2, 'type-4') + >>> feat_coord # doctest: +SKIP + array([ list([[(0, 0), (0, 0)], [(0, 1), (0, 1)], + [(1, 1), (1, 1)], [(1, 0), (1, 0)]])], dtype=object) + >>> feat_type + array(['type-4'], dtype=object) + + """ + feature_type_ = _validate_feature_type(feature_type) + + feat_coord, feat_type = zip(*[haar_like_feature_coord_wrapper(width, + height, + feat_t) + for feat_t in feature_type_]) + + return np.concatenate(feat_coord), np.hstack(feat_type) + + +def haar_like_feature(int_image, r, c, width, height, feature_type=None, + feature_coord=None): + """Compute the Haar-like features for a region of interest (ROI) of an + integral image. 
+ + Haar-like features have been successfully used for image classification and + object detection [1]_. It has been used for real-time face detection + algorithm proposed in [2]_. + + Parameters + ---------- + int_image : (M, N) ndarray + Integral image for which the features need to be computed. + r : int + Row-coordinate of top left corner of the detection window. + c : int + Column-coordinate of top left corner of the detection window. + width : int + Width of the detection window. + height : int + Height of the detection window. + feature_type : str or list of str or None, optional + The type of feature to consider: + + - 'type-2-x': 2 rectangles varying along the x axis; + - 'type-2-y': 2 rectangles varying along the y axis; + - 'type-3-x': 3 rectangles varying along the x axis; + - 'type-3-y': 3 rectangles varying along the y axis; + - 'type-4': 4 rectangles varying along x and y axis. + + By default all features are extracted. + + If using with `feature_coord`, it should correspond to the feature + type of each associated coordinate feature. + feature_coord : ndarray of list of tuples or None, optional + The array of coordinates to be extracted. This is useful when you want + to recompute only a subset of features. In this case `feature_type` + needs to be an array containing the type of each feature, as returned + by :func:`haar_like_feature_coord`. By default, all coordinates are + computed. + + Returns + ------- + haar_features : (n_features,) ndarray of int or float + Resulting Haar-like features. Each value is equal to the subtraction of + sums of the positive and negative rectangles. The data type depends of + the data type of `int_image`: `int` when the data type of `int_image` + is `uint` or `int` and `float` when the data type of `int_image` is + `float`. + + Notes + ----- + When extracting those features in parallel, be aware that the choice of the + backend (i.e. multiprocessing vs threading) will have an impact on the + performance. 
The rule of thumb is as follows: use multiprocessing when + extracting features for all possible ROI in an image; use threading when + extracting the feature at specific location for a limited number of ROIs. + Refer to the example + :ref:`sphx_glr_auto_examples_xx_applications_plot_haar_extraction_selection_classification.py` + for more insights. + + Examples + -------- + >>> import numpy as np + >>> from skimage.transform import integral_image + >>> from skimage.feature import haar_like_feature + >>> img = np.ones((5, 5), dtype=np.uint8) + >>> img_ii = integral_image(img) + >>> feature = haar_like_feature(img_ii, 0, 0, 5, 5, 'type-3-x') + >>> feature + array([-1, -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -4, -1, + -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -1, -2, -3, -1, -2, -3, -1, + -2, -1, -2, -1, -2, -1, -1, -1]) + + You can compute the feature for some pre-computed coordinates. + + >>> from skimage.feature import haar_like_feature_coord + >>> feature_coord, feature_type = zip( + ... *[haar_like_feature_coord(5, 5, feat_t) + ... for feat_t in ('type-2-x', 'type-3-x')]) + >>> # only select one feature over two + >>> feature_coord = np.concatenate([x[::2] for x in feature_coord]) + >>> feature_type = np.concatenate([x[::2] for x in feature_type]) + >>> feature = haar_like_feature(img_ii, 0, 0, 5, 5, + ... feature_type=feature_type, + ... feature_coord=feature_coord) + >>> feature + array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, -1, -3, -1, -3, -1, -3, -1, -3, -1, + -3, -1, -3, -1, -3, -2, -1, -3, -2, -2, -2, -1]) + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Haar-like_feature + .. [2] Oren, M., Papageorgiou, C., Sinha, P., Osuna, E., & Poggio, T. + (1997, June). Pedestrian detection using wavelet templates. + In Computer Vision and Pattern Recognition, 1997. Proceedings., + 1997 IEEE Computer Society Conference on (pp. 193-199). IEEE. 
+ http://tinyurl.com/y6ulxfta + DOI: 10.1109/CVPR.1997.609319 + .. [3] Viola, Paul, and Michael J. Jones. "Robust real-time face + detection." International journal of computer vision 57.2 + (2004): 137-154. + http://www.merl.com/publications/docs/TR2004-043.pdf + DOI: 10.1109/CVPR.2001.990517 + + """ + if feature_coord is None: + feature_type_ = _validate_feature_type(feature_type) + + return np.hstack(list(chain.from_iterable( + haar_like_feature_wrapper(int_image, r, c, width, height, feat_t, + feature_coord) + for feat_t in feature_type_))) + else: + if feature_coord.shape[0] != feature_type.shape[0]: + raise ValueError("Inconsistent size between feature coordinates" + "and feature types.") + + mask_feature = [feature_type == feat_t for feat_t in FEATURE_TYPE] + haar_feature_idx, haar_feature = zip( + *[(np.flatnonzero(mask), + haar_like_feature_wrapper(int_image, r, c, width, height, + feat_t, feature_coord[mask])) + for mask, feat_t in zip(mask_feature, FEATURE_TYPE) + if np.count_nonzero(mask)]) + + haar_feature_idx = np.concatenate(haar_feature_idx) + haar_feature = np.concatenate(haar_feature) + + haar_feature[haar_feature_idx] = haar_feature.copy() + return haar_feature + + +def draw_haar_like_feature(image, r, c, width, height, + feature_coord, + color_positive_block=(1., 0., 0.), + color_negative_block=(0., 1., 0.), + alpha=0.5, max_n_features=None, random_state=None): + """Visualization of Haar-like features. + + Parameters + ---------- + image : (M, N) ndarray + The region of an integral image for which the features need to be + computed. + r : int + Row-coordinate of top left corner of the detection window. + c : int + Column-coordinate of top left corner of the detection window. + width : int + Width of the detection window. + height : int + Height of the detection window. + feature_coord : ndarray of list of tuples or None, optional + The array of coordinates to be extracted. This is useful when you want + to recompute only a subset of features. 
In this case `feature_type` + needs to be an array containing the type of each feature, as returned + by :func:`haar_like_feature_coord`. By default, all coordinates are + computed. + color_positive_rectangle : tuple of 3 floats + Floats specifying the color for the positive block. Corresponding + values define (R, G, B) values. Default value is red (1, 0, 0). + color_negative_block : tuple of 3 floats + Floats specifying the color for the negative block Corresponding values + define (R, G, B) values. Default value is blue (0, 1, 0). + alpha : float + Value in the range [0, 1] that specifies opacity of visualization. 1 - + fully transparent, 0 - opaque. + max_n_features : int, default=None + The maximum number of features to be returned. + By default, all features are returned. + random_state : int, RandomState instance or None, optional + If int, random_state is the seed used by the random number generator; + If RandomState instance, random_state is the random number generator; + If None, the random number generator is the RandomState instance used + by `np.random`. The random state is used when generating a set of + features smaller than the total number of available features. + + Returns + ------- + features : (M, N), ndarray + An image in which the different features will be added. + + Examples + -------- + >>> import numpy as np + >>> from skimage.feature import haar_like_feature_coord + >>> from skimage.feature import draw_haar_like_feature + >>> feature_coord, _ = haar_like_feature_coord(2, 2, 'type-4') + >>> image = draw_haar_like_feature(np.zeros((2, 2)), + ... 0, 0, 2, 2, + ... feature_coord, + ... max_n_features=1) + >>> image + array([[[ 0. , 0.5, 0. ], + [ 0.5, 0. , 0. ]], + + [[ 0.5, 0. , 0. ], + [ 0. , 0.5, 0. 
]]]) + + """ + random_state = check_random_state(random_state) + color_positive_block = np.asarray(color_positive_block, dtype=np.float64) + color_negative_block = np.asarray(color_negative_block, dtype=np.float64) + + if max_n_features is None: + feature_coord_ = feature_coord + else: + feature_coord_ = random_state.choice(feature_coord, + size=max_n_features, + replace=False) + + output = np.copy(image) + if len(image.shape) < 3: + output = gray2rgb(image) + output = img_as_float(output) + + for coord in feature_coord_: + for idx_rect, rect in enumerate(coord): + coord_start, coord_end = rect + coord_start = tuple(map(add, coord_start, [r, c])) + coord_end = tuple(map(add, coord_end, [r, c])) + rr, cc = rectangle(coord_start, coord_end) + + if ((idx_rect + 1) % 2) == 0: + new_value = ((1 - alpha) * + output[rr, cc] + alpha * color_positive_block) + else: + new_value = ((1 - alpha) * + output[rr, cc] + alpha * color_negative_block) + output[rr, cc] = new_value + + return output diff -Nru skimage-0.13.1/skimage/feature/_haar.pyx skimage-0.14.0/skimage/feature/_haar.pyx --- skimage-0.13.1/skimage/feature/_haar.pyx 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/feature/_haar.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,311 @@ +#cython: cdivision=True +#cython: boundscheck=False +#cython: nonecheck=False +#cython: wraparound=False +#distutils: language=c++ + +import numpy as np + +from ..transform import integral_image +from .._shared.transform cimport integrate + +FEATURE_TYPE = {'type-2-x': 0, 'type-2-y': 1, + 'type-3-x': 2, 'type-3-y': 3, + 'type-4': 4} + +N_RECTANGLE = {'type-2-x': 2, 'type-2-y': 2, + 'type-3-x': 3, 'type-3-y': 3, + 'type-4': 4} + + +cdef vector[vector[Rectangle]] _haar_like_feature_coord( + Py_ssize_t width, + Py_ssize_t height, + unsigned int feature_type) nogil: + """Private function to compute the coordinates of all Haar-like features. 
+ """ + cdef: + Py_ssize_t max_feature = height * height * width * width + vector[vector[Rectangle]] rect_feat + Rectangle single_rect + Py_ssize_t n_rectangle + Py_ssize_t x, y, dx, dy + + if feature_type == 0 or feature_type == 1: + n_rectangle = 2 + elif feature_type == 2 or feature_type == 3: + n_rectangle = 3 + else: + n_rectangle = 4 + + # Allocate for the number of rectangle (we know from the start) + rect_feat = vector[vector[Rectangle]](n_rectangle) + + for y in range(height): + for x in range(width): + for dy in range(1, height): + for dx in range(1, width): + # type -> 2 rectangles split along x axis + if (feature_type == 0 and + (y + dy <= height and x + 2 * dx <= width)): + set_rectangle_feature(&single_rect, + y, x, + y + dy - 1, x + dx - 1) + rect_feat[0].push_back(single_rect) + set_rectangle_feature(&single_rect, + y, x + dx, + y + dy - 1, x + 2 * dx - 1) + rect_feat[1].push_back(single_rect) + # type -> 2 rectangles split along y axis + elif (feature_type == 1 and + (y + 2 * dy <= height and x + dx <= width)): + set_rectangle_feature(&single_rect, + y, x, + y + dy - 1, x + dx - 1) + rect_feat[0].push_back(single_rect) + set_rectangle_feature(&single_rect, + y + dy, x, + y + 2 * dy - 1, x + dx - 1) + rect_feat[1].push_back(single_rect) + # type -> 3 rectangles split along x axis + elif (feature_type == 2 and + (y + dy <= height and x + 3 * dx <= width)): + set_rectangle_feature(&single_rect, + y, x, + y + dy - 1, x + dx - 1) + rect_feat[0].push_back(single_rect) + set_rectangle_feature(&single_rect, + y, x + dx, + y + dy - 1, x + 2 * dx - 1) + rect_feat[1].push_back(single_rect) + set_rectangle_feature(&single_rect, + y, x + 2 * dx, + y + dy - 1, x + 3 * dx - 1) + rect_feat[2].push_back(single_rect) + # type -> 3 rectangles split along y axis + elif (feature_type == 3 and + (y + 3 * dy <= height and x + dx <= width)): + set_rectangle_feature(&single_rect, + y, x, + y + dy - 1, x + dx - 1) + rect_feat[0].push_back(single_rect) + 
set_rectangle_feature(&single_rect, + y + dy, x, + y + 2 * dy - 1, x + dx - 1) + rect_feat[1].push_back(single_rect) + set_rectangle_feature(&single_rect, + y + 2 * dy, x, + y + 3 * dy - 1, x + dx - 1) + rect_feat[2].push_back(single_rect) + # type -> 4 rectangles split along x and y axis + elif (feature_type == 4 and + (y + 2 * dy <= height and x + 2 * dx <= width)): + set_rectangle_feature(&single_rect, + y, x, + y + dy - 1, x + dx - 1) + rect_feat[0].push_back(single_rect) + set_rectangle_feature(&single_rect, + y, x + dx, + y + dy - 1, x + 2 * dx - 1) + rect_feat[1].push_back(single_rect) + set_rectangle_feature(&single_rect, + y + dy, x, + y + 2 * dy - 1, x + dx - 1) + rect_feat[3].push_back(single_rect) + set_rectangle_feature(&single_rect, + y + dy, x + dx, + y + 2 * dy - 1, x + 2 * dx - 1) + rect_feat[2].push_back(single_rect) + + return rect_feat + + +cpdef haar_like_feature_coord_wrapper(width, height, feature_type): + """Compute the coordinates of Haar-like features. + + Parameters + ---------- + width : int + Width of the detection window. + height : int + Height of the detection window. + feature_type : str + The type of feature to consider: + + - 'type-2-x': 2 rectangles varying along the x axis; + - 'type-2-y': 2 rectangles varying along the y axis; + - 'type-3-x': 3 rectangles varying along the x axis; + - 'type-3-y': 3 rectangles varying along the y axis; + - 'type-4': 4 rectangles varying along x and y axis. + + Returns + ------- + feature_coord : (n_features, n_rectangles, 2, 2), ndarray of list of \ +tuple coord + Coordinates of the rectangles for each feature. + feature_type : (n_features,), ndarray of str + The corresponding type for each feature. 
+ + """ + cdef: + vector[vector[Rectangle]] rect + Py_ssize_t n_rectangle, n_feature + Py_ssize_t i, j + # cast the height and width to the right type + Py_ssize_t height_win = height + Py_ssize_t width_win = width + + rect = _haar_like_feature_coord(width_win, height_win, + FEATURE_TYPE[feature_type]) + n_feature = rect[0].size() + n_rectangle = rect.size() + + # allocate the output based on the number of rectangle + output = np.empty((n_feature,), dtype=object) + for j in range(n_feature): + coord_feature = [] + for i in range(n_rectangle): + coord_feature.append([(rect[i][j].top_left.row, + rect[i][j].top_left.col), + (rect[i][j].bottom_right.row, + rect[i][j].bottom_right.col)]) + output[j] = coord_feature + + return output, np.array([feature_type] * n_feature, dtype=object) + + +cdef integral_floating[:, ::1] _haar_like_feature( + integral_floating[:, ::1] int_image, + vector[vector[Rectangle]] coord, + Py_ssize_t n_rectangle, Py_ssize_t n_feature): + """Private function releasing the GIL to compute the integral for the + different rectangle.""" + cdef: + integral_floating[:, ::1] rect_feature = np.empty( + (n_rectangle, n_feature), dtype=int_image.base.dtype) + + Py_ssize_t idx_rect, idx_feature + + with nogil: + for idx_rect in range(n_rectangle): + for idx_feature in range(n_feature): + rect_feature[idx_rect, idx_feature] = integrate( + int_image, + coord[idx_rect][idx_feature].top_left.row, + coord[idx_rect][idx_feature].top_left.col, + coord[idx_rect][idx_feature].bottom_right.row, + coord[idx_rect][idx_feature].bottom_right.col) + + return rect_feature + + +cpdef haar_like_feature_wrapper( + cnp.ndarray[integral_floating, ndim=2] int_image, + r, c, width, height, feature_type, feature_coord): + """Compute the Haar-like features for a region of interest (ROI) of an + integral image. + + Haar-like features have been successfully used for image classification and + object detection [1]_. 
It has been used for real-time face detection + algorithm proposed in [2]_. + + Parameters + ---------- + int_image : (M, N) ndarray + Integral image for which the features need to be computed. + r : int + Row-coordinate of top left corner of the detection window. + c : int + Column-coordinate of top left corner of the detection window. + width : int + Width of the detection window. + height : int + Height of the detection window. + feature_type : str + The type of feature to consider: + + - 'type-2-x': 2 rectangles varying along the x axis; + - 'type-2-y': 2 rectangles varying along the y axis; + - 'type-3-x': 3 rectangles varying along the x axis; + - 'type-3-y': 3 rectangles varying along the y axis; + - 'type-4': 4 rectangles varying along x and y axis. + + Returns + ------- + haar_features : (n_features,) ndarray + Resulting Haar-like features. Each value is equal to the subtraction of + sums of the positive and negative rectangles. The data type depends of + the data type of `int_image`: `int` when the data type of `int_image` + is `uint` or `int` and `float` when the data type of `int_image` is + `float`. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Haar-like_feature + .. [2] Oren, M., Papageorgiou, C., Sinha, P., Osuna, E., & Poggio, T. + (1997, June). Pedestrian detection using wavelet templates. + In Computer Vision and Pattern Recognition, 1997. Proceedings., + 1997 IEEE Computer Society Conference on (pp. 193-199). IEEE. + http://tinyurl.com/y6ulxfta + DOI: 10.1109/CVPR.1997.609319 + .. [3] Viola, Paul, and Michael J. Jones. "Robust real-time face + detection." International journal of computer vision 57.2 + (2004): 137-154. 
+ http://www.merl.com/publications/docs/TR2004-043.pdf + DOI: 10.1109/CVPR.2001.990517 + + """ + cdef: + vector[vector[Rectangle]] coord + Py_ssize_t n_rectangle, n_feature + Py_ssize_t idx_rect, idx_feature + integral_floating[:, ::1] rect_feature + # FIXME: currently cython does not support read-only memory views. + # Those are used with joblib when using Parallel. Therefore, we use + # ndarray as input. We take a copy of this ndarray to create a memory + # view to be able to release the GIL in some later processing. + # Check the following issue to check the status of read-only memory + # views in cython: + # https://github.com/cython/cython/issues/1605 to be resolved + integral_floating[:, ::1] int_image_memview = int_image[ + r : r + height, c : c + width].copy() + + if feature_coord is None: + # compute all possible coordinates with a specific type of feature + coord = _haar_like_feature_coord(width, height, + FEATURE_TYPE[feature_type]) + n_feature = coord[0].size() + n_rectangle = coord.size() + else: + # build the coordinate from the set provided + n_rectangle = N_RECTANGLE[feature_type] + n_feature = len(feature_coord) + + # the vector can be directly pre-allocated since that the size is known + coord = vector[vector[Rectangle]](n_rectangle, + vector[Rectangle](n_feature)) + + for idx_rect in range(n_rectangle): + for idx_feature in range(n_feature): + set_rectangle_feature( + &coord[idx_rect][idx_feature], + feature_coord[idx_feature][idx_rect][0][0], + feature_coord[idx_feature][idx_rect][0][1], + feature_coord[idx_feature][idx_rect][1][0], + feature_coord[idx_feature][idx_rect][1][1]) + + rect_feature = _haar_like_feature(int_image_memview, + coord, n_rectangle, n_feature) + + # convert the memory view to numpy array and convert it to signed array if + # necessary to avoid overflow during subtraction + rect_feature_ndarray = np.asarray(rect_feature) + data_type = rect_feature_ndarray.dtype + if 'uint' in data_type.name: + rect_feature_ndarray = 
rect_feature_ndarray.astype( + data_type.name.replace('u', '')) + + # the rectangles with odd indices can always be subtracted to the rectangle + # with even indices + return (np.sum(rect_feature_ndarray[1::2], axis=0) - + np.sum(rect_feature_ndarray[::2], axis=0)) diff -Nru skimage-0.13.1/skimage/feature/_hoghistogram.pyx skimage-0.14.0/skimage/feature/_hoghistogram.pyx --- skimage-0.13.1/skimage/feature/_hoghistogram.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/_hoghistogram.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -111,14 +111,14 @@ gradient_rows) cdef double[:, ::1] orientation = \ np.rad2deg(np.arctan2(gradient_rows, gradient_columns)) % 180 - cdef int i, x, y, o, yi, xi, cc, cr, x0, y0, \ + cdef int i, c, r, o, r_i, c_i, cc, cr, c_0, r_0, \ range_rows_start, range_rows_stop, \ range_columns_start, range_columns_stop cdef float orientation_start, orientation_end, \ number_of_orientations_per_180 - y0 = cell_rows / 2 - x0 = cell_columns / 2 + r_0 = cell_rows / 2 + c_0 = cell_columns / 2 cc = cell_rows * number_of_cells_rows cr = cell_columns * number_of_cells_columns range_rows_stop = cell_rows / 2 @@ -133,25 +133,25 @@ # isolate orientations in this range orientation_start = number_of_orientations_per_180 * (i + 1) orientation_end = number_of_orientations_per_180 * i - x = x0 - y = y0 - yi = 0 - xi = 0 + c = c_0 + r = r_0 + r_i = 0 + c_i = 0 - while y < cc: - xi = 0 - x = x0 + while r < cc: + c_i = 0 + c = c_0 - while x < cr: - orientation_histogram[yi, xi, i] = \ + while c < cr: + orientation_histogram[r_i, c_i, i] = \ cell_hog(magnitude, orientation, orientation_start, orientation_end, - cell_columns, cell_rows, x, y, + cell_columns, cell_rows, c, r, size_columns, size_rows, range_rows_start, range_rows_stop, range_columns_start, range_columns_stop) - xi += 1 - x += cell_columns + c_i += 1 + c += cell_columns - yi += 1 - y += cell_rows + r_i += 1 + r += cell_rows diff -Nru skimage-0.13.1/skimage/feature/_hog.py 
skimage-0.14.0/skimage/feature/_hog.py --- skimage-0.13.1/skimage/feature/_hog.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/_hog.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,8 +1,7 @@ from __future__ import division import numpy as np -from .._shared.utils import assert_nD -from .._shared.utils import skimage_deprecation, warn from . import _hoghistogram +from .._shared.utils import skimage_deprecation, warn def _hog_normalize_block(block, method, eps=1e-5): @@ -15,30 +14,54 @@ elif method == 'L2-Hys': out = block / np.sqrt(np.sum(block ** 2) + eps ** 2) out = np.minimum(out, 0.2) - out = out / np.sqrt(np.sum(block ** 2) + eps ** 2) + out = out / np.sqrt(np.sum(out ** 2) + eps ** 2) else: raise ValueError('Selected block normalization method is invalid.') return out +def _hog_channel_gradient(channel): + """Compute unnormalized gradient image along `row` and `col` axes. + + Parameters + ---------- + channel : (M, N) ndarray + Grayscale image or one of image channel. + + Returns + ------- + g_row, g_col : channel gradient along `row` and `col` axes correspondingly. + """ + g_row = np.empty(channel.shape, dtype=np.double) + g_row[0, :] = 0 + g_row[-1, :] = 0 + g_row[1:-1, :] = channel[2:, :] - channel[:-2, :] + g_col = np.empty(channel.shape, dtype=np.double) + g_col[:, 0] = 0 + g_col[:, -1] = 0 + g_col[:, 1:-1] = channel[:, 2:] - channel[:, :-2] + + return g_row, g_col + + def hog(image, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), - block_norm='L1', visualise=False, transform_sqrt=False, - feature_vector=True, normalise=None): + block_norm=None, visualize=False, visualise=None, transform_sqrt=False, + feature_vector=True, multichannel=None): """Extract Histogram of Oriented Gradients (HOG) for a given image. Compute a Histogram of Oriented Gradients (HOG) by 1. (optional) global image normalization - 2. computing the gradient image in x and y + 2. computing the gradient image in `row` and `col` 3. 
computing gradient histograms 4. normalizing across blocks 5. flattening into a feature vector Parameters ---------- - image : (M, N) ndarray - Input image (greyscale). + image : (M, N[, C]) ndarray + Input image. orientations : int, optional Number of orientation bins. pixels_per_cell : 2-tuple (int, int), optional @@ -60,8 +83,12 @@ renormalization using L2-norm. For details, see [3]_, [4]_. - visualise : bool, optional - Also return an image of the HOG. + visualize : bool, optional + Also return an image of the HOG. For each cell and orientation bin, + the image contains a line segment that is centered at the cell center, + is perpendicular to the midpoint of the range of angles spanned by the + orientation bin, and has intensity proportional to the corresponding + histogram value. transform_sqrt : bool, optional Apply power law compression to normalize the image before processing. DO NOT use this if the image contains negative @@ -69,16 +96,17 @@ feature_vector : bool, optional Return the data as a feature vector by calling .ravel() on the result just before returning. - normalise : bool, deprecated - The parameter is deprecated. Use `transform_sqrt` for power law - compression. `normalise` has been deprecated. + multichannel : boolean, optional + If True, the last `image` dimension is considered as a color channel, + otherwise as spatial. Returns ------- - newarr : ndarray - HOG for the image as a 1D (flattened) array. - hog_image : ndarray (if visualise=True) - A visualisation of the HOG image. + out : (n_blocks_row, n_blocks_col, n_cells_row, n_cells_col, n_orient) ndarray + HOG descriptor for the image. If `feature_vector` is True, a 1D + (flattened) array is returned. + hog_image : (M, N) ndarray, optional + A visualisation of the HOG image. Only provided if `visualize` is True. References ---------- @@ -114,32 +142,34 @@ and then applies the hog algorithm to the image. 
""" - if block_norm == 'L1': + if block_norm is None: + block_norm = 'L1' warn('Default value of `block_norm`==`L1` is deprecated and will ' - 'be changed to `L2-Hys` in v0.15', skimage_deprecation) + 'be changed to `L2-Hys` in v0.15. To supress this message ' + 'specify explicitly the normalization method.', + skimage_deprecation) image = np.atleast_2d(image) + if multichannel is None: + multichannel = (image.ndim == 3) + + ndim_spatial = image.ndim - 1 if multichannel else image.ndim + if ndim_spatial != 2: + raise ValueError('Only images with 2 spatial dimensions are ' + 'supported. If using with color/multichannel ' + 'images, specify `multichannel=True`.') + """ The first stage applies an optional global image normalization equalisation that is designed to reduce the influence of illumination effects. In practice we use gamma (power law) compression, either - computing the square root or the log of each colour channel. + computing the square root or the log of each color channel. Image texture strength is typically proportional to the local surface illumination so this compression helps to reduce the effects of local shadowing and illumination variations. """ - assert_nD(image, 2) - - if normalise is not None: - raise ValueError("The ``normalise`` parameter was removed due to " - "incorrect behavior: it only applied a square root " - "instead of a true normalization. " - "If you wish to duplicate the old behavior, set " - "``transform_sqrt=True``. ``normalise`` will be " - "completely removed in v0.14.") - if transform_sqrt: image = np.sqrt(image) @@ -147,7 +177,7 @@ The second stage computes first order image gradients. These capture contour, silhouette and some texture information, while providing further resistance to illumination variations. The locally dominant - colour channel is used, which provides colour invariance to a large + color channel is used, which provides color invariance to a large extent. 
Variant methods may also include second order image derivatives, which act as primitive bar detectors - a useful feature for capturing, e.g. bar like structures in bicycles and limbs in humans. @@ -158,8 +188,27 @@ # to avoid problems with subtracting unsigned numbers image = image.astype('float') - gy, gx = [np.ascontiguousarray(g, dtype=np.double) - for g in np.gradient(image)] + if multichannel: + g_row_by_ch = np.empty_like(image, dtype=np.double) + g_col_by_ch = np.empty_like(image, dtype=np.double) + g_magn = np.empty_like(image, dtype=np.double) + + for idx_ch in range(image.shape[2]): + g_row_by_ch[:, :, idx_ch], g_col_by_ch[:, :, idx_ch] = \ + _hog_channel_gradient(image[:, :, idx_ch]) + g_magn[:, :, idx_ch] = np.hypot(g_row_by_ch[:, :, idx_ch], + g_col_by_ch[:, :, idx_ch]) + + # For each pixel select the channel with the highest gradient magnitude + idcs_max = g_magn.argmax(axis=2) + rr, cc = np.meshgrid(np.arange(image.shape[0]), + np.arange(image.shape[1]), + indexing='ij', + sparse=True) + g_row = g_row_by_ch[rr, cc, idcs_max] + g_col = g_col_by_ch[rr, cc, idcs_max] + else: + g_row, g_col = _hog_channel_gradient(image) """ The third stage aims to produce an encoding that is sensitive to @@ -176,39 +225,48 @@ cell are used to vote into the orientation histogram. 
""" - sy, sx = image.shape - cx, cy = pixels_per_cell - bx, by = cells_per_block + s_row, s_col = image.shape[:2] + c_row, c_col = pixels_per_cell + b_row, b_col = cells_per_block - n_cellsx = int(sx // cx) # number of cells in x - n_cellsy = int(sy // cy) # number of cells in y + n_cells_row = int(s_row // c_row) # number of cells along row-axis + n_cells_col = int(s_col // c_col) # number of cells along col-axis # compute orientations integral images - orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations)) + orientation_histogram = np.zeros((n_cells_row, n_cells_col, orientations)) - _hoghistogram.hog_histograms(gx, gy, cx, cy, sx, sy, n_cellsx, n_cellsy, + _hoghistogram.hog_histograms(g_col, g_row, c_col, c_row, s_col, s_row, + n_cells_col, n_cells_row, orientations, orientation_histogram) # now compute the histogram for each cell hog_image = None - if visualise: + if visualise is not None: + visualize = visualise + warn('Argument `visualise` is deprecated and will ' + 'be changed to `visualize` in v0.16', skimage_deprecation) + if visualize: from .. 
import draw - radius = min(cx, cy) // 2 - 1 + radius = min(c_row, c_col) // 2 - 1 orientations_arr = np.arange(orientations) - dx_arr = radius * np.cos(orientations_arr / orientations * np.pi) - dy_arr = radius * np.sin(orientations_arr / orientations * np.pi) - hog_image = np.zeros((sy, sx), dtype=float) - for x in range(n_cellsx): - for y in range(n_cellsy): - for o, dx, dy in zip(orientations_arr, dx_arr, dy_arr): - centre = tuple([y * cy + cy // 2, x * cx + cx // 2]) - rr, cc = draw.line(int(centre[0] - dx), - int(centre[1] + dy), - int(centre[0] + dx), - int(centre[1] - dy)) - hog_image[rr, cc] += orientation_histogram[y, x, o] + # set dr_arr, dc_arr to correspond to midpoints of orientation bins + orientation_bin_midpoints = ( + np.pi * (orientations_arr + .5) / orientations) + dr_arr = radius * np.sin(orientation_bin_midpoints) + dc_arr = radius * np.cos(orientation_bin_midpoints) + hog_image = np.zeros((s_row, s_col), dtype=float) + for r in range(n_cells_row): + for c in range(n_cells_col): + for o, dr, dc in zip(orientations_arr, dr_arr, dc_arr): + centre = tuple([r * c_row + c_row // 2, + c * c_col + c_col // 2]) + rr, cc = draw.line(int(centre[0] - dc), + int(centre[1] + dr), + int(centre[0] + dc), + int(centre[1] - dr)) + hog_image[rr, cc] += orientation_histogram[r, c, o] """ The fourth stage computes normalization, which takes local groups of @@ -225,15 +283,15 @@ Gradient (HOG) descriptors. 
""" - n_blocksx = (n_cellsx - bx) + 1 - n_blocksy = (n_cellsy - by) + 1 - normalized_blocks = np.zeros((n_blocksy, n_blocksx, - by, bx, orientations)) - - for x in range(n_blocksx): - for y in range(n_blocksy): - block = orientation_histogram[y:y + by, x:x + bx, :] - normalized_blocks[y, x, :] = \ + n_blocks_row = (n_cells_row - b_row) + 1 + n_blocks_col = (n_cells_col - b_col) + 1 + normalized_blocks = np.zeros((n_blocks_row, n_blocks_col, + b_row, b_col, orientations)) + + for r in range(n_blocks_row): + for c in range(n_blocks_col): + block = orientation_histogram[r:r + b_row, c:c + b_col, :] + normalized_blocks[r, c, :] = \ _hog_normalize_block(block, method=block_norm) """ @@ -245,7 +303,7 @@ if feature_vector: normalized_blocks = normalized_blocks.ravel() - if visualise: + if visualize: return normalized_blocks, hog_image else: return normalized_blocks diff -Nru skimage-0.13.1/skimage/feature/__init__.py skimage-0.14.0/skimage/feature/__init__.py --- skimage-0.13.1/skimage/feature/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -22,6 +22,8 @@ from .match import match_descriptors from .util import plot_matches from .blob import blob_dog, blob_log, blob_doh +from .haar import (haar_like_feature, haar_like_feature_coord, + draw_haar_like_feature) __all__ = ['canny', @@ -57,4 +59,7 @@ 'plot_matches', 'blob_dog', 'blob_doh', - 'blob_log'] + 'blob_log', + 'haar_like_feature', + 'haar_like_feature_coord', + 'draw_haar_like_feature'] diff -Nru skimage-0.13.1/skimage/feature/match.py skimage-0.14.0/skimage/feature/match.py --- skimage-0.13.1/skimage/feature/match.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/match.py 2018-05-29 01:27:44.000000000 +0000 @@ -3,7 +3,7 @@ def match_descriptors(descriptors1, descriptors2, metric=None, p=2, - max_distance=np.inf, cross_check=True): + max_distance=np.inf, cross_check=True, max_ratio=1.0): """Brute-force matching 
of descriptors. For each descriptor in the first set this matcher finds the closest @@ -32,6 +32,14 @@ matched pair (keypoint1, keypoint2) is returned if keypoint2 is the best match for keypoint1 in second image and keypoint1 is the best match for keypoint2 in first image. + max_ratio : float + Maximum ratio of distances between first and second closest descriptor + in the second set of descriptors. This threshold is useful to filter + ambiguous matches between the two descriptor sets. The choice of this + value depends on the statistics of the chosen descriptor, e.g., + for SIFT descriptors a value of 0.8 is usually chosen, see + D.G. Lowe, "Distinctive Image Features from Scale-Invariant Keypoints", + International Journal of Computer Vision, 2004. Returns ------- @@ -46,7 +54,7 @@ raise ValueError("Descriptor length must equal.") if metric is None: - if np.issubdtype(descriptors1.dtype, np.bool): + if np.issubdtype(descriptors1.dtype, np.bool_): metric = 'hamming' else: metric = 'euclidean' @@ -62,9 +70,23 @@ indices1 = indices1[mask] indices2 = indices2[mask] - matches = np.column_stack((indices1, indices2)) - if max_distance < np.inf: - matches = matches[distances[indices1, indices2] < max_distance] + mask = distances[indices1, indices2] < max_distance + indices1 = indices1[mask] + indices2 = indices2[mask] + + if max_ratio < 1.0: + best_distances = distances[indices1, indices2] + distances[indices1, indices2] = np.inf + second_best_indices2 = np.argmin(distances[indices1], axis=1) + second_best_distances = distances[indices1, second_best_indices2] + second_best_distances[second_best_distances == 0] \ + = np.finfo(np.double).eps + ratio = best_distances / second_best_distances + mask = ratio < max_ratio + indices1 = indices1[mask] + indices2 = indices2[mask] + + matches = np.column_stack((indices1, indices2)) return matches diff -Nru skimage-0.13.1/skimage/feature/orb.py skimage-0.14.0/skimage/feature/orb.py --- skimage-0.13.1/skimage/feature/orb.py 2017-09-26 
23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/orb.py 2018-05-29 01:27:44.000000000 +0000 @@ -5,7 +5,7 @@ _prepare_grayscale_input_2D) from ..feature import (corner_fast, corner_orientations, corner_peaks, - corner_harris) + corner_harris) from ..transform import pyramid_gaussian from .._shared.utils import assert_nD @@ -132,7 +132,8 @@ def _build_pyramid(self, image): image = _prepare_grayscale_input_2D(image) - return list(pyramid_gaussian(image, self.n_scales - 1, self.downscale)) + return list(pyramid_gaussian(image, self.n_scales - 1, + self.downscale, multichannel=False)) def _detect_octave(self, octave_image): # Extract keypoints for current octave @@ -314,14 +315,14 @@ keypoints_list.append(keypoints[mask] * self.downscale ** octave) responses_list.append(responses[mask]) orientations_list.append(orientations[mask]) - scales_list.append(self.downscale ** octave - * np.ones(keypoints.shape[0], dtype=np.intp)) + scales_list.append(self.downscale ** octave * + np.ones(keypoints.shape[0], dtype=np.intp)) descriptors_list.append(descriptors) if len(scales_list) == 0: - raise RuntimeError("ORB found no features. Try passing in an image " - "containing greater intensity contrasts between " - "adjacent pixels.") + raise RuntimeError( + "ORB found no features. 
Try passing in an image containing " + "greater intensity contrasts between adjacent pixels.") keypoints = np.vstack(keypoints_list) responses = np.hstack(responses_list) diff -Nru skimage-0.13.1/skimage/feature/setup.py skimage-0.14.0/skimage/feature/setup.py --- skimage-0.13.1/skimage/feature/setup.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/setup.py 2018-05-29 01:27:44.000000000 +0000 @@ -19,6 +19,7 @@ cython(['_texture.pyx'], working_path=base_path) cython(['_hessian_det_appx.pyx'], working_path=base_path) cython(['_hoghistogram.pyx'], working_path=base_path) + cython(['_haar.pyx'], working_path=base_path) config.add_extension('corner_cy', sources=['corner_cy.c'], include_dirs=[get_numpy_include_dirs()]) @@ -34,6 +35,9 @@ include_dirs=[get_numpy_include_dirs()]) config.add_extension('_hoghistogram', sources=['_hoghistogram.c'], include_dirs=[get_numpy_include_dirs(), '../_shared']) + config.add_extension('_haar', sources=['_haar.cpp'], + include_dirs=[get_numpy_include_dirs(), '../_shared'], + language="c++") return config diff -Nru skimage-0.13.1/skimage/feature/tests/test_blob.py skimage-0.14.0/skimage/feature/tests/test_blob.py --- skimage-0.13.1/skimage/feature/tests/test_blob.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_blob.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,14 +1,16 @@ import numpy as np from skimage.draw import circle +from skimage.draw.draw3d import ellipsoid from skimage.feature import blob_dog, blob_log, blob_doh +from skimage.feature.blob import _blob_overlap +from skimage import util import math -from numpy.testing import assert_raises +from numpy.testing import assert_almost_equal def test_blob_dog(): r2 = math.sqrt(2) img = np.ones((512, 512)) - img3 = np.ones((5, 5, 5)) xs, ys = circle(400, 130, 5) img[xs, ys] = 255 @@ -39,17 +41,29 @@ assert abs(b[1] - 350) <= thresh assert abs(radius(b) - 45) <= thresh - assert_raises(ValueError, blob_dog, img3) - # Testing no 
peaks img_empty = np.zeros((100,100)) assert blob_dog(img_empty).size == 0 + # Testing 3D + r = 10 + pad = 10 + im3 = ellipsoid(r, r, r) + im3 = util.pad(im3, pad, mode='constant') + + blobs = blob_dog(im3, min_sigma=3, max_sigma=10, + sigma_ratio=1.2, threshold=0.1) + b = blobs[0] + + assert b[0] == r + pad + 1 + assert b[1] == r + pad + 1 + assert b[2] == r + pad + 1 + assert abs(math.sqrt(3) * b[3] - r) < 1 + def test_blob_log(): r2 = math.sqrt(2) img = np.ones((256, 256)) - img3 = np.ones((5, 5, 5)) xs, ys = circle(200, 65, 5) img[xs, ys] = 255 @@ -117,16 +131,27 @@ assert abs(b[1] - 175) <= thresh assert abs(radius(b) - 30) <= thresh - assert_raises(ValueError, blob_log, img3) - # Testing no peaks img_empty = np.zeros((100,100)) assert blob_log(img_empty).size == 0 + # Testing 3D + r = 6 + pad = 10 + im3 = ellipsoid(r, r, r) + im3 = util.pad(im3, pad, mode='constant') + + blobs = blob_log(im3, min_sigma=3, max_sigma=10) + b = blobs[0] + + assert b[0] == r + pad + 1 + assert b[1] == r + pad + 1 + assert b[2] == r + pad + 1 + assert abs(math.sqrt(3) * b[3] - r) < 1 + def test_blob_doh(): img = np.ones((512, 512), dtype=np.uint8) - img3 = np.ones((5, 5, 5)) xs, ys = circle(400, 130, 20) img[xs, ys] = 255 @@ -200,15 +225,13 @@ assert abs(b[1] - 350) <= thresh assert abs(radius(b) - 50) <= thresh - assert_raises(ValueError, blob_doh, img3) - # Testing no peaks img_empty = np.zeros((100,100)) assert blob_doh(img_empty).size == 0 def test_blob_overlap(): - img = np.ones((512, 512), dtype=np.uint8) + img = np.ones((256, 256), dtype=np.uint8) xs, ys = circle(100, 100, 20) img[xs, ys] = 255 @@ -224,3 +247,27 @@ threshold=.05) assert len(blobs) == 1 + + r1, r2 = 7, 6 + pad1, pad2 = 11, 12 + blob1 = ellipsoid(r1, r1, r1) + blob1 = util.pad(blob1, pad1, mode='constant') + blob2 = ellipsoid(r2, r2, r2) + blob2 = util.pad(blob2, [(pad2, pad2), (pad2 - 9, pad2 + 9), + (pad2, pad2)], + mode='constant') + im3 = np.logical_or(blob1, blob2) + + blobs = blob_log(im3, min_sigma=2, 
max_sigma=10, overlap=0.1) + assert len(blobs) == 1 + + # Two circles with distance between centers equal to radius + overlap = _blob_overlap(np.array([0, 0, 10 / math.sqrt(2)]), + np.array([0, 10, 10 / math.sqrt(2)])) + assert_almost_equal(overlap, + 1./math.pi * (2 * math.acos(1./2) - math.sqrt(3)/2.)) + +def test_no_blob(): + im = np.zeros((10, 10)) + blobs = blob_log(im, min_sigma=2, max_sigma=5, num_sigma=4) + assert len(blobs) == 0 diff -Nru skimage-0.13.1/skimage/feature/tests/test_brief.py skimage-0.14.0/skimage/feature/tests/test_brief.py --- skimage-0.13.1/skimage/feature/tests/test_brief.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_brief.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,8 @@ import numpy as np -from numpy.testing import assert_array_equal, assert_raises +from skimage._shared.testing import assert_array_equal from skimage import data from skimage.feature import BRIEF, corner_peaks, corner_harris +from skimage._shared import testing from skimage._shared.testing import test_parallel @@ -9,7 +10,8 @@ """Brief descriptors can be evaluated on gray-scale images only.""" img = np.zeros((20, 20, 3)) keypoints = np.asarray([[7, 5], [11, 13]]) - assert_raises(ValueError, BRIEF().extract, img, keypoints) + with testing.raises(ValueError): + BRIEF().extract(img, keypoints) def test_normal_mode(): @@ -59,7 +61,8 @@ def test_unsupported_mode(): - assert_raises(ValueError, BRIEF, mode='foobar') + with testing.raises(ValueError): + BRIEF(mode='foobar') def test_border(): @@ -71,8 +74,3 @@ assert extractor.descriptors.shape[0] == 3 assert_array_equal(extractor.mask, (False, True, True, True)) - - -if __name__ == '__main__': - from numpy import testing - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/feature/tests/test_canny.py skimage-0.14.0/skimage/feature/tests/test_canny.py --- skimage-0.13.1/skimage/feature/tests/test_canny.py 2017-09-26 23:38:27.000000000 +0000 +++ 
skimage-0.14.0/skimage/feature/tests/test_canny.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,6 @@ import unittest import numpy as np -from numpy.testing import assert_equal +from skimage._shared.testing import assert_equal from scipy.ndimage import binary_dilation, binary_erosion import skimage.feature as F from skimage import data, img_as_float diff -Nru skimage-0.13.1/skimage/feature/tests/test_censure.py skimage-0.14.0/skimage/feature/tests/test_censure.py --- skimage-0.13.1/skimage/feature/tests/test_censure.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_censure.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,8 +1,9 @@ import numpy as np -from numpy.testing import assert_array_equal, assert_raises +from skimage._shared.testing import assert_array_equal from skimage.data import moon from skimage.feature import CENSURE from skimage._shared.testing import test_parallel +from skimage._shared import testing from skimage.transform import rescale @@ -20,19 +21,22 @@ def test_keypoints_censure_color_image_unsupported_error(): """Censure keypoints can be extracted from gray-scale images only.""" - assert_raises(ValueError, CENSURE().detect, np.zeros((20, 20, 3))) + with testing.raises(ValueError): + CENSURE().detect(np.zeros((20, 20, 3))) def test_keypoints_censure_mode_validity_error(): """Mode argument in keypoints_censure can be either DoB, Octagon or STAR.""" - assert_raises(ValueError, CENSURE, mode='dummy') + with testing.raises(ValueError): + CENSURE(mode='dummy') def test_keypoints_censure_scale_range_error(): """Difference between the the max_scale and min_scale parameters in keypoints_censure should be greater than or equal to two.""" - assert_raises(ValueError, CENSURE, min_scale=1, max_scale=2) + with testing.raises(ValueError): + CENSURE(min_scale=1, max_scale=2) def test_keypoints_censure_moon_image_dob(): @@ -61,7 +65,11 @@ the expected values for Octagon filter.""" detector = CENSURE(mode='octagon') - 
detector.detect(rescale(img, 0.25)) # quarter scale image for speed + # quarter scale image for speed + detector.detect(rescale(img, 0.25, + multichannel=False, + anti_aliasing=False, + mode='constant')) expected_keypoints = np.array([[ 23, 27], [ 29, 89], [ 31, 87], @@ -78,7 +86,11 @@ """Verify the actual Censure keypoints and their corresponding scale with the expected values for STAR filter.""" detector = CENSURE(mode='star') - detector.detect(rescale(img, 0.25)) # quarter scale image for speed + # quarter scale image for speed + detector.detect(rescale(img, 0.25, + multichannel=False, + anti_aliasing=False, + mode='constant')) expected_keypoints = np.array([[ 23, 27], [ 29, 89], [ 30, 86], @@ -91,8 +103,3 @@ assert_array_equal(expected_keypoints, detector.keypoints) assert_array_equal(expected_scales, detector.scales) - - -if __name__ == '__main__': - from numpy import testing - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/feature/tests/test_corner.py skimage-0.14.0/skimage/feature/tests/test_corner.py --- skimage-0.13.1/skimage/feature/tests/test_corner.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_corner.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,12 +1,14 @@ import numpy as np -from numpy.testing import (assert_array_equal, assert_raises, - assert_almost_equal, assert_warns) - +from skimage._shared.testing import assert_array_equal +from skimage._shared.testing import assert_almost_equal, assert_warns from skimage import data from skimage import img_as_float +from skimage import draw from skimage.color import rgb2gray from skimage.morphology import octagon -from skimage._shared.testing import test_parallel +from skimage._shared.testing import test_parallel, expected_warnings +from skimage._shared import testing +import pytest from skimage.feature import (corner_moravec, corner_harris, corner_shi_tomasi, corner_subpix, peak_local_max, corner_peaks, @@ -17,6 +19,15 @@ hessian_matrix_det, shape_index) 
+@pytest.fixture +def im3d(): + r = 10 + pad = 10 + im3 = draw.ellipsoid(r, r, r) + im3 = np.pad(im3, pad, mode='constant').astype(np.uint8) + return im3 + + def test_structure_tensor(): square = np.zeros((5, 5)) square[2, 2] = 1 @@ -97,8 +108,8 @@ def test_hessian_matrix_eigvals(): square = np.zeros((5, 5)) square[2, 2] = 4 - Hrr, Hrc, Hcc = hessian_matrix(square, sigma=0.1, order='rc') - l1, l2 = hessian_matrix_eigvals(Hrr, Hrc, Hcc) + H = hessian_matrix(square, sigma=0.1, order='rc') + l1, l2 = hessian_matrix_eigvals(H) assert_almost_equal(l1, np.array([[0, 0, 2, 0, 0], [0, 1, 0, 1, 0], [2, 0, -2, 0, 2], @@ -111,6 +122,26 @@ [0, 0, 0, 0, 0]])) +def test_hessian_matrix_eigvals_3d(im3d): + H = hessian_matrix(im3d) + E = hessian_matrix_eigvals(H) + # test descending order: + e0, e1, e2 = E + assert np.all(e0 >= e1) and np.all(e1 >= e2) + + E0, E1, E2 = E[:, E.shape[1] // 2] # cross section + row_center, col_center = np.array(E0.shape) // 2 + circles = [draw.circle_perimeter(row_center, col_center, radius, + shape=E0.shape) + for radius in range(1, E0.shape[1] // 2 - 1)] + response0 = np.array([np.mean(E0[c]) for c in circles]) + response2 = np.array([np.mean(E2[c]) for c in circles]) + # eigenvalues are negative just inside the sphere, positive just outside + assert np.argmin(response2) < np.argmax(response0) + assert np.min(response2) < 0 + assert np.max(response0) > 0 + + @test_parallel() def test_hessian_matrix_det(): image = np.zeros((5, 5)) @@ -119,10 +150,28 @@ assert_almost_equal(det, 0, decimal=3) +def test_hessian_matrix_det_3d(im3d): + D = hessian_matrix_det(im3d) + D0 = D[D.shape[0] // 2] + row_center, col_center = np.array(D0.shape) // 2 + # testing in 3D is hard. We test this by showing that you get the + # expected flat-then-low-then-high 2nd derivative response in a circle + # around the midplane of the sphere. 
+ circles = [draw.circle_perimeter(row_center, col_center, r, shape=D0.shape) + for r in range(1, D0.shape[1] // 2 - 1)] + response = np.array([np.mean(D0[c]) for c in circles]) + lowest = np.argmin(response) + highest = np.argmax(response) + assert lowest < highest + assert response[lowest] < 0 + assert response[highest] > 0 + + def test_shape_index(): square = np.zeros((5, 5)) square[2, 2] = 4 - s = shape_index(square, sigma=0.1) + with expected_warnings(['divide by zero', 'invalid value']): + s = shape_index(square, sigma=0.1) assert_almost_equal( s, np.array([[ np.nan, np.nan, -0.5, np.nan, np.nan], [ np.nan, 0, np.nan, 0, np.nan], @@ -332,7 +381,8 @@ def test_corner_fast_image_unsupported_error(): img = np.zeros((20, 20, 3)) - assert_raises(ValueError, corner_fast, img) + with testing.raises(ValueError): + corner_fast(img) @test_parallel() @@ -382,14 +432,18 @@ def test_corner_orientations_image_unsupported_error(): img = np.zeros((20, 20, 3)) - assert_raises(ValueError, corner_orientations, img, - np.asarray([[7, 7]]), np.ones((3, 3))) + with testing.raises(ValueError): + corner_orientations( + img, + np.asarray([[7, 7]]), np.ones((3, 3))) def test_corner_orientations_even_shape_error(): img = np.zeros((20, 20)) - assert_raises(ValueError, corner_orientations, img, - np.asarray([[7, 7]]), np.ones((4, 4))) + with testing.raises(ValueError): + corner_orientations( + img, + np.asarray([[7, 7]]), np.ones((4, 4))) @test_parallel() @@ -423,8 +477,3 @@ expected_orientations_degree = np.array([ 45., 135., -45., -135.]) assert_array_equal(actual_orientations_degrees, expected_orientations_degree) - - -if __name__ == '__main__': - from numpy import testing - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/feature/tests/test_daisy.py skimage-0.14.0/skimage/feature/tests/test_daisy.py --- skimage-0.13.1/skimage/feature/tests/test_daisy.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_daisy.py 2018-05-29 01:27:44.000000000 
+0000 @@ -1,15 +1,17 @@ import numpy as np -from numpy.testing import assert_raises, assert_almost_equal +from skimage._shared.testing import assert_almost_equal from numpy import sqrt, ceil from skimage import data from skimage import img_as_float from skimage.feature import daisy +from skimage._shared import testing def test_daisy_color_image_unsupported_error(): img = np.zeros((20, 20, 3)) - assert_raises(ValueError, daisy, img) + with testing.raises(ValueError): + daisy(img) def test_daisy_desc_dims(): @@ -56,7 +58,8 @@ img = img_as_float(data.astronaut()[:64, :64].mean(axis=2)) sigmas = [1, 2] radii = [1, 2] - assert_raises(ValueError, daisy, img, sigmas=sigmas, ring_radii=radii) + with testing.raises(ValueError): + daisy(img, sigmas=sigmas, ring_radii=radii) def test_daisy_normalization(): @@ -89,15 +92,11 @@ for j in range(descs.shape[1]): assert_almost_equal(np.sum(descs[i, j, :]), 0) - assert_raises(ValueError, daisy, img, normalization='does_not_exist') + with testing.raises(ValueError): + daisy(img, normalization='does_not_exist') def test_daisy_visualization(): img = img_as_float(data.astronaut()[:32, :32].mean(axis=2)) descs, descs_img = daisy(img, visualize=True) assert(descs_img.shape == (32, 32, 3)) - - -if __name__ == '__main__': - from numpy import testing - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/feature/tests/test_haar.py skimage-0.14.0/skimage/feature/tests/test_haar.py --- skimage-0.13.1/skimage/feature/tests/test_haar.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_haar.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,153 @@ +from random import shuffle +from itertools import chain + +import pytest + +import numpy as np +from numpy.testing import assert_allclose +from numpy.testing import assert_array_equal + +from skimage.transform import integral_image +from skimage.feature import haar_like_feature +from skimage.feature import haar_like_feature_coord +from skimage.feature import 
draw_haar_like_feature + + +def test_haar_like_feature_error(): + img = np.ones((5, 5), dtype=np.float32) + img_ii = integral_image(img) + + feature_type = 'unknown_type' + with pytest.raises(ValueError): + haar_like_feature(img_ii, 0, 0, 5, 5, feature_type=feature_type) + haar_like_feature_coord(5, 5, feature_type=feature_type) + draw_haar_like_feature(img, 0, 0, 5, 5, feature_type=feature_type) + + feat_coord, feat_type = haar_like_feature_coord(5, 5, 'type-2-x') + with pytest.raises(ValueError): + haar_like_feature(img_ii, 0, 0, 5, 5, feature_type=feat_type[:3], + feature_coord=feat_coord) + + +@pytest.mark.parametrize("dtype", [np.uint8, np.int8, + np.float32, np.float64]) +@pytest.mark.parametrize("feature_type,shape_feature,expected_feature_value", + [('type-2-x', (84,), [0.]), + ('type-2-y', (84,), [0.]), + ('type-3-x', (42,), [-4., -3., -2., -1.]), + ('type-3-y', (42,), [-4., -3., -2., -1.]), + ('type-4', (36,), [0.])]) +def test_haar_like_feature(feature_type, shape_feature, + expected_feature_value, dtype): + # test Haar-like feature on a basic one image + img = np.ones((5, 5), dtype=dtype) + img_ii = integral_image(img) + haar_feature = haar_like_feature(img_ii, 0, 0, 5, 5, + feature_type=feature_type) + assert_allclose(np.sort(np.unique(haar_feature)), expected_feature_value) + + +@pytest.mark.parametrize("dtype", [np.uint8, np.int8, + np.float32, np.float64]) +@pytest.mark.parametrize("feature_type", ['type-2-x', 'type-2-y', + 'type-3-x', 'type-3-y', + 'type-4']) +def test_haar_like_feature_fused_type(dtype, feature_type): + # check that the input type is kept + img = np.ones((5, 5), dtype=dtype) + img_ii = integral_image(img) + expected_dtype = img_ii.dtype + # to avoid overflow, unsigned type are converted to signed + if 'uint' in expected_dtype.name: + expected_dtype = np.dtype(expected_dtype.name.replace('u', '')) + haar_feature = haar_like_feature(img_ii, 0, 0, 5, 5, + feature_type=feature_type) + assert haar_feature.dtype == expected_dtype + + 
+def test_haar_like_feature_list(): + img = np.ones((5, 5), dtype=np.int8) + img_ii = integral_image(img) + feature_type = ['type-2-x', 'type-2-y', 'type-3-x', 'type-3-y', 'type-4'] + haar_list = haar_like_feature(img_ii, 0, 0, 5, 5, + feature_type=feature_type) + haar_all = haar_like_feature(img_ii, 0, 0, 5, 5) + assert_array_equal(haar_list, haar_all) + + +@pytest.mark.parametrize("feature_type", ['type-2-x', 'type-2-y', + 'type-3-x', 'type-3-y', + 'type-4', + ['type-2-y', 'type-3-x', + 'type-4']]) +def test_haar_like_feature_precomputed(feature_type): + img = np.ones((5, 5), dtype=np.int8) + img_ii = integral_image(img) + if isinstance(feature_type, list): + # shuffle the index of the feature to be sure that we are output + # the features in the same order + shuffle(feature_type) + feat_coord, feat_type = zip(*[haar_like_feature_coord(5, 5, feat_t) + for feat_t in feature_type]) + feat_coord = np.concatenate(feat_coord) + feat_type = np.concatenate(feat_type) + else: + feat_coord, feat_type = haar_like_feature_coord(5, 5, feature_type) + haar_feature_precomputed = haar_like_feature(img_ii, 0, 0, 5, 5, + feature_type=feat_type, + feature_coord=feat_coord) + haar_feature = haar_like_feature(img_ii, 0, 0, 5, 5, feature_type) + assert_array_equal(haar_feature_precomputed, haar_feature) + + +@pytest.mark.parametrize("feature_type,height,width,expected_coord", + [('type-2-x', 2, 2, + [[[(0, 0), (0, 0)], [(0, 1), (0, 1)]], + [[(1, 0), (1, 0)], [(1, 1), (1, 1)]]]), + ('type-2-y', 2, 2, + [[[(0, 0), (0, 0)], [(1, 0), (1, 0)]], + [[(0, 1), (0, 1)], [(1, 1), (1, 1)]]]), + ('type-3-x', 3, 3, + [[[(0, 0), (0, 0)], [(0, 1), (0, 1)], + [(0, 2), (0, 2)]], + [[(0, 0), (1, 0)], [(0, 1), (1, 1)], + [(0, 2), (1, 2)]], + [[(1, 0), (1, 0)], [(1, 1), (1, 1)], + [(1, 2), (1, 2)]], + [[(1, 0), (2, 0)], [(1, 1), (2, 1)], + [(1, 2), (2, 2)]], + [[(2, 0), (2, 0)], [(2, 1), (2, 1)], + [(2, 2), (2, 2)]]]), + ('type-3-y', 3, 3, + [[[(0, 0), (0, 0)], [(1, 0), (1, 0)], + [(2, 0), (2, 0)]], + 
[[(0, 0), (0, 1)], [(1, 0), (1, 1)], + [(2, 0), (2, 1)]], + [[(0, 1), (0, 1)], [(1, 1), (1, 1)], + [(2, 1), (2, 1)]], + [[(0, 1), (0, 2)], [(1, 1), (1, 2)], + [(2, 1), (2, 2)]], + [[(0, 2), (0, 2)], [(1, 2), (1, 2)], + [(2, 2), (2, 2)]]]), + ('type-4', 2, 2, + [[[(0, 0), (0, 0)], [(0, 1), (0, 1)], + [(1, 1), (1, 1)], [(1, 0), (1, 0)]]])]) +def test_haar_like_feature_coord(feature_type, height, width, expected_coord): + feat_coord, feat_type = haar_like_feature_coord(width, height, + feature_type) + # convert the output to a full numpy array just for comparison + feat_coord = np.array([hf for hf in feat_coord]) + assert_array_equal(feat_coord, expected_coord) + assert np.all(feat_type == feature_type) + + +@pytest.mark.parametrize("max_n_features,nnz_values", [(None, 46), + (1, 8)]) +def test_draw_haar_like_feature(max_n_features, nnz_values): + img = np.zeros((5, 5), dtype=np.float32) + coord, _ = haar_like_feature_coord(5, 5, 'type-4') + image = draw_haar_like_feature(img, 0, 0, 5, 5, coord, + max_n_features=max_n_features, + random_state=0) + assert image.shape == (5, 5, 3) + assert np.count_nonzero(image) == nnz_values diff -Nru skimage-0.13.1/skimage/feature/tests/test_hog.py skimage-0.14.0/skimage/feature/tests/test_hog.py --- skimage-0.13.1/skimage/feature/tests/test_hog.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_hog.py 2018-05-29 01:27:44.000000000 +0000 @@ -6,15 +6,16 @@ from skimage import feature from skimage import img_as_float from skimage import draw -from numpy.testing import (assert_raises, - assert_almost_equal) +from skimage._shared.testing import assert_almost_equal +from skimage._shared import testing +from skimage._shared.testing import expected_warnings def test_hog_output_size(): img = img_as_float(data.astronaut()[:256, :].mean(axis=2)) fd = feature.hog(img, orientations=9, pixels_per_cell=(8, 8), - cells_per_block=(1, 1)) + cells_per_block=(1, 1), block_norm='L1') assert len(fd) == 9 * (256 // 8) * 
(512 // 8) @@ -27,7 +28,7 @@ output = feature.hog(img, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), block_norm='L1', feature_vector=True, transform_sqrt=False, - visualise=False) + visualize=False) assert_almost_equal(output, correct_output) @@ -39,22 +40,17 @@ output = feature.hog(img, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3), block_norm='L2-Hys', feature_vector=True, transform_sqrt=False, - visualise=False) + visualize=False) assert_almost_equal(output, correct_output) def test_hog_image_size_cell_size_mismatch(): image = data.camera()[:150, :200] fd = feature.hog(image, orientations=9, pixels_per_cell=(8, 8), - cells_per_block=(1, 1)) + cells_per_block=(1, 1), block_norm='L1') assert len(fd) == 9 * (150 // 8) * (200 // 8) -def test_hog_color_image_unsupported_error(): - image = np.zeros((20, 20, 3)) - assert_raises(ValueError, feature.hog, image) - - def test_hog_basic_orientations_and_data_types(): # scenario: # 1) create image (with float values) where upper half is filled by @@ -81,16 +77,20 @@ (hog_float, hog_img_float) = feature.hog( image_float, orientations=4, pixels_per_cell=(8, 8), - cells_per_block=(1, 1), visualise=True, transform_sqrt=False) + cells_per_block=(1, 1), visualize=True, transform_sqrt=False, + block_norm='L1') (hog_uint8, hog_img_uint8) = feature.hog( image_uint8, orientations=4, pixels_per_cell=(8, 8), - cells_per_block=(1, 1), visualise=True, transform_sqrt=False) + cells_per_block=(1, 1), visualize=True, transform_sqrt=False, + block_norm='L1') (hog_float_norm, hog_img_float_norm) = feature.hog( image_float, orientations=4, pixels_per_cell=(8, 8), - cells_per_block=(1, 1), visualise=True, transform_sqrt=True) + cells_per_block=(1, 1), visualize=True, transform_sqrt=True, + block_norm='L1') (hog_uint8_norm, hog_img_uint8_norm) = feature.hog( image_uint8, orientations=4, pixels_per_cell=(8, 8), - cells_per_block=(1, 1), visualise=True, transform_sqrt=True) + cells_per_block=(1, 1), 
visualize=True, transform_sqrt=True, + block_norm='L1') # set to True to enable manual debugging with graphical output, # must be False for automatic testing @@ -124,7 +124,8 @@ assert_almost_equal(hog_float, hog_uint8) assert_almost_equal(hog_img_float, hog_img_uint8) - # resulting features should be almost equal when 'transform_sqrt' is enabled + # resulting features should be almost equal + # when 'transform_sqrt' is enabled # or disabled (for current simple testing image) assert_almost_equal(hog_float, hog_float_norm, decimal=4) assert_almost_equal(hog_float, hog_uint8_norm, decimal=4) @@ -167,8 +168,9 @@ for orientations in range(2, 15): (hog, hog_img) = feature.hog(image, orientations=orientations, pixels_per_cell=(8, 8), - cells_per_block=(1, 1), visualise=True, - transform_sqrt=False) + cells_per_block=(1, 1), visualize=True, + transform_sqrt=False, + block_norm='L1') # set to True to enable manual debugging with graphical output, # must be False for automatic testing @@ -199,15 +201,59 @@ assert_almost_equal(actual, desired, decimal=1) -def test_hog_normalise_none_error_raised(): - img = np.array([1, 2, 3]) - assert_raises(ValueError, feature.hog, img, normalise=True) +def test_hog_visualization_orientation(): + """Test that the visualization produces a line with correct orientation + + The hog visualization is expected to draw line segments perpendicular to + the midpoints of orientation bins. This example verifies that when + orientations=3 and the gradient is entirely in the middle bin (bisected + by the y-axis), the line segment drawn by the visualization is horizontal. 
+ """ + + width = height = 11 + + image = np.zeros((height, width), dtype='float') + image[height // 2:] = 1 + + _, hog_image = feature.hog( + image, + orientations=3, + pixels_per_cell=(width, height), + cells_per_block=(1, 1), + visualize=True, + block_norm='L1' + ) + + middle_index = height // 2 + indices_excluding_middle = [x for x in range(height) if x != middle_index] + + assert (hog_image[indices_excluding_middle, :] == 0).all() + assert (hog_image[middle_index, 1:-1] > 0).all() def test_hog_block_normalization_incorrect_error(): img = np.eye(4) - assert_raises(ValueError, feature.hog, img, block_norm='Linf') + with testing.raises(ValueError): + feature.hog(img, block_norm='Linf') -if __name__ == '__main__': - np.testing.run_module_suite() +@testing.parametrize("shape,multichannel", [ + ((3, 3, 3), False), + ((3, 3), True), + ((3, 3, 3, 3), True), +]) +def test_hog_incorrect_dimensions(shape, multichannel): + img = np.zeros(shape) + with testing.raises(ValueError): + feature.hog(img, multichannel=multichannel, block_norm='L1') + + +def test_hog_output_equivariance_multichannel(): + img = data.astronaut() + img[:, :, (1, 2)] = 0 + hog_ref = feature.hog(img, multichannel=True, block_norm='L1') + + for n in (1, 2): + hog_fact = feature.hog(np.roll(img, n, axis=2), multichannel=True, + block_norm='L1') + assert_almost_equal(hog_ref, hog_fact) diff -Nru skimage-0.13.1/skimage/feature/tests/test_match.py skimage-0.14.0/skimage/feature/tests/test_match.py --- skimage-0.13.1/skimage/feature/tests/test_match.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_match.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,26 +1,28 @@ import numpy as np -from numpy.testing import assert_equal, assert_raises +from skimage._shared.testing import assert_equal from skimage import data from skimage import transform as tf from skimage.color import rgb2gray from skimage.feature import (BRIEF, match_descriptors, corner_peaks, corner_harris) +from 
skimage._shared import testing def test_binary_descriptors_unequal_descriptor_sizes_error(): """Sizes of descriptors of keypoints to be matched should be equal.""" descs1 = np.array([[True, True, False, True], - [False, True, False, True]]) + [False, True, False, True]]) descs2 = np.array([[True, False, False, True, False], - [False, True, True, True, False]]) - assert_raises(ValueError, match_descriptors, descs1, descs2) + [False, True, True, True, False]]) + with testing.raises(ValueError): + match_descriptors(descs1, descs2) def test_binary_descriptors(): descs1 = np.array([[True, True, False, True, True], - [False, True, False, True, True]]) + [False, True, False, True, True]]) descs2 = np.array([[True, False, False, True, False], - [False, False, True, True, True]]) + [False, False, True, True, True]]) matches = match_descriptors(descs1, descs2) assert_equal(matches, [[0, 0], [1, 1]]) @@ -119,6 +121,46 @@ assert_equal(matches, [[1, 0]]) -if __name__ == '__main__': - from numpy import testing - testing.run_module_suite() +def test_max_ratio(): + descs1 = 10 * np.arange(10)[:, None].astype(np.float32) + descs2 = 10 * np.arange(15)[:, None].astype(np.float32) + + descs2[0] = 5.0 + + matches = match_descriptors(descs1, descs2, metric='euclidean', + max_ratio=1.0, cross_check=False) + assert_equal(len(matches), 10) + + matches = match_descriptors(descs1, descs2, metric='euclidean', + max_ratio=0.6, cross_check=False) + assert_equal(len(matches), 10) + + matches = match_descriptors(descs1, descs2, metric='euclidean', + max_ratio=0.5, cross_check=False) + assert_equal(len(matches), 9) + + descs1[0] = 7.5 + + matches = match_descriptors(descs1, descs2, metric='euclidean', + max_ratio=0.5, cross_check=False) + assert_equal(len(matches), 9) + + descs2 = 10 * np.arange(1)[:, None].astype(np.float32) + + matches = match_descriptors(descs1, descs2, metric='euclidean', + max_ratio=1.0, cross_check=False) + assert_equal(len(matches), 10) + + matches = 
match_descriptors(descs1, descs2, metric='euclidean', + max_ratio=0.5, cross_check=False) + assert_equal(len(matches), 10) + + descs1 = 10 * np.arange(1)[:, None].astype(np.float32) + + matches = match_descriptors(descs1, descs2, metric='euclidean', + max_ratio=1.0, cross_check=False) + assert_equal(len(matches), 1) + + matches = match_descriptors(descs1, descs2, metric='euclidean', + max_ratio=0.5, cross_check=False) + assert_equal(len(matches), 1) diff -Nru skimage-0.13.1/skimage/feature/tests/test_orb.py skimage-0.14.0/skimage/feature/tests/test_orb.py --- skimage-0.13.1/skimage/feature/tests/test_orb.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_orb.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,7 @@ import numpy as np -from numpy.testing import (assert_equal, assert_almost_equal, run_module_suite, - assert_raises) +from skimage._shared.testing import assert_equal, assert_almost_equal from skimage.feature import ORB +from skimage._shared import testing from skimage import data from skimage._shared.testing import test_parallel @@ -107,8 +107,5 @@ def test_no_descriptors_extracted_orb(): img = np.ones((128, 128)) detector_extractor = ORB() - assert_raises(RuntimeError, detector_extractor.detect_and_extract, img) - - -if __name__ == '__main__': - run_module_suite() + with testing.raises(RuntimeError): + detector_extractor.detect_and_extract(img) diff -Nru skimage-0.13.1/skimage/feature/tests/test_peak.py skimage-0.14.0/skimage/feature/tests/test_peak.py --- skimage-0.13.1/skimage/feature/tests/test_peak.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_peak.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,7 @@ import numpy as np import unittest -from numpy.testing import (assert_array_almost_equal as assert_close, - assert_equal, assert_raises) +from skimage._shared.testing import assert_array_almost_equal +from skimage._shared.testing import assert_equal from scipy import ndimage as ndi 
from skimage.feature import peak @@ -37,7 +37,7 @@ image[3, 3] = 20 peaks = peak.peak_local_max(image, min_distance=1, threshold_rel=0.5) assert len(peaks) == 1 - assert_close(peaks, [(3, 3)]) + assert_array_almost_equal(peaks, [(3, 3)]) def test_absolute_threshold(self): image = np.zeros((5, 5), dtype=np.uint8) @@ -45,7 +45,7 @@ image[3, 3] = 20 peaks = peak.peak_local_max(image, min_distance=1, threshold_abs=10) assert len(peaks) == 1 - assert_close(peaks, [(3, 3)]) + assert_array_almost_equal(peaks, [(3, 3)]) def test_constant_image(self): image = 128 * np.ones((20, 20), dtype=np.uint8) @@ -445,6 +445,3 @@ min_distance=1, threshold_rel=0, indices=False, exclude_border=False) assert np.all(labels == labelsin) - -if __name__ == '__main__': - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/feature/tests/test_register_translation.py skimage-0.14.0/skimage/feature/tests/test_register_translation.py --- skimage-0.13.1/skimage/feature/tests/test_register_translation.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_register_translation.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,11 +1,13 @@ import numpy as np -from numpy.testing import assert_allclose, assert_raises +from skimage._shared.testing import assert_allclose from skimage.feature.register_translation import (register_translation, _upsampled_dft) from skimage.data import camera, binary_blobs from scipy.ndimage import fourier_shift from skimage import img_as_float +from skimage._shared import testing + def test_correlation(): reference_image = np.fft.fftn(camera()) @@ -73,46 +75,50 @@ shifted_image, space="fourier") assert_allclose(result, -np.array(shift), atol=0.5) - assert_raises(NotImplementedError, register_translation, reference_image, - shifted_image, upsample_factor=100, - space="fourier") + with testing.raises(NotImplementedError): + register_translation( + reference_image, + shifted_image, upsample_factor=100, + space="fourier") def 
test_unknown_space_input(): image = np.ones((5, 5)) - assert_raises(ValueError, register_translation, image, image, - space="frank") + with testing.raises(ValueError): + register_translation( + image, image, + space="frank") def test_wrong_input(): # Dimensionality mismatch image = np.ones((5, 5, 1)) template = np.ones((5, 5)) - assert_raises(ValueError, register_translation, template, image) + with testing.raises(ValueError): + register_translation(template, image) # Greater than 2 dimensions does not support subpixel precision # (TODO: should support 3D at some point.) image = np.ones((5, 5, 5)) template = np.ones((5, 5, 5)) - assert_raises(NotImplementedError, register_translation, - template, image, 2) + with testing.raises(NotImplementedError): + register_translation(template, image, 2) # Size mismatch image = np.ones((5, 5)) template = np.ones((4, 4)) - assert_raises(ValueError, register_translation, template, image) + with testing.raises(ValueError): + register_translation(template, image) def test_mismatch_upsampled_region_size(): - assert_raises(ValueError, _upsampled_dft, np.ones((4, 4)), - upsampled_region_size=[3, 2, 1, 4]) + with testing.raises(ValueError): + _upsampled_dft( + np.ones((4, 4)), + upsampled_region_size=[3, 2, 1, 4]) def test_mismatch_offsets_size(): - assert_raises(ValueError, _upsampled_dft, np.ones((4, 4)), 3, - axis_offsets=[3, 2, 1, 4]) - - -if __name__ == "__main__": - from numpy import testing - testing.run_module_suite() + with testing.raises(ValueError): + _upsampled_dft(np.ones((4, 4)), 3, + axis_offsets=[3, 2, 1, 4]) diff -Nru skimage-0.13.1/skimage/feature/tests/test_template.py skimage-0.14.0/skimage/feature/tests/test_template.py --- skimage-0.13.1/skimage/feature/tests/test_template.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_template.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,10 @@ import numpy as np -from numpy.testing import assert_almost_equal, assert_equal, assert_raises 
+from skimage._shared.testing import assert_almost_equal, assert_equal from skimage import data, img_as_float from skimage.morphology import diamond from skimage.feature import match_template, peak_local_max +from skimage._shared import testing def test_template(): @@ -32,7 +33,7 @@ positions = positions[np.argsort(positions[:, 0])] for xy_target, xy in zip(target_positions, positions): - yield assert_almost_equal, xy, xy_target + assert_almost_equal(xy, xy_target) def test_normalization(): @@ -89,7 +90,8 @@ def test_switched_arguments(): image = np.ones((5, 5)) template = np.ones((3, 3)) - assert_raises(ValueError, match_template, template, image) + with testing.raises(ValueError): + match_template(template, image) def test_pad_input(): @@ -160,15 +162,18 @@ def test_wrong_input(): image = np.ones((5, 5, 1)) template = np.ones((3, 3)) - assert_raises(ValueError, match_template, template, image) + with testing.raises(ValueError): + match_template(template, image) image = np.ones((5, 5)) template = np.ones((3, 3, 2)) - assert_raises(ValueError, match_template, template, image) + with testing.raises(ValueError): + match_template(template, image) image = np.ones((5, 5, 3, 3)) template = np.ones((3, 3, 2)) - assert_raises(ValueError, match_template, template, image) + with testing.raises(ValueError): + match_template(template, image) def test_bounding_values(): @@ -179,8 +184,3 @@ print(result.max()) assert result.max() < 1 + 1e-7 assert result.min() > -1 - 1e-7 - - -if __name__ == "__main__": - from numpy import testing - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/feature/tests/test_texture.py skimage-0.14.0/skimage/feature/tests/test_texture.py --- skimage-0.13.1/skimage/feature/tests/test_texture.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/tests/test_texture.py 2018-05-29 01:27:44.000000000 +0000 @@ -3,9 +3,9 @@ greycoprops, local_binary_pattern, multiblock_lbp) - from skimage._shared.testing import test_parallel from 
skimage.transform import integral_image +from skimage._shared import testing class TestGLCM(): @@ -53,17 +53,21 @@ def test_error_raise_float(self): for dtype in [np.float, np.double, np.float16, np.float32, np.float64]: - np.testing.assert_raises(ValueError, greycomatrix, self.image.astype(dtype), [1], [np.pi], 4) + with testing.raises(ValueError): + greycomatrix(self.image.astype(dtype), [1], [np.pi], 4) def test_error_raise_int_types(self): for dtype in [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64]: - np.testing.assert_raises(ValueError, greycomatrix, self.image.astype(dtype), [1], [np.pi]) + with testing.raises(ValueError): + greycomatrix(self.image.astype(dtype), [1], [np.pi]) def test_error_raise_negative(self): - np.testing.assert_raises(ValueError, greycomatrix, self.image.astype(np.int16) - 1, [1], [np.pi], 4) + with testing.raises(ValueError): + greycomatrix(self.image.astype(np.int16) - 1, [1], [np.pi], 4) def test_error_raise_levels_smaller_max(self): - np.testing.assert_raises(ValueError, greycomatrix, self.image - 1, [1], [np.pi], 3) + with testing.raises(ValueError): + greycomatrix(self.image - 1, [1], [np.pi], 3) def test_image_data_types(self): for dtype in [np.uint16, np.uint32, np.uint64, np.int16, np.int32, np.int64]: @@ -156,8 +160,8 @@ def test_invalid_property(self): result = greycomatrix(self.image, [1], [0], 4) - np.testing.assert_raises(ValueError, greycoprops, - result, 'ABC') + with testing.raises(ValueError): + greycoprops(result, 'ABC') def test_homogeneity(self): result = greycomatrix(self.image, [1], [0, 6], 4, normed=True, @@ -283,7 +287,3 @@ lbp_code = multiblock_lbp(int_img, 0, 0, 3, 3) np.testing.assert_equal(lbp_code, correct_answer) - - -if __name__ == '__main__': - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/feature/tests/test_util.py skimage-0.14.0/skimage/feature/tests/test_util.py --- skimage-0.13.1/skimage/feature/tests/test_util.py 2017-09-26 23:38:27.000000000 +0000 +++ 
skimage-0.14.0/skimage/feature/tests/test_util.py 2018-05-29 01:27:44.000000000 +0000 @@ -3,26 +3,33 @@ import matplotlib.pyplot as plt except ImportError: plt = None -from numpy.testing import assert_equal, assert_raises + +from skimage._shared.testing import assert_equal from skimage.feature.util import (FeatureDetector, DescriptorExtractor, _prepare_grayscale_input_2D, _mask_border_keypoints, plot_matches) +from skimage._shared import testing + def test_feature_detector(): - assert_raises(NotImplementedError, FeatureDetector().detect, None) + with testing.raises(NotImplementedError): + FeatureDetector().detect(None) def test_descriptor_extractor(): - assert_raises(NotImplementedError, DescriptorExtractor().extract, - None, None) + with testing.raises(NotImplementedError): + DescriptorExtractor().extract(None, None) def test_prepare_grayscale_input_2D(): - assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 3, 3))) - assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 1))) - assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 1, 1))) + with testing.raises(ValueError): + _prepare_grayscale_input_2D(np.zeros((3, 3, 3))) + with testing.raises(ValueError): + _prepare_grayscale_input_2D(np.zeros((3, 1))) + with testing.raises(ValueError): + _prepare_grayscale_input_2D(np.zeros((3, 1, 1))) img = _prepare_grayscale_input_2D(np.zeros((3, 3))) img = _prepare_grayscale_input_2D(np.zeros((3, 3, 1))) img = _prepare_grayscale_input_2D(np.zeros((1, 3, 3))) @@ -42,7 +49,7 @@ [0, 0, 0, 0, 1]) -@np.testing.decorators.skipif(plt is None) +@testing.skipif(plt is None, reason="Matplotlib not installed") def test_plot_matches(): fig, ax = plt.subplots(nrows=1, ncols=1) @@ -70,8 +77,5 @@ keypoints_color='r') plot_matches(ax, img1, img2, keypoints1, keypoints2, matches, matches_color='r') - - -if __name__ == '__main__': - from numpy import testing - testing.run_module_suite() + plot_matches(ax, img1, img2, keypoints1, keypoints2, matches, 
+ alignment='vertical') diff -Nru skimage-0.13.1/skimage/feature/texture.py skimage-0.14.0/skimage/feature/texture.py --- skimage-0.13.1/skimage/feature/texture.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/texture.py 2018-05-29 01:27:44.000000000 +0000 @@ -106,7 +106,7 @@ image_max = image.max() - if np.issubdtype(image.dtype, np.float): + if np.issubdtype(image.dtype, np.floating): raise ValueError("Float images are not supported by greycomatrix. " "Convert the image to an unsigned integer type.") @@ -301,7 +301,7 @@ .. [1] Multiresolution Gray-Scale and Rotation Invariant Texture Classification with Local Binary Patterns. Timo Ojala, Matti Pietikainen, Topi Maenpaa. - http://www.rafbis.it/biplab15/images/stories/docenti/Danielriccio/Articoliriferimento/LBP.pdf, 2002. + http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002. .. [2] Face recognition with local binary patterns. Timo Ahonen, Abdenour Hadid, Matti Pietikainen, http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851, @@ -367,10 +367,10 @@ return lbp_code -def draw_multiblock_lbp(img, r, c, width, height, +def draw_multiblock_lbp(image, r, c, width, height, lbp_code=0, - color_greater_block=[1, 1, 1], - color_less_block=[0, 0.69, 0.96], + color_greater_block=(1, 1, 1), + color_less_block=(0, 0.69, 0.96), alpha=0.5 ): """Multi-block local binary pattern visualization. @@ -381,7 +381,7 @@ Parameters ---------- - img : ndarray of float or uint + image : ndarray of float or uint Image on which to visualize the pattern. r : int Row-coordinate of top left corner of a rectangle containing feature. @@ -396,15 +396,15 @@ lbp_code : int The descriptor of feature to visualize. If not provided, the descriptor with 0 value will be used. - color_greater_block : list of 3 floats + color_greater_block : tuple of 3 floats Floats specifying the color for the block that has greater intensity value. They should be in the range [0, 1]. 
Corresponding values define (R, G, B) values. Default value - is white [1, 1, 1]. - color_greater_block : list of 3 floats + is white (1, 1, 1). + color_greater_block : tuple of 3 floats Floats specifying the color for the block that has greater intensity value. They should be in the range [0, 1]. Corresponding values define - (R, G, B) values. Default value is cyan [0, 0.69, 0.96]. + (R, G, B) values. Default value is cyan (0, 0.69, 0.96). alpha : float Value in the range [0, 1] that specifies opacity of visualization. 1 - fully transparent, 0 - opaque. @@ -429,11 +429,11 @@ color_less_block = np.asarray(color_less_block, dtype=np.float64) # Copy array to avoid the changes to the original one. - output = np.copy(img) + output = np.copy(image) # As the visualization uses RGB color we need 3 bands. - if len(img.shape) < 3: - output = gray2rgb(img) + if len(image.shape) < 3: + output = gray2rgb(image) # Colors are specified in floats. output = img_as_float(output) diff -Nru skimage-0.13.1/skimage/feature/_texture.pyx skimage-0.14.0/skimage/feature/_texture.pyx --- skimage-0.13.1/skimage/feature/_texture.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/_texture.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -39,7 +39,7 @@ List of pixel pair angles in radians. levels : int The input image should contain integers in [0, `levels`-1], - where levels indicate the number of grey-levels counted + where levels indicate the number of gray-levels counted (typically 256 for an 8-bit image). 
out : ndarray On input a 4D array of zeros, and on output it contains diff -Nru skimage-0.13.1/skimage/feature/util.py skimage-0.14.0/skimage/feature/util.py --- skimage-0.13.1/skimage/feature/util.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/feature/util.py 2018-05-29 01:27:44.000000000 +0000 @@ -41,7 +41,8 @@ def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches, - keypoints_color='k', matches_color=None, only_matches=False): + keypoints_color='k', matches_color=None, only_matches=False, + alignment='horizontal'): """Plot matched features. Parameters @@ -67,6 +68,9 @@ color is chosen randomly. only_matches : bool, optional Whether to only plot matches and not plot the keypoint locations. + alignment : {'horizontal', 'vertical'}, optional + Whether to show images side by side, ``'horizontal'``, or one above + the other, ``'vertical'``. """ @@ -96,18 +100,28 @@ new_image2[:image2.shape[0], :image2.shape[1]] = image2 image2 = new_image2 - image = np.concatenate([image1, image2], axis=1) - - offset = image1.shape + offset = np.array(image1.shape) + if alignment == 'horizontal': + image = np.concatenate([image1, image2], axis=1) + offset[0] = 0 + elif alignment == 'vertical': + image = np.concatenate([image1, image2], axis=0) + offset[1] = 0 + else: + mesg = ("plot_matches accepts either 'horizontal' or 'vertical' for " + "alignment, but '{}' was given. 
See " + "http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.plot_matches " # noqa + "for details.").format(alignment) + raise ValueError(mesg) if not only_matches: ax.scatter(keypoints1[:, 1], keypoints1[:, 0], facecolors='none', edgecolors=keypoints_color) - ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0], + ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0] + offset[0], facecolors='none', edgecolors=keypoints_color) ax.imshow(image, interpolation='nearest', cmap='gray') - ax.axis((0, 2 * offset[1], offset[0], 0)) + ax.axis((0, image1.shape[1] + offset[1], image1.shape[0] + offset[0], 0)) for i in range(matches.shape[0]): idx1 = matches[i, 0] @@ -119,7 +133,7 @@ color = matches_color ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]), - (keypoints1[idx1, 0], keypoints2[idx2, 0]), + (keypoints1[idx1, 0], keypoints2[idx2, 0] + offset[0]), '-', color=color) diff -Nru skimage-0.13.1/skimage/filters/_frangi.py skimage-0.14.0/skimage/filters/_frangi.py --- skimage-0.13.1/skimage/filters/_frangi.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/_frangi.py 2018-05-29 01:27:44.000000000 +0000 @@ -46,15 +46,13 @@ # Filtering for all sigmas for i, sigma in enumerate(sigmas): # Make 2D hessian - (Drr, Drc, Dcc) = hessian_matrix(image, sigma, order='rc') + D = hessian_matrix(image, sigma, order='rc') # Correct for scale - Drr = (sigma ** 2) * Drr - Drc = (sigma ** 2) * Drc - Dcc = (sigma ** 2) * Dcc + D = np.array(D) * (sigma ** 2) # Calculate (abs sorted) eigenvalues and vectors - (lambda1, lambda2) = hessian_matrix_eigvals(Drr, Drc, Dcc) + lambda1, lambda2 = hessian_matrix_eigvals(D) # Compute some similarity measures lambda1[lambda1 == 0] = 1e-10 @@ -80,7 +78,7 @@ whole image containing such objects. Calculates the eigenvectors of the Hessian to compute the similarity of - an image region to vessels, according to the method described in _[1]. 
+ an image region to vessels, according to the method described in [1]_. Parameters ---------- @@ -139,7 +137,7 @@ image containing such objects. Almost equal to Frangi filter, but uses alternative method of smoothing. - Refer to _[1] to find the differences between Frangi and Hessian filters. + Refer to [1]_ to find the differences between Frangi and Hessian filters. Parameters ---------- diff -Nru skimage-0.13.1/skimage/filters/__init__.py skimage-0.14.0/skimage/filters/__init__.py --- skimage-0.13.1/skimage/filters/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -13,18 +13,10 @@ threshold_isodata, threshold_li, threshold_minimum, threshold_mean, threshold_triangle, threshold_niblack, threshold_sauvola, - try_all_threshold) + try_all_threshold, apply_hysteresis_threshold) from . import rank from .rank import median -from .._shared.utils import deprecated, copy_func - - -gaussian_filter = copy_func(gaussian, name='gaussian_filter') -gaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter) -gabor_filter = copy_func(gabor, name='gabor_filter') -gabor_filter = deprecated('skimage.filters.gabor')(gabor_filter) - __all__ = ['inverse', 'wiener', 'LPIFilter2D', @@ -43,9 +35,6 @@ 'roberts_pos_diag', 'roberts_neg_diag', 'laplace', - 'denoise_tv_chambolle', - 'denoise_bilateral', - 'denoise_tv_bregman', 'rank_order', 'gabor_kernel', 'gabor', @@ -57,9 +46,11 @@ 'threshold_yen', 'threshold_isodata', 'threshold_li', + 'threshold_local', 'threshold_minimum', 'threshold_mean', 'threshold_niblack', 'threshold_sauvola', 'threshold_triangle', + 'apply_hysteresis_threshold', 'rank'] diff -Nru skimage-0.13.1/skimage/filters/rank/generic_cy.pyx skimage-0.14.0/skimage/filters/rank/generic_cy.pyx --- skimage-0.13.1/skimage/filters/rank/generic_cy.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/rank/generic_cy.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -377,7 
+377,7 @@ Py_ssize_t s0, Py_ssize_t s1) nogil: cdef Py_ssize_t i cdef Py_ssize_t max_i - cdef double P, mu1, mu2, q1, new_q1, sigma_b, max_sigma_b + cdef double P, mu1, mu2, q1, new_q1, sigma_b, max_sigma_b, t cdef double mu = 0. # compute local mean @@ -400,7 +400,8 @@ if new_q1 > 0: mu1 = (q1 * mu1 + i * P) / new_q1 mu2 = (mu - new_q1 * mu1) / (1. - new_q1) - sigma_b = new_q1 * (1. - new_q1) * (mu1 - mu2) ** 2 + t = mu1 - mu2 + sigma_b = new_q1 * (1. - new_q1) * (t * t) if sigma_b > max_sigma_b: max_sigma_b = sigma_b max_i = i diff -Nru skimage-0.13.1/skimage/filters/rank/generic.py skimage-0.14.0/skimage/filters/rank/generic.py --- skimage-0.13.1/skimage/filters/rank/generic.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/rank/generic.py 2018-05-29 01:27:44.000000000 +0000 @@ -641,7 +641,7 @@ structuring element). Returns - Output image. + ------- out : 2-D array (same dtype as input image) The result of the local enhance_contrast. diff -Nru skimage-0.13.1/skimage/filters/rank/tests/test_rank.py skimage-0.14.0/skimage/filters/rank/tests/test_rank.py --- skimage-0.13.1/skimage/filters/rank/tests/test_rank.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/rank/tests/test_rank.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,7 @@ import os import numpy as np -from numpy.testing import run_module_suite, assert_equal, assert_raises +from skimage._shared.testing import assert_equal +from skimage._shared import testing import skimage from skimage import img_as_ubyte, img_as_float @@ -252,7 +253,8 @@ selem = disk(20) image = (np.random.rand(500, 500) * 256).astype(np.uint8) out = image - assert_raises(NotImplementedError, rank.mean, image, selem, out=out) + with testing.raises(NotImplementedError): + rank.mean(image, selem, out=out) def test_compare_autolevels(self): @@ -725,7 +727,3 @@ assert_equal(rank.median(a), rank.median(a, full_selem)) assert rank.median(a)[1, 1] == 0 assert rank.median(a, disk(1))[1, 1] == 1 - - 
-if __name__ == "__main__": - run_module_suite() diff -Nru skimage-0.13.1/skimage/filters/tests/test_edges.py skimage-0.14.0/skimage/filters/tests/test_edges.py --- skimage-0.13.1/skimage/filters/tests/test_edges.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/tests/test_edges.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,11 @@ import numpy as np -from numpy.testing import (assert_array_almost_equal as assert_close, - assert_, assert_allclose) - from skimage import filters from skimage.filters.edges import _mask_filter_result +from skimage._shared import testing +from skimage._shared.testing import (assert_array_almost_equal, + assert_, assert_allclose) + def test_roberts_zeros(): """Roberts' filter on an array of all zeros.""" @@ -19,7 +20,7 @@ np.tri(10, 10, -2).astype(bool).transpose()) expected = _mask_filter_result(expected, None) result = filters.roberts(image).astype(bool) - assert_close(result, expected) + assert_array_almost_equal(result, expected) def test_roberts_diagonal2(): @@ -29,7 +30,7 @@ np.tri(10, 10, -2).astype(bool).transpose()) expected = _mask_filter_result(expected, None) result = filters.roberts(image).astype(bool) - assert_close(result, expected) + assert_array_almost_equal(result, expected) def test_sobel_zeros(): @@ -363,7 +364,10 @@ assert (np.all(result == 0)) -def test_horizontal_mask_line(): +@testing.parametrize("grad_func", ( + filters.prewitt_h, filters.sobel_h, filters.scharr_h +)) +def test_horizontal_mask_line(grad_func): """Horizontal edge filters mask pixels surrounding input mask.""" vgrad, _ = np.mgrid[:1:11j, :1:11j] # vertical gradient with spacing 0.1 vgrad[5, :] = 1 # bad horizontal line @@ -375,11 +379,14 @@ expected[1:-1, 1:-1] = 0.2 # constant gradient for most of image, expected[4:7, 1:-1] = 0 # but line and neighbors masked - for grad_func in (filters.prewitt_h, filters.sobel_h, filters.scharr_h): - result = grad_func(vgrad, mask) - yield assert_close, result, expected + result = 
grad_func(vgrad, mask) + assert_allclose(result, expected) + -def test_vertical_mask_line(): +@testing.parametrize("grad_func", ( + filters.prewitt_v, filters.sobel_v, filters.scharr_v +)) +def test_vertical_mask_line(grad_func): """Vertical edge filters mask pixels surrounding input mask.""" _, hgrad = np.mgrid[:1:11j, :1:11j] # horizontal gradient with spacing 0.1 hgrad[:, 5] = 1 # bad vertical line @@ -391,9 +398,8 @@ expected[1:-1, 1:-1] = 0.2 # constant gradient for most of image, expected[1:-1, 4:7] = 0 # but line and neighbors masked - for grad_func in (filters.prewitt_v, filters.sobel_v, filters.scharr_v): - result = grad_func(hgrad, mask) - yield assert_close, result, expected + result = grad_func(hgrad, mask) + assert_allclose(result, expected) def test_range(): @@ -410,8 +416,3 @@ "Maximum of `{0}` is larger than 1".format( detector.__name__) ) - - -if __name__ == "__main__": - from numpy import testing - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/filters/tests/test_frangi.py skimage-0.14.0/skimage/filters/tests/test_frangi.py --- skimage-0.13.1/skimage/filters/tests/test_frangi.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/tests/test_frangi.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,11 @@ import numpy as np -from numpy.testing import assert_equal, assert_almost_equal, assert_allclose from skimage.filters import frangi, hessian from skimage.data import camera from skimage.util import crop +from skimage._shared.testing import (assert_equal, assert_almost_equal, + assert_allclose) + def test_null_matrix(): a = np.zeros((3, 3)) @@ -29,10 +31,6 @@ def test_cropped_camera_image(): image = crop(camera(), ((206, 206), (206, 206))) assert_allclose(frangi(image), np.zeros((100, 100)), atol=1e-03) - assert_allclose(frangi(image, black_ridges=True), np.zeros((100,100)), atol=1e-03) + assert_allclose(frangi(image, black_ridges=True), + np.zeros((100, 100)), atol=1e-03) assert_allclose(hessian(image), np.ones((100, 
100)), atol=1-1e-07) - - -if __name__ == "__main__": - from numpy import testing - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/filters/tests/test_gaussian.py skimage-0.14.0/skimage/filters/tests/test_gaussian.py --- skimage-0.13.1/skimage/filters/tests/test_gaussian.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/tests/test_gaussian.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,16 +1,19 @@ import numpy as np -from numpy.testing import assert_raises from skimage.filters._gaussian import gaussian +from skimage._shared import testing from skimage._shared._warnings import expected_warnings def test_negative_sigma(): a = np.zeros((3, 3)) a[1, 1] = 1. - assert_raises(ValueError, gaussian, a, sigma=-1.0) - assert_raises(ValueError, gaussian, a, sigma=[-1.0, 1.0]) - assert_raises(ValueError, gaussian, a, - sigma=np.asarray([-1.0, 1.0])) + with testing.raises(ValueError): + gaussian(a, sigma=-1.0) + with testing.raises(ValueError): + gaussian(a, sigma=[-1.0, 1.0]) + with testing.raises(ValueError): + gaussian(a, + sigma=np.asarray([-1.0, 1.0])) def test_null_sigma(): @@ -58,7 +61,3 @@ def test_preserve_range(): img = np.array([[10.0, -10.0], [-4, 3]], dtype=np.float32) gaussian(img, 1, preserve_range=True) - -if __name__ == "__main__": - from numpy import testing - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/filters/tests/test_lpi_filter.py skimage-0.14.0/skimage/filters/tests/test_lpi_filter.py --- skimage-0.13.1/skimage/filters/tests/test_lpi_filter.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/tests/test_lpi_filter.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,17 +1,19 @@ import numpy as np -from numpy.testing import (assert_raises, assert_, assert_equal, - run_module_suite) +from numpy.testing import assert_, assert_equal +from skimage._shared import testing +import unittest from skimage import data from skimage.filters import LPIFilter2D, inverse, wiener -class TestLPIFilter2D(object): 
+class TestLPIFilter2D(unittest.TestCase): img = data.camera()[:50, :50] def filt_func(self, r, c): return np.exp(-np.hypot(r, c) / 1) + @testing.fixture(autouse=True) def setUp(self): self.f = LPIFilter2D(self.filt_func) @@ -53,8 +55,5 @@ assert_((g - g1[::-1, ::-1]).sum() < 1) def test_non_callable(self): - assert_raises(ValueError, LPIFilter2D, None) - - -if __name__ == "__main__": - run_module_suite() + with testing.raises(ValueError): + LPIFilter2D(None) diff -Nru skimage-0.13.1/skimage/filters/tests/test_thresholding.py skimage-0.14.0/skimage/filters/tests/test_thresholding.py --- skimage-0.13.1/skimage/filters/tests/test_thresholding.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/tests/test_thresholding.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,8 +1,5 @@ import numpy as np from scipy import ndimage as ndi -from numpy.testing import (assert_equal, - assert_almost_equal, - assert_raises) import skimage from skimage import data @@ -19,6 +16,8 @@ threshold_triangle, threshold_minimum, _mean_std) +from skimage._shared import testing +from skimage._shared.testing import assert_equal, assert_almost_equal class TestSimpleImage(): @@ -52,7 +51,8 @@ assert 2 <= threshold_li(image) < 3 def test_li_constant_image(self): - assert_raises(ValueError, threshold_li, np.ones((10,10))) + with testing.raises(ValueError): + threshold_li(np.ones((10, 10))) def test_yen(self): assert threshold_yen(self.image) == 2 @@ -216,7 +216,8 @@ def test_otsu_one_color_image(): img = np.ones((10, 10), dtype=np.uint8) - assert_raises(ValueError, threshold_otsu, img) + with testing.raises(ValueError): + threshold_otsu(img) def test_li_camera_image(): @@ -256,7 +257,8 @@ def test_adaptive_even_block_size_error(): img = data.camera() - assert_raises(ValueError, threshold_local, img, block_size=4) + with testing.raises(ValueError): + threshold_local(img, block_size=4) def test_isodata_camera_image(): @@ -343,9 +345,11 @@ threshold = threshold_minimum(img) 
assert_equal(threshold, 95) + def test_threshold_minimum_failure(): img = np.zeros((16*16), dtype=np.uint8) - assert_raises(RuntimeError, threshold_minimum, img) + with testing.raises(RuntimeError): + threshold_minimum(img) def test_mean(): @@ -419,5 +423,11 @@ np.testing.assert_allclose(s, expected_s) -if __name__ == '__main__': - np.testing.run_module_suite() +def test_niblack_sauvola_pathological_image(): + # For certain values, floating point error can cause + # E(X^2) - (E(X))^2 to be negative, and taking the square root of this + # resulted in NaNs. Here we check that these are safely caught. + # see https://github.com/scikit-image/scikit-image/issues/3007 + value = 0.03082192 + 2.19178082e-09 + src_img = np.full((4, 4), value).astype(np.float64) + assert not np.any(np.isnan(threshold_niblack(src_img))) diff -Nru skimage-0.13.1/skimage/filters/thresholding.py skimage-0.14.0/skimage/filters/thresholding.py --- skimage-0.13.1/skimage/filters/thresholding.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/filters/thresholding.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,13 +2,11 @@ import math import numpy as np from scipy import ndimage as ndi -from scipy.ndimage import filters as ndif from collections import OrderedDict from ..exposure import histogram from .._shared.utils import assert_nD, warn, deprecated from ..transform import integral_image -from .. import util -from skimage import dtype_limits +from ..util import crop, dtype_limits __all__ = ['try_all_threshold', @@ -17,11 +15,13 @@ 'threshold_yen', 'threshold_isodata', 'threshold_li', + 'threshold_local', 'threshold_minimum', 'threshold_mean', 'threshold_niblack', 'threshold_sauvola', - 'threshold_triangle'] + 'threshold_triangle', + 'apply_hysteresis_threshold'] def _try_all(image, methods=None, figsize=None, num_cols=2, verbose=True): @@ -51,8 +51,7 @@ num_rows = math.ceil((len(methods) + 1.) 
/ num_cols) num_rows = int(num_rows) # Python 2.7 support fig, ax = plt.subplots(num_rows, num_cols, figsize=figsize, - sharex=True, sharey=True, - subplot_kw={'adjustable': 'box-forced'}) + sharex=True, sharey=True) ax = ax.ravel() ax[0].imshow(image, cmap=plt.cm.gray) @@ -354,10 +353,10 @@ """Return threshold value(s) based on ISODATA method. Histogram-based threshold, known as Ridler-Calvard method or inter-means. - Threshold values returned satisfy the following equality: + Threshold values returned satisfy the following equality:: - `threshold = (image[image <= threshold].mean() +` - `image[image > threshold].mean()) / 2.0` + threshold = (image[image <= threshold].mean() + + image[image > threshold].mean()) / 2.0 That is, returned thresholds are intensities that separate the image into two groups of pixels, where the threshold intensity is midway between the @@ -601,7 +600,7 @@ smooth_hist = np.copy(hist).astype(np.float64) for counter in range(max_iter): - smooth_hist = ndif.uniform_filter1d(smooth_hist, 3) + smooth_hist = ndi.uniform_filter1d(smooth_hist, 3) maximum_idxs = find_local_maxima_idx(smooth_hist) if len(maximum_idxs) < 3: break @@ -771,10 +770,12 @@ kern[indices] = (-1) ** (image.ndim % 2 != np.sum(indices) % 2) sum_full = ndi.correlate(integral, kern, mode='constant') - m = util.crop(sum_full, (left_pad, right_pad)) / (w ** image.ndim) + m = crop(sum_full, (left_pad, right_pad)) / (w ** image.ndim) sum_sq_full = ndi.correlate(integral_sq, kern, mode='constant') - g2 = util.crop(sum_sq_full, (left_pad, right_pad)) / (w ** image.ndim) - s = np.sqrt(g2 - m * m) + g2 = crop(sum_sq_full, (left_pad, right_pad)) / (w ** image.ndim) + # Note: we use np.clip because g2 is not guaranteed to be greater than + # m*m when floating point error is considered + s = np.sqrt(np.clip(g2 - m * m, 0, None)) return m, s @@ -782,9 +783,9 @@ """Applies Niblack local threshold to an array. 
A threshold T is calculated for every pixel in the image using the - following formula: + following formula:: - T = m(x,y) - k * s(x,y) + T = m(x,y) - k * s(x,y) where m(x,y) and s(x,y) are the mean and standard deviation of pixel (x,y) neighborhood defined by a rectangular window with size w @@ -830,9 +831,9 @@ modification of Niblack technique. In the original method a threshold T is calculated for every pixel - in the image using the following formula: + in the image using the following formula:: - T = m(x,y) * (1 + k * ((s(x,y) / R) - 1)) + T = m(x,y) * (1 + k * ((s(x,y) / R) - 1)) where m(x,y) and s(x,y) are the mean and standard deviation of pixel (x,y) neighborhood defined by a rectangular window with size w @@ -873,11 +874,58 @@ -------- >>> from skimage import data >>> image = data.page() - >>> binary_sauvola = threshold_sauvola(image, - ... window_size=15, k=0.2) + >>> t_sauvola = threshold_sauvola(image, window_size=15, k=0.2) + >>> binary_image = image > t_sauvola """ if r is None: imin, imax = dtype_limits(image, clip_negative=False) r = 0.5 * (imax - imin) m, s = _mean_std(image, window_size) return m * (1 + k * ((s / r) - 1)) + + +def apply_hysteresis_threshold(image, low, high): + """Apply hysteresis thresholding to `image`. + + This algorithm finds regions where `image` is greater than `high` + OR `image` is greater than `low` *and* that region is connected to + a region greater than `high`. + + Parameters + ---------- + image : array, shape (M,[ N, ..., P]) + Grayscale input image. + low : float, or array of same shape as `image` + Lower threshold. + high : float, or array of same shape as `image` + Higher threshold. + + Returns + ------- + thresholded : array of bool, same shape as `image` + Array in which `True` indicates the locations where `image` + was above the hysteresis threshold. 
+ + Examples + -------- + >>> image = np.array([1, 2, 3, 2, 1, 2, 1, 3, 2]) + >>> apply_hysteresis_threshold(image, 1.5, 2.5).astype(int) + array([0, 1, 1, 1, 0, 0, 0, 1, 1]) + + References + ---------- + .. [1] J. Canny. A computational approach to edge detection. + IEEE Transactions on Pattern Analysis and Machine Intelligence. + 1986; vol. 8, pp.679-698. + DOI: 10.1109/TPAMI.1986.4767851 + """ + low = np.clip(low, a_min=None, a_max=high) # ensure low always below high + mask_low = image > low + mask_high = image > high + # Connected components of mask_low + labels_low, num_labels = ndi.label(mask_low) + # Check which connected components contain pixels from mask_high + sums = ndi.sum(mask_high, labels_low, np.arange(num_labels + 1)) + connected_to_high = sums > 0 + thresholded = connected_to_high[labels_low] + return thresholded diff -Nru skimage-0.13.1/skimage/future/graph/graph_cut.py skimage-0.14.0/skimage/future/graph/graph_cut.py --- skimage-0.13.1/skimage/future/graph/graph_cut.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/future/graph/graph_cut.py 2018-05-29 01:27:44.000000000 +0000 @@ -111,7 +111,7 @@ >>> from skimage import data, segmentation >>> from skimage.future import graph >>> img = data.astronaut() - >>> labels = segmentation.slic(img, compactness=30, n_segments=400) + >>> labels = segmentation.slic(img) >>> rag = graph.rag_mean_color(img, labels, mode='similarity') >>> new_labels = graph.cut_normalized(labels, rag) diff -Nru skimage-0.13.1/skimage/future/graph/rag.py skimage-0.14.0/skimage/future/graph/rag.py --- skimage-0.13.1/skimage/future/graph/rag.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/future/graph/rag.py 2018-05-29 01:27:44.000000000 +0000 @@ -5,9 +5,6 @@ from scipy import sparse import math from ... 
import measure, segmentation, util, color -from matplotlib import colors, cm -from matplotlib import pyplot as plt -from matplotlib.collections import LineCollection def _edge_generator_from_csr(csr_matrix): @@ -111,7 +108,7 @@ """ The Region Adjacency Graph (RAG) of an image, subclasses - `networx.Graph `_ + `networx.Graph `_ Parameters ---------- @@ -450,7 +447,7 @@ return rag -def show_rag(labels, rag, img, border_color='black', edge_width=1.5, +def show_rag(labels, rag, image, border_color='black', edge_width=1.5, edge_cmap='magma', img_cmap='bone', in_place=True, ax=None): """Show a Region Adjacency Graph on an image. @@ -464,7 +461,7 @@ The labelled image. rag : RAG The Region Adjacency Graph. - img : ndarray, shape (M, N[, 3]) + image : ndarray, shape (M, N[, 3]) Input image. If `colormap` is `None`, the image should be in RGB format. border_color : color spec, optional @@ -493,29 +490,34 @@ -------- >>> from skimage import data, segmentation >>> from skimage.future import graph + >>> import matplotlib.pyplot as plt + >>> >>> img = data.coffee() >>> labels = segmentation.slic(img) >>> g = graph.rag_mean_color(img, labels) >>> lc = graph.show_rag(labels, g, img) >>> cbar = plt.colorbar(lc) """ + from matplotlib import colors, cm + from matplotlib import pyplot as plt + from matplotlib.collections import LineCollection if not in_place: rag = rag.copy() if ax is None: fig, ax = plt.subplots() - out = util.img_as_float(img, force_copy=True) + out = util.img_as_float(image, force_copy=True) if img_cmap is None: - if img.ndim < 3 or img.shape[2] not in [3, 4]: + if image.ndim < 3 or image.shape[2] not in [3, 4]: msg = 'If colormap is `None`, an RGB or RGBA image should be given' raise ValueError(msg) # Ignore the alpha channel - out = img[:, :, :3] + out = image[:, :, :3] else: img_cmap = cm.get_cmap(img_cmap) - out = color.rgb2gray(img) + out = color.rgb2gray(image) # Ignore the alpha channel out = img_cmap(out)[:, :, :3] diff -Nru 
skimage-0.13.1/skimage/future/graph/tests/test_rag.py skimage-0.14.0/skimage/future/graph/tests/test_rag.py --- skimage-0.13.1/skimage/future/graph/tests/test_rag.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/future/graph/tests/test_rag.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,8 @@ import numpy as np from skimage.future import graph from skimage._shared.version_requirements import is_installed -from numpy.testing.decorators import skipif from skimage import segmentation -from numpy import testing +from skimage._shared import testing def max_edge(g, src, dst, n): @@ -13,7 +12,8 @@ return {'weight': max(w1, w2)} -@skipif(not is_installed('networkx')) +@testing.skipif(not is_installed('networkx'), + reason="networkx not installed") def test_rag_merge(): g = graph.rag.RAG() @@ -48,7 +48,8 @@ assert list(g.edges()) == [] -@skipif(not is_installed('networkx')) +@testing.skipif(not is_installed('networkx'), + reason="networkx not installed") def test_threshold_cut(): img = np.zeros((100, 100, 3), dtype='uint8') @@ -73,7 +74,8 @@ assert new_labels.max() == 1 -@skipif(not is_installed('networkx')) +@testing.skipif(not is_installed('networkx'), + reason="networkx not installed") def test_cut_normalized(): img = np.zeros((100, 100, 3), dtype='uint8') @@ -100,14 +102,16 @@ assert new_labels.max() == 1 -@skipif(not is_installed('networkx')) +@testing.skipif(not is_installed('networkx'), + reason="networkx not installed") def test_rag_error(): img = np.zeros((10, 10, 3), dtype='uint8') labels = np.zeros((10, 10), dtype='uint8') labels[:5, :] = 0 labels[5:, :] = 1 - testing.assert_raises(ValueError, graph.rag_mean_color, img, labels, - 2, 'non existant mode') + with testing.raises(ValueError): + graph.rag_mean_color(img, labels, + 2, 'non existant mode') def _weight_mean_color(graph, src, dst, n): @@ -130,7 +134,8 @@ _weight_mean_color) -@skipif(not is_installed('networkx')) +@testing.skipif(not is_installed('networkx'), + reason="networkx not 
installed") def test_rag_hierarchical(): img = np.zeros((8, 8, 3), dtype='uint8') labels = np.zeros((8, 8), dtype='uint8') @@ -161,7 +166,8 @@ assert np.all(result == result[0, 0]) -@skipif(not is_installed('networkx')) +@testing.skipif(not is_installed('networkx'), + reason="networkx not installed") def test_ncut_stable_subgraph(): """ Test to catch an error thrown when subgraph has all equal edges. """ diff -Nru skimage-0.13.1/skimage/future/__init__.py skimage-0.14.0/skimage/future/__init__.py --- skimage-0.13.1/skimage/future/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/future/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -6,5 +6,7 @@ """ from . import graph +from .manual_segmentation import manual_polygon_segmentation, manual_lasso_segmentation -__all__ = ['graph'] + +__all__ = ['graph', 'manual_lasso_segmentation', 'manual_polygon_segmentation'] diff -Nru skimage-0.13.1/skimage/future/manual_segmentation.py skimage-0.14.0/skimage/future/manual_segmentation.py --- skimage-0.13.1/skimage/future/manual_segmentation.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/future/manual_segmentation.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,217 @@ +from functools import reduce +import numpy as np +import matplotlib +import matplotlib.pyplot as plt +from matplotlib.patches import Polygon +from matplotlib.collections import PatchCollection +from ..draw import polygon + + +LEFT_CLICK = 1 +RIGHT_CLICK = 3 + + +def _mask_from_vertices(vertices, shape, label): + mask = np.zeros(shape, dtype=int) + pr = [y for x, y in vertices] + pc = [x for x, y in vertices] + rr, cc = polygon(pr, pc, shape) + mask[rr, cc] = label + return mask + + +def _draw_polygon(ax, vertices, alpha=0.4): + polygon = Polygon(vertices, closed=True) + p = PatchCollection([polygon], match_original=True, alpha=alpha) + polygon_object = ax.add_collection(p) + plt.draw() + return polygon_object + + +def manual_polygon_segmentation(image, alpha=0.4, 
return_all=False): + """Return a label image based on polygon selections made with the mouse. + + Parameters + ---------- + image : (M, N[, 3]) array + Grayscale or RGB image. + + alpha : float, optional + Transparency value for polygons drawn over the image. + + return_all : bool, optional + If True, an array containing each separate polygon drawn is returned. + (The polygons may overlap.) If False (default), latter polygons + "overwrite" earlier ones where they overlap. + + Returns + ------- + labels : array of int, shape ([Q, ]M, N) + The segmented regions. If mode is `'separate'`, the leading dimension + of the array corresponds to the number of regions that the user drew. + + Notes + ----- + Use left click to select the vertices of the polygon + and right click to confirm the selection once all vertices are selected. + + Examples + -------- + >>> from skimage import data, future, io + >>> camera = data.camera() + >>> mask = future.manual_polygon_segmentation(camera) # doctest: +SKIP + >>> io.imshow(mask) # doctest: +SKIP + >>> io.show() # doctest: +SKIP + """ + list_of_vertex_lists = [] + polygons_drawn = [] + + temp_list = [] + preview_polygon_drawn = [] + + if image.ndim not in (2, 3): + raise ValueError('Only 2D grayscale or RGB images are supported.') + + fig, ax = plt.subplots() + fig.subplots_adjust(bottom=0.2) + ax.imshow(image, cmap="gray") + ax.set_axis_off() + + def _undo(*args, **kwargs): + if list_of_vertex_lists: + list_of_vertex_lists.pop() + # Remove last polygon from list of polygons... + last_poly = polygons_drawn.pop() + # ... 
then from the plot + last_poly.remove() + fig.canvas.draw_idle() + + undo_pos = fig.add_axes([0.85, 0.05, 0.075, 0.075]) + undo_button = matplotlib.widgets.Button(undo_pos, u'\u27F2') + undo_button.on_clicked(_undo) + + def _extend_polygon(event): + # Do not record click events outside axis or in undo button + if event.inaxes is None or event.inaxes is undo_pos: + return + # Do not record click events when toolbar is active + if fig.canvas.manager.toolbar._active is not None: + return + + if event.button == LEFT_CLICK: # Select vertex + temp_list.append([event.xdata, event.ydata]) + # Remove previously drawn preview polygon if any. + if preview_polygon_drawn: + poly = preview_polygon_drawn.pop() + poly.remove() + + # Preview polygon with selected vertices. + polygon = _draw_polygon(ax, temp_list, alpha=(alpha / 1.4)) + preview_polygon_drawn.append(polygon) + + elif event.button == RIGHT_CLICK: # Confirm the selection + if not temp_list: + return + + # Store the vertices of the polygon as shown in preview. + # Redraw polygon and store it in polygons_drawn so that + # `_undo` works correctly. + list_of_vertex_lists.append(temp_list[:]) + polygon_object = _draw_polygon(ax, temp_list, alpha=alpha) + polygons_drawn.append(polygon_object) + + # Empty the temporary variables. + preview_poly = preview_polygon_drawn.pop() + preview_poly.remove() + del temp_list[:] + + plt.draw() + + fig.canvas.mpl_connect('button_press_event', _extend_polygon) + + plt.show(block=True) + + labels = (_mask_from_vertices(vertices, image.shape[:2], i) + for i, vertices in enumerate(list_of_vertex_lists, start=1)) + if return_all: + return np.stack(labels) + else: + return reduce(np.maximum, labels, np.broadcast_to(0, image.shape[:2])) + + +def manual_lasso_segmentation(image, alpha=0.4, return_all=False): + """Return a label image based on freeform selections made with the mouse. + + Parameters + ---------- + image : (M, N[, 3]) array + Grayscale or RGB image. 
+ + alpha : float, optional + Transparency value for polygons drawn over the image. + + return_all : bool, optional + If True, an array containing each separate polygon drawn is returned. + (The polygons may overlap.) If False (default), latter polygons + "overwrite" earlier ones where they overlap. + + Returns + ------- + labels : array of int, shape ([Q, ]M, N) + The segmented regions. If mode is `'separate'`, the leading dimension + of the array corresponds to the number of regions that the user drew. + + Notes + ----- + Press and hold the left mouse button to draw around each object. + + Examples + -------- + >>> from skimage import data, future, io + >>> camera = data.camera() + >>> mask = future.manual_lasso_segmentation(camera) # doctest: +SKIP + >>> io.imshow(mask) # doctest: +SKIP + >>> io.show() # doctest: +SKIP + """ + list_of_vertex_lists = [] + polygons_drawn = [] + + if image.ndim not in (2, 3): + raise ValueError('Only 2D grayscale or RGB images are supported.') + + fig, ax = plt.subplots() + fig.subplots_adjust(bottom=0.2) + ax.imshow(image, cmap="gray") + ax.set_axis_off() + + def _undo(*args, **kwargs): + if list_of_vertex_lists: + list_of_vertex_lists.pop() + # Remove last polygon from list of polygons... + last_poly = polygons_drawn.pop() + # ... 
then from the plot + last_poly.remove() + fig.canvas.draw_idle() + + undo_pos = fig.add_axes([0.85, 0.05, 0.075, 0.075]) + undo_button = matplotlib.widgets.Button(undo_pos, u'\u27F2') + undo_button.on_clicked(_undo) + + def _on_lasso_selection(vertices): + if len(vertices) < 3: + return + list_of_vertex_lists.append(vertices) + polygon_object = _draw_polygon(ax, vertices, alpha=alpha) + polygons_drawn.append(polygon_object) + plt.draw() + + lasso = matplotlib.widgets.LassoSelector(ax, _on_lasso_selection) + + plt.show(block=True) + + labels = (_mask_from_vertices(vertices, image.shape[:2], i) + for i, vertices in enumerate(list_of_vertex_lists, start=1)) + if return_all: + return np.stack(labels) + else: + return reduce(np.maximum, labels, np.broadcast_to(0, image.shape[:2])) diff -Nru skimage-0.13.1/skimage/graph/tests/test_anisotropy.py skimage-0.14.0/skimage/graph/tests/test_anisotropy.py --- skimage-0.13.1/skimage/graph/tests/test_anisotropy.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/graph/tests/test_anisotropy.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,12 +1,10 @@ -import skimage.graph.mcp as mcp -from numpy.testing import (assert_array_equal, - assert_almost_equal, - ) - import numpy as np +import skimage.graph.mcp as mcp + +from skimage._shared.testing import assert_array_equal -a = np.ones((8, 8), dtype=np.float32) +a = np.ones((8, 8), dtype=np.float32) horizontal_ramp = np.array([[ 0., 1., 2., 3., 4., 5., 6., 7.,], [ 0., 1., 2., 3., 4., 5., 6., 7.,], @@ -28,14 +26,12 @@ def test_anisotropy(): - # Create seeds; vertical seeds create a horizonral ramp - seeds_for_horizontal = [(i, 0) for i in range(8) ] - seeds_for_vertcal = [(0, i) for i in range(8) ] - - + seeds_for_horizontal = [(i, 0) for i in range(8)] + seeds_for_vertcal = [(0, i) for i in range(8)] + for sy in range(1, 5): - for sx in range(1,5): + for sx in range(1, 5): sampling = sy, sx # Trace horizontally m1 = mcp.MCP_Geometric(a, sampling=sampling, fully_connected=True) 
@@ -47,7 +43,3 @@ # Check assert_array_equal(costs1, horizontal_ramp * sx) assert_array_equal(costs2, vertical_ramp * sy) - - -if __name__ == "__main__": - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/graph/tests/test_connect.py skimage-0.14.0/skimage/graph/tests/test_connect.py --- skimage-0.13.1/skimage/graph/tests/test_connect.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/graph/tests/test_connect.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,17 +1,15 @@ +import numpy as np import skimage.graph.mcp as mcp # import stentseg.graph._mcp as mcp -from numpy.testing import (assert_array_equal, - assert_almost_equal, - ) - -import numpy as np +from skimage._shared.testing import assert_array_equal -a = np.ones((8, 8), dtype=np.float32) +a = np.ones((8, 8), dtype=np.float32) count = 0 + + class MCP(mcp.MCP_Connect): - def _reset(self): """ Reset the id map. """ @@ -19,7 +17,6 @@ self._conn = {} self._bestconn = {} - def create_connection(self, id1, id2, pos1, pos2, cost1, cost2): # Process data hash = min(id1, id2), max(id1, id2) @@ -34,10 +31,9 @@ def test_connections(): - # Create MCP object with three seed points mcp = MCP(a) - costs, traceback = mcp.find_costs([ (1,1), (7,7), (1,7) ]) + costs, traceback = mcp.find_costs([(1, 1), (7, 7), (1, 7)]) # Test that all three seed points are connected connections = set(mcp._conn.keys()) @@ -52,32 +48,28 @@ assert n1 == n2 # For seed 0 and 1 - cost, pos1, pos2 = mcp._bestconn[(0,1)] + cost, pos1, pos2 = mcp._bestconn[(0, 1)] # Test meeting points - assert (pos1, pos2) == ( (3,3), (4,4) ) + assert (pos1, pos2) == ((3, 3), (4, 4)) # Test the whole path path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2))) - assert_array_equal(path, - [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)]) + assert_array_equal( + path, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)]) # For seed 1 and 2 - cost, pos1, pos2 = mcp._bestconn[(1,2)] + cost, pos1, pos2 = mcp._bestconn[(1, 2)] # 
Test meeting points - assert (pos1, pos2) == ( (3,7), (4,7) ) + assert (pos1, pos2) == ((3, 7), (4, 7)) # Test the whole path path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2))) - assert_array_equal(path, - [(1, 7), (2, 7), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7)]) + assert_array_equal( + path, [(1, 7), (2, 7), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7)]) # For seed 0 and 2 - cost, pos1, pos2 = mcp._bestconn[(0,2)] + cost, pos1, pos2 = mcp._bestconn[(0, 2)] # Test meeting points - assert (pos1, pos2) == ( (1,3), (1,4) ) + assert (pos1, pos2) == ((1, 3), (1, 4)) # Test the whole path path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2))) - assert_array_equal(path, - [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7)]) - - -if __name__ == "__main__": - np.testing.run_module_suite() \ No newline at end of file + assert_array_equal( + path, [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7)]) diff -Nru skimage-0.13.1/skimage/graph/tests/test_flexible.py skimage-0.14.0/skimage/graph/tests/test_flexible.py --- skimage-0.13.1/skimage/graph/tests/test_flexible.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/graph/tests/test_flexible.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,8 @@ -import skimage.graph.mcp as mcp -from numpy.testing import (assert_array_equal, - assert_almost_equal, - ) - import numpy as np +import skimage.graph.mcp as mcp + +from skimage._shared.testing import assert_array_equal + a = np.ones((8, 8), dtype=np.float32) a[1::2] *= 2.0 @@ -32,30 +31,23 @@ pass # We do not test this def update_node(self, index, new_index, offset_length): - self._distance[new_index] = self._distance[index] + 1 + self._distance[new_index] = self._distance[index] + 1 def test_flexible(): - # Create MCP and do a traceback mcp = FlexibleMCP(a) costs, traceback = mcp.find_costs([(0, 0)]) # Check that inner part is correct. This basically # tests whether travel_cost works. 
- assert_array_equal(costs[:4,:4], [[1, 2, 3, 4], - [2, 2, 3, 4], - [3, 3, 3, 4], - [4, 4, 4, 4]]) + assert_array_equal(costs[:4, :4], [[1, 2, 3, 4], + [2, 2, 3, 4], + [3, 3, 3, 4], + [4, 4, 4, 4]]) # Test that the algorithm stopped at the right distance. # Note that some of the costs are filled in but not yet frozen, # so we take a bit of margin - assert np.all( costs[-2:,:] == np.inf ) - assert np.all( costs[:,-2:] == np.inf ) - - #print(costs) - - -if __name__ == "__main__": - np.testing.run_module_suite() + assert np.all(costs[-2:, :] == np.inf) + assert np.all(costs[:, -2:] == np.inf) diff -Nru skimage-0.13.1/skimage/graph/tests/test_heap.py skimage-0.14.0/skimage/graph/tests/test_heap.py --- skimage-0.13.1/skimage/graph/tests/test_heap.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/graph/tests/test_heap.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,7 @@ import time import random import skimage.graph.heap as heap + from skimage._shared.testing import test_parallel @@ -47,7 +48,3 @@ assert(b[i] >= b[i - 1]) return t1 - t0 - -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() diff -Nru skimage-0.13.1/skimage/graph/tests/test_mcp.py skimage-0.14.0/skimage/graph/tests/test_mcp.py --- skimage-0.13.1/skimage/graph/tests/test_mcp.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/graph/tests/test_mcp.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,11 +1,11 @@ import numpy as np -from numpy.testing import (assert_array_equal, - assert_almost_equal, - ) - import skimage.graph.mcp as mcp + +from skimage._shared.testing import (assert_array_equal, assert_almost_equal, + parametrize) from skimage._shared._warnings import expected_warnings + np.random.seed(0) a = np.ones((8, 8), dtype=np.float32) a[1:-1, 1] = 0 @@ -13,6 +13,7 @@ warning_optional = r'|\A\Z' + def test_basic(): with expected_warnings(['Upgrading NumPy' + warning_optional]): m = mcp.MCP(a, fully_connected=True) @@ -135,9 +136,9 @@ 
[10, 0, 1, 2, 3, 4, 5, 6]]) -def test_crashing(): - for shape in [(100, 100), (5, 8, 13, 17)] * 5: - yield _test_random, shape +@parametrize("shape", [(100, 100), (5, 8, 13, 17)] * 5) +def test_crashing(shape): + _test_random(shape) def _test_random(shape): @@ -158,8 +159,3 @@ for end in ends: m.traceback(end) return a, costs, offsets - - - -if __name__ == "__main__": - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/graph/tests/test_spath.py skimage-0.14.0/skimage/graph/tests/test_spath.py --- skimage-0.13.1/skimage/graph/tests/test_spath.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/graph/tests/test_spath.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,8 +1,8 @@ import numpy as np -from numpy.testing import assert_equal, assert_array_equal - import skimage.graph.spath as spath +from skimage._shared.testing import assert_equal, assert_array_equal + def test_basic(): x = np.array([[1, 1, 3], @@ -30,7 +30,3 @@ path, cost = spath.shortest_path(x, reach=2) assert_array_equal(path, [2, 1, 1, 2, 3, 3, 2]) assert_equal(cost, 0) - - -if __name__ == "__main__": - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/__init__.py skimage-0.14.0/skimage/__init__.py --- skimage-0.13.1/skimage/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -65,34 +65,33 @@ pkg_dir = osp.abspath(osp.dirname(__file__)) data_dir = osp.join(pkg_dir, 'data') -__version__ = '0.13.1' +__version__ = '0.14.0' try: - imp.find_module('nose') + imp.find_module('pytest') except ImportError: def _test(doctest=False, verbose=False): - """This would run all unit tests, but nose couldn't be + """This would run all unit tests, but pytest couldn't be imported so the test suite can not run. """ - raise ImportError("Could not load nose. Unit tests not available.") + raise ImportError("Could not load pytest. 
Unit tests not available.") else: def _test(doctest=False, verbose=False): """Run all unit tests.""" - import nose + import pytest import warnings - args = ['', pkg_dir, '--exe', '--ignore-files=^_test'] + args = ['skimage'] if verbose: args.extend(['-v', '-s']) if doctest: - args.extend(['--with-doctest', '--ignore-files=^\.', - '--ignore-files=^setup\.py$$', '--ignore-files=test']) + args.extend(['--doctest-modules']) # Make sure warnings do not break the doc tests with warnings.catch_warnings(): warnings.simplefilter("ignore") - success = nose.run('skimage', argv=args) + success = pytest.main(args) else: - success = nose.run('skimage', argv=args) + success = pytest.main(args) # Return sys.exit code if success: return 0 @@ -158,4 +157,18 @@ from .util.dtype import * +def lookfor(what): + """Do a keyword search on scikit-image docstrings. + + Parameters + ---------- + what : str + Words to look for. + + """ + import numpy as np + import sys + return np.lookfor(what, sys.modules[__name__]) + + del warnings, functools, osp, imp, sys diff -Nru skimage-0.13.1/skimage/io/__init__.py skimage-0.14.0/skimage/io/__init__.py --- skimage-0.13.1/skimage/io/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -58,5 +58,6 @@ return doc + if __doc__ is not None: __doc__ = _update_doc(__doc__) diff -Nru skimage-0.13.1/skimage/io/_io.py skimage-0.14.0/skimage/io/_io.py --- skimage-0.13.1/skimage/io/_io.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/_io.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,20 +1,18 @@ -from io import BytesIO - import numpy as np import six from ..io.manage_plugins import call_plugin -from ..color import rgb2grey +from ..color import rgb2gray from .util import file_or_url_context from ..exposure import is_low_contrast -from .._shared.utils import all_warnings, warn +from .._shared.utils import warn __all__ = ['imread', 'imsave', 'imshow', 'show', 
'imread_collection', 'imshow_collection'] -def imread(fname, as_grey=False, plugin=None, flatten=None, +def imread(fname, as_gray=False, plugin=None, flatten=None, **plugin_args): """Load an image from file. @@ -22,10 +20,10 @@ ---------- fname : string Image file name, e.g. ``test.jpg`` or URL. - as_grey : bool - If True, convert color images to grey-scale (64-bit floats). - Images that are already in grey-scale format are not converted. - plugin : str + as_gray : bool, optional + If True, convert color images to gray-scale (64-bit floats). + Images that are already in gray-scale format are not converted. + plugin : str, optional Name of plugin to use. By default, the different plugins are tried (starting with the Python Imaging Library) until a suitable candidate is found. If not given and fname is a tiff file, the @@ -36,22 +34,25 @@ plugin_args : keywords Passed to the given plugin. flatten : bool - Backward compatible keyword, superseded by `as_grey`. - - plugin_args : keywords - Passed to the given plugin. + Backward compatible keyword, superseded by `as_gray`. Returns ------- img_array : ndarray - The different colour bands/channels are stored in the - third dimension, such that a grey-image is MxN, an + The different color bands/channels are stored in the + third dimension, such that a gray-image is MxN, an RGB-image MxNx3 and an RGBA-image MxNx4. 
""" + if 'as_grey' in plugin_args.keys(): + as_gray = plugin_args.pop('as_grey', as_gray) + warn('`as_grey` has been deprecated in favor of `as_gray`') + # Backward compatibility if flatten is not None: - as_grey = flatten + as_gray = flatten + warn('`flatten` has been deprecated in favor of `as_gray`' + ' and will be removed in v0.16.') if plugin is None and hasattr(fname, 'lower'): if fname.lower().endswith(('.tiff', '.tif')): @@ -68,8 +69,8 @@ img = np.swapaxes(img, -1, -3) img = np.swapaxes(img, -2, -3) - if as_grey: - img = rgb2grey(img) + if as_gray: + img = rgb2gray(img) return img @@ -124,12 +125,21 @@ plugin_args : keywords Passed to the given plugin. + Notes + ----- + When saving a JPEG, the compression ratio may be controlled using the + ``quality`` keyword argument which is an integer with values in [1, 100] + where 1 is worst quality and smallest file size, and 100 is best quality + and largest file size (default 75). This is only available when using + the PIL and imageio plugins. """ if plugin is None and hasattr(fname, 'lower'): if fname.lower().endswith(('.tiff', '.tif')): plugin = 'tifffile' if is_low_contrast(arr): warn('%s is a low contrast image' % fname) + if arr.dtype == bool: + warn('%s is a boolean image: setting True to 1 and False to 0' % fname) return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args) diff -Nru skimage-0.13.1/skimage/io/manage_plugins.py skimage-0.14.0/skimage/io/manage_plugins.py --- skimage-0.13.1/skimage/io/manage_plugins.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/manage_plugins.py 2018-05-29 01:27:44.000000000 +0000 @@ -45,7 +45,7 @@ # the following preferences. preferred_plugins = { # Default plugins for all types (overridden by specific types below). 
- 'all': ['pil', 'matplotlib', 'qt', 'freeimage'], + 'all': ['pil', 'matplotlib', 'qt'], 'imshow': ['matplotlib'], 'imshow_collection': ['matplotlib'] } @@ -62,6 +62,8 @@ 'imread_collection': [], 'imshow_collection': [], '_app_show': []} + + _clear_plugins() @@ -122,7 +124,7 @@ valid_provides = [p for p in provides if p in plugin_store] for p in provides: - if not p in plugin_store: + if p not in plugin_store: print("Plugin `%s` wants to provide non-existent `%s`." " Ignoring." % (name, p)) @@ -136,6 +138,7 @@ plugin_module_name[name] = os.path.basename(filename)[:-4] + _scan_plugins() @@ -186,7 +189,7 @@ Passed to the plugin function. """ - if not kind in plugin_store: + if kind not in plugin_store: raise ValueError('Invalid function (%s) requested.' % kind) plugin_funcs = plugin_store[kind] @@ -243,7 +246,7 @@ if kind is None: kind = plugin_store.keys() else: - if not kind in plugin_provides[name]: + if kind not in plugin_provides[name]: raise RuntimeError("Plugin %s does not support `%s`." % (name, kind)) @@ -255,7 +258,7 @@ _load(name) for k in kind: - if not k in plugin_store: + if k not in plugin_store: raise RuntimeError("'%s' is not a known plugin function." % k) funcs = plugin_store[k] @@ -291,7 +294,7 @@ """ if plugin in find_available_plugins(loaded=True): return - if not plugin in plugin_module_name: + if plugin not in plugin_module_name: raise ValueError("Plugin %s not found." 
% plugin) else: modname = plugin_module_name[plugin] diff -Nru skimage-0.13.1/skimage/io/_plugins/_colormixer.pyx skimage-0.14.0/skimage/io/_plugins/_colormixer.pyx --- skimage-0.13.1/skimage/io/_plugins/_colormixer.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/_plugins/_colormixer.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -3,7 +3,7 @@ #cython: nonecheck=False #cython: wraparound=False -"""Colour Mixer +"""Color Mixer NumPy does not do overflow checking when adding or multiplying integers, so currently the only way to clip results efficiently @@ -20,7 +20,7 @@ def add(cnp.ndarray[cnp.uint8_t, ndim=3] img, cnp.ndarray[cnp.uint8_t, ndim=3] stateimg, Py_ssize_t channel, Py_ssize_t amount): - """Add a given amount to a colour channel of `stateimg`, and + """Add a given amount to a color channel of `stateimg`, and store the result in `img`. Overflow is clipped. Parameters @@ -66,7 +66,7 @@ def multiply(cnp.ndarray[cnp.uint8_t, ndim=3] img, cnp.ndarray[cnp.uint8_t, ndim=3] stateimg, Py_ssize_t channel, float amount): - """Multiply a colour channel of `stateimg` by a certain amount, and + """Multiply a color channel of `stateimg` by a certain amount, and store the result in `img`. Overflow is clipped. Parameters diff -Nru skimage-0.13.1/skimage/io/_plugins/fits_plugin.py skimage-0.14.0/skimage/io/_plugins/fits_plugin.py --- skimage-0.13.1/skimage/io/_plugins/fits_plugin.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/_plugins/fits_plugin.py 2018-05-29 01:27:44.000000000 +0000 @@ -28,10 +28,10 @@ Returns ------- img_array : ndarray - Unlike plugins such as PIL, where different colour bands/channels are + Unlike plugins such as PIL, where different color bands/channels are stored in the third dimension, FITS images are greyscale-only and can be N-dimensional, so an array of the native FITS dimensionality is - returned, without colour channels. + returned, without color channels. 
Currently if no image is found in the file, None will be returned diff -Nru skimage-0.13.1/skimage/io/_plugins/freeimage_plugin.ini skimage-0.14.0/skimage/io/_plugins/freeimage_plugin.ini --- skimage-0.13.1/skimage/io/_plugins/freeimage_plugin.ini 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/_plugins/freeimage_plugin.ini 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -[freeimage] -description = Load images using the FreeImage library -provides = imread, imsave diff -Nru skimage-0.13.1/skimage/io/_plugins/freeimage_plugin.py skimage-0.14.0/skimage/io/_plugins/freeimage_plugin.py --- skimage-0.13.1/skimage/io/_plugins/freeimage_plugin.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/_plugins/freeimage_plugin.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -from imageio import imread as imread_imageio, imsave as imsave_imageio -from ..._shared.utils import deprecated - - -@deprecated('imageio plugin') -def imread(filename): - """ - img = imread(filename) - - Reads an image from file `filename` - - Parameters - ---------- - filename : file name - Returns - ------- - img : ndarray - """ - img = imread_imageio(filename) - return img - - -@deprecated('imageio plugin') -def imsave(filename, img): - ''' - imsave(filename, img) - - Save image to disk - - Image type is inferred from filename - - Parameters - ---------- - filename : file name - img : image to be saved as nd array - ''' - imsave_imageio(filename, img) diff -Nru skimage-0.13.1/skimage/io/_plugins/matplotlib_plugin.py skimage-0.14.0/skimage/io/_plugins/matplotlib_plugin.py --- skimage-0.13.1/skimage/io/_plugins/matplotlib_plugin.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/_plugins/matplotlib_plugin.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,3 +1,4 @@ +from __future__ import division from collections import namedtuple import numpy as np import matplotlib.pyplot as plt @@ -6,6 +7,8 @@ from ...exposure import is_low_contrast from 
...util.colormap import viridis from ..._shared.utils import warn +from math import floor, ceil + _default_colormap = 'gray' _nonstandard_colormap = viridis @@ -48,10 +51,10 @@ lo, hi = immin, immax signed = immin < 0 - out_of_range_float = (np.issubdtype(image.dtype, np.float) and + out_of_range_float = (np.issubdtype(image.dtype, np.floating) and (immin < lo or immax > hi)) low_data_range = (immin != immax and - is_low_contrast(image)) + is_low_contrast(image)) unsupported_dtype = image.dtype not in dtypes._supported_types return ImageProperties(signed, out_of_range_float, @@ -111,7 +114,7 @@ return lo, hi, cmap -def imshow(im, ax=None, show_cbar=None, **kwargs): +def imshow(image, ax=None, show_cbar=None, **kwargs): """Show the input image and return the current axes. By default, the image is displayed in greyscale, rather than @@ -130,7 +133,7 @@ Parameters ---------- - im : array, shape (M, N[, 3]) + image : array, shape (M, N[, 3]) The image to display. ax: `matplotlib.axes.Axes`, optional The axis to use for the image, defaults to plt.gca(). @@ -146,7 +149,7 @@ """ if kwargs.get('cmap', None) == 'viridis': kwargs['cmap'] = viridis - lo, hi, cmap = _get_display_range(im) + lo, hi, cmap = _get_display_range(image) kwargs.setdefault('interpolation', 'nearest') kwargs.setdefault('cmap', cmap) @@ -154,12 +157,11 @@ kwargs.setdefault('vmax', hi) ax = ax or plt.gca() - ax_im = ax.imshow(im, **kwargs) + ax_im = ax.imshow(image, **kwargs) if (cmap != _default_colormap and show_cbar is not False) or show_cbar: divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) plt.colorbar(ax_im, cax=cax) - ax.set_adjustable('box-forced') ax.get_figure().tight_layout() return ax_im @@ -168,11 +170,34 @@ def imshow_collection(ic, *args, **kwargs): """Display all images in the collection. + Returns + ------- + fig : `matplotlib.figure.Figure` + The `Figure` object returned by `plt.subplots`. 
""" - fig, axes = plt.subplots(1, len(ic)) + if len(ic) < 1: + raise ValueError('Number of images to plot must be greater than 0') + + # The target is to plot images on a grid with aspect ratio 4:3 + num_images = len(ic) + # Two pairs of `nrows, ncols` are possible + k = (num_images * 12)**0.5 + r1 = max(1, floor(k / 4)) + r2 = ceil(k / 4) + c1 = ceil(num_images / r1) + c2 = ceil(num_images / r2) + # Select the one which is closer to 4:3 + if abs(r1 / c1 - 0.75) < abs(r2 / c2 - 0.75): + nrows, ncols = r1, c1 + else: + nrows, ncols = r2, c2 + + fig, axes = plt.subplots(nrows=nrows, ncols=ncols) + ax = np.asarray(axes).ravel() for n, image in enumerate(ic): - kwargs['ax'] = axes[n] - imshow(image, *args, **kwargs) + ax[n].imshow(image, *args, **kwargs) + kwargs['ax'] = axes + return fig imread = plt.imread diff -Nru skimage-0.13.1/skimage/io/_plugins/pil_plugin.py skimage-0.14.0/skimage/io/_plugins/pil_plugin.py --- skimage-0.13.1/skimage/io/_plugins/pil_plugin.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/_plugins/pil_plugin.py 2018-05-29 01:27:44.000000000 +0000 @@ -40,7 +40,7 @@ return pil_to_ndarray(im, dtype=dtype, img_num=img_num) -def pil_to_ndarray(im, dtype=None, img_num=None): +def pil_to_ndarray(image, dtype=None, img_num=None): """Import a PIL Image object to an ndarray, in memory. 
Parameters @@ -50,59 +50,59 @@ """ try: # this will raise an IOError if the file is not readable - im.getdata()[0] + image.getdata()[0] except IOError as e: site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries" pillow_error_message = str(e) error_message = ('Could not load "%s" \n' 'Reason: "%s"\n' 'Please see documentation at: %s' - % (im.filename, pillow_error_message, site)) + % (image.filename, pillow_error_message, site)) raise ValueError(error_message) frames = [] grayscale = None i = 0 while 1: try: - im.seek(i) + image.seek(i) except EOFError: break - frame = im + frame = image if img_num is not None and img_num != i: - im.getdata()[0] + image.getdata()[0] i += 1 continue - if im.format == 'PNG' and im.mode == 'I' and dtype is None: + if image.format == 'PNG' and image.mode == 'I' and dtype is None: dtype = 'uint16' - if im.mode == 'P': + if image.mode == 'P': if grayscale is None: - grayscale = _palette_is_grayscale(im) + grayscale = _palette_is_grayscale(image) if grayscale: - frame = im.convert('L') + frame = image.convert('L') else: - if im.format == 'PNG' and 'transparency' in im.info: - frame = im.convert('RGBA') + if image.format == 'PNG' and 'transparency' in image.info: + frame = image.convert('RGBA') else: - frame = im.convert('RGB') + frame = image.convert('RGB') - elif im.mode == '1': - frame = im.convert('L') + elif image.mode == '1': + frame = image.convert('L') - elif 'A' in im.mode: - frame = im.convert('RGBA') + elif 'A' in image.mode: + frame = image.convert('RGBA') - elif im.mode == 'CMYK': - frame = im.convert('RGB') + elif image.mode == 'CMYK': + frame = image.convert('RGB') - if im.mode.startswith('I;16'): - shape = im.size - dtype = '>u2' if im.mode.endswith('B') else ' 1: return np.array(frames) @@ -145,7 +145,7 @@ palette = np.asarray(pil_image.getpalette()).reshape((256, 3)) # Not all palette colors are used; unused colors have junk values. 
start, stop = pil_image.getextrema() - valid_palette = palette[start:stop] + valid_palette = palette[start:stop + 1] # Image is grayscale if channel differences (R - G and G - B) # are all zero. return np.allclose(np.diff(valid_palette), 0) diff -Nru skimage-0.13.1/skimage/io/_plugins/test_plugin.ini skimage-0.14.0/skimage/io/_plugins/test_plugin.ini --- skimage-0.13.1/skimage/io/_plugins/test_plugin.ini 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/_plugins/test_plugin.ini 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -[test] -description = Test plugin -provides = imsave, imshow, imread, imread_collection, imshow_collection diff -Nru skimage-0.13.1/skimage/io/_plugins/test_plugin.py skimage-0.14.0/skimage/io/_plugins/test_plugin.py --- skimage-0.13.1/skimage/io/_plugins/test_plugin.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/_plugins/test_plugin.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -# This mock-up is called by ../tests/test_plugin.py -# to verify the behaviour of the plugin infrastructure - -from skimage.io import ImageCollection - - -def imread(fname, dtype=None): - assert fname == 'test.png' - assert dtype == 'i4' - - -def imsave(fname, arr): - assert fname == 'test.png' - assert arr == [1, 2, 3] - - -def imshow(arr, plugin_arg=None): - assert arr == [1, 2, 3] - assert plugin_arg == (1, 2) - - -def imread_collection(x, conserve_memory=True): - assert conserve_memory == False - assert x == '*.png' - return ImageCollection([0, 1], load_func=lambda x: x) - - -def imshow_collection(x): - assert len(x) == 2 diff -Nru skimage-0.13.1/skimage/io/_plugins/util.py skimage-0.14.0/skimage/io/_plugins/util.py --- skimage-0.13.1/skimage/io/_plugins/util.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/_plugins/util.py 2018-05-29 01:27:44.000000000 +0000 @@ -157,12 +157,13 @@ return out -def histograms(img, nbins): +def histograms(image, nbins): '''Calculate the channel histograms of the 
current image. Parameters ---------- - img : ndarray, ndim=3, dtype=np.uint8 + image : ndarray, ndim=3, dtype=np.uint8 + Input image. nbins : int The number of bins. @@ -175,7 +176,7 @@ ''' - return _histograms.histograms(img, nbins) + return _histograms.histograms(image, nbins) class ImgThread(threading.Thread): diff -Nru skimage-0.13.1/skimage/io/setup.py skimage-0.14.0/skimage/io/setup.py --- skimage-0.13.1/skimage/io/setup.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/setup.py 2018-05-29 01:27:44.000000000 +0000 @@ -29,6 +29,7 @@ return config + if __name__ == '__main__': from numpy.distutils.core import setup setup(maintainer='scikit-image Developers', diff -Nru skimage-0.13.1/skimage/io/sift.py skimage-0.14.0/skimage/io/sift.py --- skimage-0.13.1/skimage/io/sift.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/sift.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,17 +1,10 @@ -"""Read SIFT and SURF feature files. - -See Also --------- -http://people.cs.ubc.ca/~lowe/keypoints/ -http://www.vision.ee.ethz.ch/~surf/ -""" +from six import string_types +import numpy as np __all__ = ['load_sift', 'load_surf'] -import numpy as np - -def _sift_read(f, mode='SIFT'): +def _sift_read(filelike, mode='SIFT'): """Read SIFT or SURF features from externally generated file. This routine reads SIFT or SURF files generated by binary utilities from @@ -24,10 +17,12 @@ Parameters ---------- - f : string or open file + filelike : string or open file Input file generated by the feature detectors from http://people.cs.ubc.ca/~lowe/keypoints/ or - http://www.vision.ee.ethz.ch/~surf/ + http://www.vision.ee.ethz.ch/~surf/ . + mode : {'SIFT', 'SURF'}, optional + Kind of descriptor used to generate `filelike`. 
Returns ------- @@ -44,8 +39,12 @@ feature values """ - if not hasattr(f, 'readline'): - f = open(f, 'r') + if isinstance(filelike, string_types): + f = open(filelike, 'r') + filelike_is_str = True + else: + f = filelike + filelike_is_str = False if mode == 'SIFT': nr_features, feature_len = map(int, f.readline().split()) @@ -62,9 +61,11 @@ data = np.fromfile(f, sep=' ') if data.size != nr_features * datatype.itemsize / np.dtype(float).itemsize: - raise IOError("Invalid %s feature file." % mode) + raise IOError("Invalid {} feature file.".format(mode)) - f.close() + # If `filelike` is passed to the function as filename - close the file + if filelike_is_str: + f.close() return data.view(datatype) @@ -76,5 +77,6 @@ def load_surf(f): return _sift_read(f, mode='SURF') + load_sift.__doc__ = _sift_read.__doc__ load_surf.__doc__ = _sift_read.__doc__ diff -Nru skimage-0.13.1/skimage/io/tests/test_collection.py skimage-0.14.0/skimage/io/tests/test_collection.py --- skimage-0.13.1/skimage/io/tests/test_collection.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_collection.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,12 +1,13 @@ -import os.path +import os import numpy as np -from numpy.testing import assert_raises, assert_equal, assert_allclose - from skimage import data_dir from skimage.io.collection import ImageCollection, alphanumeric_key from skimage.io import reset_plugins +from skimage._shared import testing +from skimage._shared.testing import assert_equal, assert_allclose, TestCase + def test_string_split(): test_string = 'z23a' @@ -23,7 +24,7 @@ assert_equal(sorted_filenames, sorted_filenames) -class TestImageCollection(): +class TestImageCollection(TestCase): pattern = [os.path.join(data_dir, pic) for pic in ['camera.png', 'color.png']] @@ -31,6 +32,7 @@ pattern_matched = [os.path.join(data_dir, pic) for pic in ['camera.png', 'moon.png']] + @testing.fixture(autouse=True) def setUp(self): reset_plugins() # Generic image collection with 
images of different shapes. @@ -46,13 +48,14 @@ for i in range(-num, num): assert type(self.images[i]) is np.ndarray assert_allclose(self.images[0], - self.images[-num]) + self.images[-num]) - # assert_raises expects a callable, hence this thin wrapper function. def return_img(n): return self.images[n] - assert_raises(IndexError, return_img, num) - assert_raises(IndexError, return_img, -num - 1) + with testing.raises(IndexError): + return_img(num) + with testing.raises(IndexError): + return_img(-num - 1) def test_slicing(self): assert type(self.images[:]) is ImageCollection @@ -69,7 +72,8 @@ def set_files(f): self.images.files = f - assert_raises(AttributeError, set_files, 'newfiles') + with testing.raises(AttributeError): + set_files('newfiles') def test_custom_load(self): load_pattern = [(1, 'one'), (2, 'two')] @@ -94,9 +98,5 @@ assert_equal(array.shape, expected_shape) def test_concatentate_mismatched_image_shapes(self): - assert_raises(ValueError, self.images.concatenate) - - -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() + with testing.raises(ValueError): + self.images.concatenate() diff -Nru skimage-0.13.1/skimage/io/tests/test_colormixer.py skimage-0.14.0/skimage/io/tests/test_colormixer.py --- skimage-0.13.1/skimage/io/tests/test_colormixer.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_colormixer.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,12 +1,9 @@ -from numpy.testing import (assert_array_equal, - assert_almost_equal, - assert_equal, - assert_array_almost_equal, - ) import numpy as np - import skimage.io._plugins._colormixer as cm +from skimage._shared.testing import (assert_array_equal, assert_almost_equal, + assert_equal, assert_array_almost_equal) + class ColorMixerTest(object): def setup(self): @@ -137,8 +134,3 @@ def test_hsv_mul_clip_neg(self): cm.hsv_multiply(self.img, self.state, 0, 0, 0) assert_equal(self.img, np.zeros_like(self.state)) - - -if __name__ == 
"__main__": - from numpy.testing import run_module_suite - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_fits.py skimage-0.14.0/skimage/io/tests/test_fits.py --- skimage-0.13.1/skimage/io/tests/test_fits.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_fits.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,8 @@ import os.path import numpy as np -from numpy.testing import run_module_suite -from numpy.testing.decorators import skipif import skimage.io as io from skimage import data_dir +from skimage._shared import testing pyfits_available = True @@ -27,16 +26,16 @@ try: io.use_plugin('fits') except ImportError: - assert pyfits_available == False + assert not pyfits_available else: - assert pyfits_available == True + assert pyfits_available def teardown(): io.reset_plugins() -@skipif(not pyfits_available) +@testing.skipif(not pyfits_available, reason="pyfits not installed") def test_imread_MEF(): io.use_plugin('fits') testfile = os.path.join(data_dir, 'multi.fits') @@ -44,7 +43,7 @@ assert np.all(img == pyfits.getdata(testfile, 1)) -@skipif(not pyfits_available) +@testing.skipif(not pyfits_available, reason="pyfits not installed") def test_imread_simple(): io.use_plugin('fits') testfile = os.path.join(data_dir, 'simple.fits') @@ -52,17 +51,18 @@ assert np.all(img == pyfits.getdata(testfile, 0)) -@skipif(not pyfits_available) +@testing.skipif(not pyfits_available, reason="pyfits not installed") def test_imread_collection_single_MEF(): io.use_plugin('fits') testfile = os.path.join(data_dir, 'multi.fits') ic1 = io.imread_collection(testfile) - ic2 = io.ImageCollection([(testfile, 1), (testfile, 2), (testfile, 3)], - load_func=fplug.FITSFactory) + ic2 = io.ImageCollection( + [(testfile, 1), (testfile, 2), (testfile, 3)], + load_func=fplug.FITSFactory) assert _same_ImageCollection(ic1, ic2) -@skipif(not pyfits_available) +@testing.skipif(not pyfits_available, reason="pyfits not installed") def 
test_imread_collection_MEF_and_simple(): io.use_plugin('fits') testfile1 = os.path.join(data_dir, 'multi.fits') @@ -84,7 +84,3 @@ if not np.all(ext1 == ext2): return False return True - - -if __name__ == '__main__': - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_histograms.py skimage-0.14.0/skimage/io/tests/test_histograms.py --- skimage-0.13.1/skimage/io/tests/test_histograms.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_histograms.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,10 @@ -from numpy.testing import * import numpy as np - from skimage.io._plugins._histograms import histograms +from skimage._shared.testing import assert_array_equal, assert_equal, TestCase + -class TestHistogram: +class TestHistogram(TestCase): def test_basic(self): img = np.ones((50, 50, 3), dtype=np.uint8) r, g, b, v = histograms(img, 255) @@ -23,6 +23,3 @@ assert_array_equal(r, b) assert_array_equal(r, v) assert_array_equal(r, np.ones(255)) - -if __name__ == "__main__": - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_imageio.py skimage-0.14.0/skimage/io/tests/test_imageio.py --- skimage-0.13.1/skimage/io/tests/test_imageio.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_imageio.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,14 +1,14 @@ import os -import os.path -import numpy as np -from numpy.testing import * -from numpy.testing.decorators import skipif - from tempfile import NamedTemporaryFile +import numpy as np from skimage import data_dir from skimage.io import imread, imsave, use_plugin, reset_plugins +from skimage._shared import testing +from skimage._shared.testing import assert_array_almost_equal, TestCase + + try: import imageio as _imageio except ImportError: @@ -27,7 +27,7 @@ reset_plugins() -@skipif(not imageio_available) +@testing.skipif(not imageio_available, reason="imageio not installed") def test_imageio_flatten(): # a color image is flattened img = 
imread(os.path.join(data_dir, 'color.png'), flatten=True) @@ -38,22 +38,21 @@ assert np.sctype2char(img.dtype) in np.typecodes['AllInteger'] -@skipif(not imageio_available) +@testing.skipif(not imageio_available, reason="imageio not installed") def test_imageio_palette(): img = imread(os.path.join(data_dir, 'palette_color.png')) assert img.ndim == 3 -@skipif(not imageio_available) +@testing.skipif(not imageio_available, reason="imageio not installed") def test_imageio_truncated_jpg(): # imageio>2.0 uses Pillow / PIL to try and load the file. # Oddly, PIL explicitly raises a SyntaxError when the file read fails. - assert_raises((RuntimeError, ValueError, SyntaxError), - imread, - os.path.join(data_dir, 'truncated.jpg')) + with testing.raises(SyntaxError): + imread(os.path.join(data_dir, 'truncated.jpg')) -class TestSave: +class TestSave(TestCase): def roundtrip(self, x, scaling=1): f = NamedTemporaryFile(suffix='.png') @@ -64,17 +63,14 @@ assert_array_almost_equal((x * scaling).astype(np.int32), y) - @skipif(not imageio_available) + @testing.skipif(not imageio_available, reason="imageio not installed") def test_imsave_roundtrip(self): dtype = np.uint8 for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]: x = np.ones(shape, dtype=dtype) * np.random.rand(*shape) - if np.issubdtype(dtype, float): + if np.issubdtype(dtype, np.floating): yield self.roundtrip, x, 255 else: x = (x * 255).astype(dtype) yield self.roundtrip, x - -if __name__ == "__main__": - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_imread.py skimage-0.14.0/skimage/io/tests/test_imread.py --- skimage-0.13.1/skimage/io/tests/test_imread.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_imread.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,14 +1,13 @@ import os -import os.path -import numpy as np -from numpy.testing import * -from numpy.testing.decorators import skipif - from tempfile import NamedTemporaryFile -from skimage import data_dir +import numpy as np 
+from skimage import data_dir, io from skimage.io import imread, imsave, use_plugin, reset_plugins -import skimage.io as sio + +from skimage._shared import testing +from skimage._shared.testing import (TestCase, assert_array_equal, + assert_array_almost_equal) try: import imread as _imread @@ -28,7 +27,7 @@ reset_plugins() -@skipif(not imread_available) +@testing.skipif(not imread_available, reason="imageread not installed") def test_imread_flatten(): # a color image is flattened img = imread(os.path.join(data_dir, 'color.png'), flatten=True) @@ -39,20 +38,19 @@ assert np.sctype2char(img.dtype) in np.typecodes['AllInteger'] -@skipif(not imread_available) +@testing.skipif(not imread_available, reason="imageread not installed") def test_imread_palette(): img = imread(os.path.join(data_dir, 'palette_color.png')) assert img.ndim == 3 -@skipif(not imread_available) +@testing.skipif(not imread_available, reason="imageread not installed") def test_imread_truncated_jpg(): - assert_raises((RuntimeError, ValueError), - sio.imread, - os.path.join(data_dir, 'truncated.jpg')) + with testing.raises(RuntimeError): + io.imread(os.path.join(data_dir, 'truncated.jpg')) -@skipif(not imread_available) +@testing.skipif(not imread_available, reason="imageread not installed") def test_bilevel(): expected = np.zeros((10, 10), bool) expected[::2] = 1 @@ -61,7 +59,7 @@ assert_array_equal(img.astype(bool), expected) -class TestSave: +class TestSave(TestCase): def roundtrip(self, x, scaling=1): f = NamedTemporaryFile(suffix='.png') fname = f.name @@ -71,17 +69,14 @@ assert_array_almost_equal((x * scaling).astype(np.int32), y) - @skipif(not imread_available) + @testing.skipif(not imread_available, reason="imageread not installed") def test_imsave_roundtrip(self): dtype = np.uint8 for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]: x = np.ones(shape, dtype=dtype) * np.random.rand(*shape) - if np.issubdtype(dtype, float): + if np.issubdtype(dtype, np.floating): yield self.roundtrip, x, 255 else: 
x = (x * 255).astype(dtype) yield self.roundtrip, x - -if __name__ == "__main__": - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_io.py skimage-0.14.0/skimage/io/tests/test_io.py --- skimage-0.13.1/skimage/io/tests/test_io.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_io.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,11 +1,10 @@ import os -from numpy.testing import assert_array_equal, raises, run_module_suite import numpy as np +from skimage import io, data_dir -import skimage.io as io -from skimage.io.manage_plugins import plugin_store -from skimage import data_dir +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal def test_stack_basic(): @@ -15,9 +14,9 @@ assert_array_equal(io.pop(), x) -@raises(ValueError) def test_stack_non_array(): - io.push([[1, 2, 3]]) + with testing.raises(ValueError): + io.push([[1, 2, 3]]) def test_imread_url(): @@ -27,7 +26,3 @@ image_url = 'file:///{0}/camera.png'.format(data_path) image = io.imread(image_url) assert image.shape == (512, 512) - - -if __name__ == "__main__": - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_multi_image.py skimage-0.14.0/skimage/io/tests/test_multi_image.py --- skimage-0.13.1/skimage/io/tests/test_multi_image.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_multi_image.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,17 +1,17 @@ import os +import six import numpy as np -from numpy.testing import assert_raises, assert_equal, assert_allclose - -from skimage.io import use_plugin from skimage import data_dir +from skimage.io import use_plugin from skimage.io.collection import MultiImage, ImageCollection -import six +from skimage._shared import testing +from skimage._shared.testing import assert_equal, assert_allclose, TestCase -class TestMultiImage(): - +class TestMultiImage(TestCase): + @testing.fixture(autouse=True) def setUp(self): # This multipage TIF file was created 
with imagemagick: # convert im1.tif im2.tif -adjoin multipage.tif @@ -58,15 +58,13 @@ assert type(img[i]) is np.ndarray assert_allclose(img[0], img[-num]) - assert_raises(AssertionError, - assert_allclose, - img[0], img[1]) - - # assert_raises expects a callable, hence this thin wrapper function. - def return_img(n): - return img[n] - assert_raises(IndexError, return_img, num) - assert_raises(IndexError, return_img, -num - 1) + with testing.raises(AssertionError): + assert_allclose(img[0], img[1]) + + with testing.raises(IndexError): + img[num] + with testing.raises(IndexError): + img[-num - 1] def test_files_property(self): for img in self.imgs: @@ -75,27 +73,21 @@ assert isinstance(img.filename, six.string_types) - def set_filename(f): - img.filename = f - assert_raises(AttributeError, set_filename, 'newfile') + with testing.raises(AttributeError): + img.filename = "newfile" def test_conserve_memory_property(self): for img in self.imgs: assert isinstance(img.conserve_memory, bool) - def set_mem(val): - img.conserve_memory = val - assert_raises(AttributeError, set_mem, True) + with testing.raises(AttributeError): + img.conserve_memory = True def test_concatenate(self): for img in self.imgs: if img[0].shape != img[-1].shape: - assert_raises(ValueError, img.concatenate) + with testing.raises(ValueError): + img.concatenate() continue array = img.concatenate() assert_equal(array.shape, (len(img),) + img[0].shape) - - -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_pil.py skimage-0.14.0/skimage/io/tests/test_pil.py --- skimage-0.13.1/skimage/io/tests/test_pil.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_pil.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,18 +1,10 @@ -import os.path +import os import numpy as np -from numpy.testing import ( - assert_array_equal, assert_array_almost_equal, assert_raises, - assert_allclose, run_module_suite) - +from 
six import BytesIO from tempfile import NamedTemporaryFile from ... import data_dir, img_as_float from .. import imread, imsave, use_plugin, reset_plugins -from ..._shared.testing import mono_check, color_check -from ..._shared._warnings import expected_warnings -from ..._shared._tempfile import temporary_file - -from six import BytesIO from PIL import Image from .._plugins.pil_plugin import ( @@ -20,6 +12,14 @@ from ...measure import compare_ssim as ssim from ...color import rgb2lab +from skimage._shared import testing +from skimage._shared.testing import (mono_check, color_check, + assert_equal, assert_array_equal, + assert_array_almost_equal, + assert_allclose) +from skimage._shared._warnings import expected_warnings +from skimage._shared._tempfile import temporary_file + def setup(): use_plugin('pil') @@ -39,22 +39,32 @@ except ImportError: pass + def test_png_round_trip(): f = NamedTemporaryFile(suffix='.png') fname = f.name f.close() I = np.eye(3) - imsave(fname, I) + with expected_warnings(['Possible precision loss']): + imsave(fname, I) Ip = img_as_float(imread(fname)) os.remove(fname) assert np.sum(np.abs(Ip-I)) < 1e-3 + +def test_img_as_gray_flatten(): + img = imread(os.path.join(data_dir, 'color.png'), as_gray=True) + with expected_warnings(['deprecated']): + img_flat = imread(os.path.join(data_dir, 'color.png'), flatten=True) + assert_array_equal(img, img_flat) + + def test_imread_flatten(): # a color image is flattened - img = imread(os.path.join(data_dir, 'color.png'), flatten=True) + img = imread(os.path.join(data_dir, 'color.png'), as_gray=True) assert img.ndim == 2 assert img.dtype == np.float64 - img = imread(os.path.join(data_dir, 'camera.png'), flatten=True) + img = imread(os.path.join(data_dir, 'camera.png'), as_gray=True) # check that flattening does not occur for an image that is grey already. 
assert np.sctype2char(img.dtype) in np.typecodes['AllInteger'] @@ -129,8 +139,8 @@ def test_imread_truncated_jpg(): - assert_raises((IOError, ValueError), imread, - os.path.join(data_dir, 'truncated.jpg')) + with testing.raises(IOError): + imread(os.path.join(data_dir, 'truncated.jpg')) def test_jpg_quality_arg(): @@ -170,7 +180,7 @@ for dtype in (np.uint8, np.uint16, np.float32, np.float64): x = np.ones(shape, dtype=dtype) * np.random.rand(*shape) - if np.issubdtype(dtype, float): + if np.issubdtype(dtype, np.floating): yield (self.verify_roundtrip, dtype, x, roundtrip_function(x), 255) else: @@ -184,10 +194,16 @@ def test_imsave_roundtrip_pil_image(self): self.verify_imsave_roundtrip(self.roundtrip_pil_image) + def test_imsave_incorrect_dimension(): with temporary_file(suffix='.png') as fname: - assert_raises(ValueError, imsave, fname, np.zeros((2, 3, 3, 1))) - assert_raises(ValueError, imsave, fname, np.zeros((2, 3, 2))) + with testing.raises(ValueError): + with expected_warnings([fname + ' is a low contrast image']): + imsave(fname, np.zeros((2, 3, 3, 1))) + with testing.raises(ValueError): + with expected_warnings([fname + ' is a low contrast image']): + imsave(fname, np.zeros((2, 3, 2))) + def test_imsave_filelike(): shape = (2, 2) @@ -202,7 +218,24 @@ # read from file-like object s.seek(0) out = imread(s) - assert out.shape == shape + assert_equal(out.shape, shape) + assert_allclose(out, image) + + +def test_imsave_boolean_input(): + shape = (2, 2) + image = np.eye(*shape, dtype=np.bool) + s = BytesIO() + + # save to file-like object + with expected_warnings( + ['is a boolean image: setting True to 1 and False to 0']): + imsave(s, image) + + # read from file-like object + s.seek(0) + out = imread(s) + assert_equal(out.shape, shape) assert_allclose(out, image) @@ -212,16 +245,19 @@ with expected_warnings(['precision loss']): pil_image = ndarray_to_pil(image) out = pil_to_ndarray(pil_image) - assert out.shape == shape + assert_equal(out.shape, shape) def 
test_all_color(): - color_check('pil') - color_check('pil', 'bmp') + with expected_warnings(['.* is a boolean image']): + color_check('pil') + with expected_warnings(['.* is a boolean image']): + color_check('pil', 'bmp') def test_all_mono(): - mono_check('pil') + with expected_warnings(['.* is a boolean image']): + mono_check('pil') def test_multi_page_gif(): @@ -259,5 +295,7 @@ sim = ssim(refi, newi, data_range=refi.max() - refi.min()) assert sim > 0.99 -if __name__ == "__main__": - run_module_suite() + +def test_extreme_palette(): + img = imread(os.path.join(data_dir, 'green_palette.png')) + assert_equal(img.ndim, 3) diff -Nru skimage-0.13.1/skimage/io/tests/test_plugin.py skimage-0.14.0/skimage/io/tests/test_plugin.py --- skimage-0.13.1/skimage/io/tests/test_plugin.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_plugin.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,19 +1,16 @@ from contextlib import contextmanager -from numpy.testing import assert_equal, raises - from skimage import io from skimage.io import manage_plugins +from skimage._shared import testing +from skimage._shared.testing import assert_equal + io.use_plugin('pil') priority_plugin = 'pil' -def setup_module(): - manage_plugins.use_plugin('test') # see ../_plugins/test_plugin.py - - def teardown_module(): io.reset_plugins() @@ -28,31 +25,9 @@ manage_plugins.preferred_plugins = preferred_plugins -def test_read(): - io.imread('test.png', as_grey=True, dtype='i4', plugin='test') - - -def test_save(): - io.imsave('test.png', [1, 2, 3], plugin='test') - - -def test_show(): - io.imshow([1, 2, 3], plugin_arg=(1, 2), plugin='test') - - -def test_collection(): - ic = io.imread_collection('*.png', conserve_memory=False, plugin='test') - io.imshow_collection(ic) - - -def test_use(): - manage_plugins.use_plugin('test') - manage_plugins.use_plugin('test', 'imshow') - - -@raises(ValueError) def test_failed_use(): - manage_plugins.use_plugin('asd') + with testing.raises(ValueError): 
+ manage_plugins.use_plugin('asd') def test_use_priority(): @@ -60,37 +35,9 @@ plug, func = manage_plugins.plugin_store['imread'][0] assert_equal(plug, priority_plugin) - manage_plugins.use_plugin('test') + manage_plugins.use_plugin('matplotlib') plug, func = manage_plugins.plugin_store['imread'][0] - assert_equal(plug, 'test') - - -def test_use_priority_with_func(): - manage_plugins.use_plugin('pil') - plug, func = manage_plugins.plugin_store['imread'][0] - assert_equal(plug, 'pil') - - manage_plugins.use_plugin('test', 'imread') - plug, func = manage_plugins.plugin_store['imread'][0] - assert_equal(plug, 'test') - - plug, func = manage_plugins.plugin_store['imsave'][0] - assert_equal(plug, 'pil') - - manage_plugins.use_plugin('test') - plug, func = manage_plugins.plugin_store['imsave'][0] - assert_equal(plug, 'test') - - -def test_plugin_order(): - p = io.plugin_order() - assert 'imread' in p - assert 'test' in p['imread'] - - -def test_available(): - assert 'qt' in io.available_plugins - assert 'test' in io.find_available_plugins(loaded=True) + assert_equal(plug, 'matplotlib') def test_load_preferred_plugins_all(): @@ -119,8 +66,3 @@ assert func == pil_plugin.imread plug, func = manage_plugins.plugin_store['imshow'][0] assert func == matplotlib_plugin.imshow, func.__module__ - - -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_plugin_util.py skimage-0.14.0/skimage/io/tests/test_plugin_util.py --- skimage-0.13.1/skimage/io/tests/test_plugin_util.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_plugin_util.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,13 +1,15 @@ +import numpy as np from skimage.io._plugins.util import prepare_for_display, WindowManager + +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal, TestCase from skimage._shared._warnings import expected_warnings -from numpy.testing import * 
-import numpy as np np.random.seed(0) -class TestPrepareForDisplay: +class TestPrepareForDisplay(TestCase): def test_basic(self): with expected_warnings(['precision loss']): prepare_for_display(np.random.rand(10, 10)) @@ -25,7 +27,7 @@ assert x[0, 0, 0] == 0 assert x[3, 2, 0] == 255 - def test_colour(self): + def test_color(self): with expected_warnings(['precision loss']): prepare_for_display(np.random.rand(10, 10, 3)) @@ -33,20 +35,21 @@ with expected_warnings(['precision loss']): prepare_for_display(np.random.rand(10, 10, 4)) - @raises(ValueError) def test_wrong_dimensionality(self): - with expected_warnings(['precision loss']): - prepare_for_display(np.random.rand(10, 10, 1, 1)) + with testing.raises(ValueError): + with expected_warnings(['precision loss']): + prepare_for_display(np.random.rand(10, 10, 1, 1)) - @raises(ValueError) def test_wrong_depth(self): - with expected_warnings(['precision loss']): - prepare_for_display(np.random.rand(10, 10, 5)) + with testing.raises(ValueError): + with expected_warnings(['precision loss']): + prepare_for_display(np.random.rand(10, 10, 5)) -class TestWindowManager: +class TestWindowManager(TestCase): callback_called = False + @testing.fixture(autouse=True) def setup(self): self.wm = WindowManager() self.wm.acquire('test') @@ -71,6 +74,3 @@ def teardown(self): self.wm._release('test') - -if __name__ == "__main__": - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_sift.py skimage-0.14.0/skimage/io/tests/test_sift.py --- skimage-0.13.1/skimage/io/tests/test_sift.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_sift.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,10 @@ -from nose.tools import * -from numpy.testing import assert_equal, run_module_suite -from tempfile import NamedTemporaryFile import os +from tempfile import NamedTemporaryFile from skimage.io import load_sift, load_surf +from skimage._shared.testing import assert_equal + def test_load_sift(): f = 
NamedTemporaryFile(delete=False) @@ -66,6 +66,3 @@ assert_equal(len(features['data'][0]), 64) assert_equal(features['column'][1], 68.5773) assert_equal(features['row'][0], 62.0491) - -if __name__ == '__main__': - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_simpleitk.py skimage-0.14.0/skimage/io/tests/test_simpleitk.py --- skimage-0.13.1/skimage/io/tests/test_simpleitk.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_simpleitk.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,12 +1,12 @@ import os.path import numpy as np -from numpy.testing.decorators import skipif -from numpy.testing import assert_raises +import unittest from tempfile import NamedTemporaryFile from skimage import data_dir from skimage.io import imread, imsave, use_plugin, reset_plugins +from skimage._shared import testing try: import SimpleITK as sitk @@ -34,7 +34,7 @@ pass -@skipif(not sitk_available) +@testing.skipif(not sitk_available, reason="simpletk not installed") def test_imread_flatten(): # a color image is flattened img = imread(os.path.join(data_dir, 'color.png'), flatten=True) @@ -45,7 +45,7 @@ assert np.sctype2char(img.dtype) in np.typecodes['AllInteger'] -@skipif(not sitk_available) +@testing.skipif(not sitk_available, reason="simpletk not installed") def test_bilevel(): expected = np.zeros((10, 10)) expected[::2] = 255 @@ -55,7 +55,7 @@ """ #TODO: This test causes a Segmentation fault -@skipif(not sitk_available) +@testing.skipif(not sitk_available) def test_imread_truncated_jpg(): assert_raises((RuntimeError, ValueError), imread, @@ -63,7 +63,7 @@ """ -@skipif(not sitk_available) +@testing.skipif(not sitk_available, reason="simpletk not installed") def test_imread_uint16(): expected = np.load(os.path.join(data_dir, 'chessboard_GRAY_U8.npy')) img = imread(os.path.join(data_dir, 'chessboard_GRAY_U16.tif')) @@ -71,14 +71,14 @@ np.testing.assert_array_almost_equal(img, expected) -@skipif(not sitk_available) +@testing.skipif(not 
sitk_available, reason="simpletk not installed") def test_imread_uint16_big_endian(): expected = np.load(os.path.join(data_dir, 'chessboard_GRAY_U8.npy')) img = imread(os.path.join(data_dir, 'chessboard_GRAY_U16B.tif')) np.testing.assert_array_almost_equal(img, expected) -class TestSave: +class TestSave(unittest.TestCase): def roundtrip(self, dtype, x): f = NamedTemporaryFile(suffix='.mha') fname = f.name @@ -88,18 +88,14 @@ np.testing.assert_array_almost_equal(x, y) - @skipif(not sitk_available) + @testing.skipif(not sitk_available, reason="simpletk not installed") def test_imsave_roundtrip(self): for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]: for dtype in (np.uint8, np.uint16, np.float32, np.float64): x = np.ones(shape, dtype=dtype) * np.random.rand(*shape) - if np.issubdtype(dtype, float): + if np.issubdtype(dtype, np.floating): yield self.roundtrip, dtype, x else: x = (x * 255).astype(dtype) yield self.roundtrip, dtype, x - -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() diff -Nru skimage-0.13.1/skimage/io/tests/test_tifffile.py skimage-0.14.0/skimage/io/tests/test_tifffile.py --- skimage-0.13.1/skimage/io/tests/test_tifffile.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/io/tests/test_tifffile.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,12 +1,13 @@ import os +import itertools +from tempfile import NamedTemporaryFile from ... import data_dir from .. 
import imread, imsave, use_plugin, reset_plugins import numpy as np -from numpy.testing import ( - assert_array_equal, assert_array_almost_equal, run_module_suite) - -from tempfile import NamedTemporaryFile +from skimage._shared.testing import (assert_array_equal, + assert_array_almost_equal, + parametrize) def setup(): @@ -46,18 +47,15 @@ y = imread(fname) assert_array_equal(x, y) - def test_imsave_roundtrip(self): - for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]: - for dtype in (np.uint8, np.uint16, np.float32, np.int16, - np.float64): - x = np.random.rand(*shape) - - if not np.issubdtype(dtype, float): - x = (x * np.iinfo(dtype).max).astype(dtype) - else: - x = x.astype(dtype) - yield self.roundtrip, dtype, x - + shapes = ((10, 10), (10, 10, 3), (10, 10, 4)) + dtypes = (np.uint8, np.uint16, np.float32, np.int16, np.float64) -if __name__ == "__main__": - run_module_suite() + @parametrize("shape, dtype", itertools.product(shapes, dtypes)) + def test_imsave_roundtrip(self, shape, dtype): + x = np.random.rand(*shape) + + if not np.issubdtype(dtype, np.floating): + x = (x * np.iinfo(dtype).max).astype(dtype) + else: + x = x.astype(dtype) + self.roundtrip(dtype, x) diff -Nru skimage-0.13.1/skimage/measure/_ccomp.pyx skimage-0.14.0/skimage/measure/_ccomp.pyx --- skimage-0.13.1/skimage/measure/_ccomp.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/_ccomp.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -343,33 +343,36 @@ return reshaped -def label_cython(input, neighbors=None, background=None, return_num=False, +def label_cython(input_, neighbors=None, background=None, return_num=False, connectivity=None): # Connected components search as described in Fiorio et al. # We have to ensure that the shape of the input can be handled by the - # algorithm the input if it is the case - input_corrected, swaps = reshape_array(input) + # algorithm. The input is reshaped as needed for compatibility. 
+ input_, swaps = reshape_array(input_) + shape = input_.shape + ndim = input_.ndim - cdef cnp.ndarray[DTYPE_t, ndim=1] data cdef cnp.ndarray[DTYPE_t, ndim=1] forest # Having data a 2D array slows down access considerably using linear # indices even when using the data_p pointer :-( - data = np.copy(input_corrected.flatten().astype(DTYPE)) + + # np.array makes a copy so it is safe to modify data in-place + data = np.array(input_, order='C', dtype=DTYPE) forest = np.arange(data.size, dtype=DTYPE) cdef DTYPE_t *forest_p = forest.data - cdef DTYPE_t *data_p = data.data + cdef DTYPE_t *data_p = cnp.PyArray_DATA(data) cdef shape_info shapeinfo cdef bginfo bg - get_shape_info(input_corrected.shape, &shapeinfo) + get_shape_info(shape, &shapeinfo) get_bginfo(background, &bg) if neighbors is None and connectivity is None: # use the full connectivity by default - connectivity = input_corrected.ndim + connectivity = ndim elif neighbors is not None: DeprecationWarning("The argument 'neighbors' is deprecated, use " "'connectivity' instead") @@ -377,15 +380,15 @@ if neighbors == 4: connectivity = 1 elif neighbors == 8: - connectivity = input_corrected.ndim + connectivity = ndim else: raise ValueError("Neighbors must be either 4 or 8, got '%d'.\n" % neighbors) - if not 1 <= connectivity <= input_corrected.ndim: + if not 1 <= connectivity <= ndim: raise ValueError( "Connectivity below 1 or above %d is illegal." 
- % input_corrected.ndim) + % ndim) scanBG(data_p, forest_p, &shapeinfo, &bg) # the data are treated as degenerated 3D arrays if needed @@ -400,14 +403,13 @@ if data.dtype == np.int32: data = data.view(np.int32) - res = data.reshape(input_corrected.shape) - - res_orig = undo_reshape_array(res, swaps) + if swaps: + data = undo_reshape_array(data, swaps) if return_num: - return res_orig, ctr + return data, ctr else: - return res_orig + return data cdef DTYPE_t resolve_labels(DTYPE_t *data_p, DTYPE_t *forest_p, diff -Nru skimage-0.13.1/skimage/measure/entropy.py skimage-0.14.0/skimage/measure/entropy.py --- skimage-0.13.1/skimage/measure/entropy.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/entropy.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,3 +1,4 @@ +from numpy import unique from scipy.stats import entropy as scipy_entropy @@ -5,7 +6,7 @@ """Calculate the Shannon entropy of an image. The Shannon entropy is defined as S = -sum(pk * log(pk)), - where pk are the number of pixels of value k. + where pk are frequency/probability of pixels of value k. 
Parameters ---------- @@ -32,6 +33,8 @@ -------- >>> from skimage import data >>> shannon_entropy(data.camera()) - 17.732031303342747 + 7.0479552324230861 """ - return scipy_entropy(image.ravel(), base=base) + + _, counts = unique(image, return_counts=True) + return scipy_entropy(counts, base=base) diff -Nru skimage-0.13.1/skimage/measure/_find_contours.py skimage-0.14.0/skimage/measure/_find_contours.py --- skimage-0.13.1/skimage/measure/_find_contours.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/_find_contours.py 2018-05-29 01:27:44.000000000 +0000 @@ -125,10 +125,13 @@ def _take_2(seq): iterator = iter(seq) - while(True): - n1 = next(iterator) - n2 = next(iterator) - yield (n1, n2) + while True: + try: + n1 = next(iterator) + n2 = next(iterator) + yield (n1, n2) + except StopIteration: + return def _assemble_contours(points_iterator): diff -Nru skimage-0.13.1/skimage/measure/fit.py skimage-0.14.0/skimage/measure/fit.py --- skimage-0.13.1/skimage/measure/fit.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/fit.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,7 @@ import math import numpy as np from scipy import optimize -from .._shared.utils import check_random_state, skimage_deprecation, warn +from .._shared.utils import check_random_state def _check_data_dim(data, dim): @@ -25,151 +25,12 @@ self.params = None -class LineModel(BaseModel): - - """Total least squares estimator for 2D lines. - - Lines are parameterized using polar coordinates as functional model:: - - dist = x * cos(theta) + y * sin(theta) - - This parameterization is able to model vertical lines in contrast to the - standard line model ``y = a*x + b``. - - This estimator minimizes the squared distances from all points to the - line:: - - min{ sum((dist - x_i * cos(theta) + y_i * sin(theta))**2) } - - A minimum number of 2 points is required to solve for the parameters. - - **Deprecated class**. Use ``LineModelND`` instead. 
- - Attributes - ---------- - params : tuple - Line model parameters in the following order `dist`, `theta`. - - """ - - def __init__(self): - self.params = None - warn(skimage_deprecation('`LineModel` is deprecated, ' - 'use `LineModelND` instead.')) - - def estimate(self, data): - """Estimate line model from data using total least squares. - - Parameters - ---------- - data : (N, 2) array - N points with ``(x, y)`` coordinates, respectively. - - Returns - ------- - success : bool - True, if model estimation succeeds. - - """ - - _check_data_dim(data, dim=2) - - X0 = data.mean(axis=0) - - if data.shape[0] == 2: # well determined - theta = np.arctan2(data[1, 1] - data[0, 1], - data[1, 0] - data[0, 0]) - elif data.shape[0] > 2: # over-determined - data = data - X0 - # first principal component - _, _, v = np.linalg.svd(data) - theta = np.arctan2(v[0, 1], v[0, 0]) - else: # under-determined - raise ValueError('At least 2 input points needed.') - - # angle perpendicular to line angle - theta = (theta + np.pi / 2) % np.pi - # line always passes through mean - dist = X0[0] * math.cos(theta) + X0[1] * math.sin(theta) - - self.params = (dist, theta) - - return True - - def residuals(self, data): - """Determine residuals of data to model. - - For each point the shortest distance to the line is returned. - - Parameters - ---------- - data : (N, 2) array - N points with ``(x, y)`` coordinates, respectively. - - Returns - ------- - residuals : (N, ) array - Residual for each data point. - - """ - - _check_data_dim(data, dim=2) - - dist, theta = self.params - - x = data[:, 0] - y = data[:, 1] - - return dist - (x * math.cos(theta) + y * math.sin(theta)) - - def predict_x(self, y, params=None): - """Predict x-coordinates using the estimated model. - - Parameters - ---------- - y : array - y-coordinates. - params : (2, ) array, optional - Optional custom parameter set. - - Returns - ------- - x : array - Predicted x-coordinates. 
- - """ - - if params is None: - params = self.params - dist, theta = params - return (dist - y * math.sin(theta)) / math.cos(theta) - - def predict_y(self, x, params=None): - """Predict y-coordinates using the estimated model. - - Parameters - ---------- - x : array - x-coordinates. - params : (2, ) array, optional - Optional custom parameter set. - - Returns - ------- - y : array - Predicted y-coordinates. - - """ - - if params is None: - params = self.params - dist, theta = params - return (dist - x * math.cos(theta)) / math.sin(theta) - - class LineModelND(BaseModel): """Total least squares estimator for N-dimensional lines. + In contrast to ordinary least squares line estimation, this estimator + minimizes the orthogonal distances of points to the estimated line. + Lines are defined by a point (origin) and a unit vector (direction) according to the following vector equation:: @@ -180,11 +41,32 @@ params : tuple Line model parameters in the following order `origin`, `direction`. + Examples + -------- + >>> x = np.linspace(1, 2, 25) + >>> y = 1.5 * x + 3 + >>> lm = LineModelND() + >>> lm.estimate(np.array([x, y]).T) + True + >>> tuple(np.round(lm.params, 5)) + (array([ 1.5 , 5.25]), array([ 0.5547 , 0.83205])) + >>> res = lm.residuals(np.array([x, y]).T) + >>> np.abs(np.round(res, 9)) + array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) + >>> np.round(lm.predict_y(x[:5]), 3) + array([ 4.5 , 4.562, 4.625, 4.688, 4.75 ]) + >>> np.round(lm.predict_x(y[:5]), 3) + array([ 1. , 1.042, 1.083, 1.125, 1.167]) + """ def estimate(self, data): """Estimate line model from data. + This minimizes the sum of shortest (orthogonal) distances + from the given data points to the estimated line. + Parameters ---------- data : (N, dim) array @@ -195,51 +77,56 @@ success : bool True, if model estimation succeeds. 
""" - _check_data_atleast_2D(data) - X0 = data.mean(axis=0) + origin = data.mean(axis=0) + data = data - origin if data.shape[0] == 2: # well determined - u = data[1] - data[0] - norm = np.linalg.norm(u) - if norm > 0: - u /= norm + direction = data[1] - data[0] + norm = np.linalg.norm(direction) + if norm != 0: # this should not happen to be norm 0 + direction /= norm elif data.shape[0] > 2: # over-determined - data = data - X0 - # first principal component - # Note: without full_matrices=False Python dies with joblib - # parallel_for. - _, _, u = np.linalg.svd(data, full_matrices=False) - u = u[0] + # Note: with full_matrices=1 Python dies with joblib parallel_for. + _, _, v = np.linalg.svd(data, full_matrices=False) + direction = v[0] else: # under-determined raise ValueError('At least 2 input points needed.') - self.params = (X0, u) + self.params = (origin, direction) return True - def residuals(self, data): + def residuals(self, data, params=None): """Determine residuals of data to model. - For each point the shortest distance to the line is returned. - It is obtained by projecting the data onto the line. + For each point, the shortest (orthogonal) distance to the line is + returned. It is obtained by projecting the data onto the line. Parameters ---------- data : (N, dim) array N points in a space of dimension dim. + params : (2, ) array, optional + Optional custom parameter set in the form (`origin`, `direction`). Returns ------- residuals : (N, ) array Residual for each data point. 
""" - - X0, u = self.params - return _norm_along_axis((data - X0) - - np.dot(data - X0, u)[..., np.newaxis] * u, - axis=1) + _check_data_atleast_2D(data) + if params is None: + params = self.params + assert params is not None + if len(params) != 2: + raise ValueError('Parameters are defined by 2 sets.') + + origin, direction = params + res = (data - origin) - \ + np.dot(data - origin, direction)[..., np.newaxis] * direction + return _norm_along_axis(res, axis=1) def predict(self, x, axis=0, params=None): """Predict intersection of the estimated line model with a hyperplane @@ -247,32 +134,38 @@ Parameters ---------- - x : array - coordinates along an axis. + x : (n, 1) array + Coordinates along an axis. axis : int - axis orthogonal to the hyperplane intersecting the line. + Axis orthogonal to the hyperplane intersecting the line. params : (2, ) array, optional Optional custom parameter set in the form (`origin`, `direction`). Returns ------- - y : array + data : (n, m) array Predicted coordinates. - If the line is parallel to the given axis, a ValueError is raised. + Raises + ------ + ValueError + If the line is parallel to the given axis. """ - if params is None: params = self.params + assert params is not None + if len(params) != 2: + raise ValueError('Parameters are defined by 2 sets.') - X0, u = params + origin, direction = params - if u[axis] == 0: + if direction[axis] == 0: # line parallel to axis raise ValueError('Line parallel to axis %s' % axis) - l = (x - X0[axis]) / u[axis] - return X0 + l[..., np.newaxis] * u + l = (x - origin[axis]) / direction[axis] + data = origin + l[..., np.newaxis] * direction + return data def predict_x(self, y, params=None): """Predict x-coordinates for 2D lines using the estimated model. @@ -294,10 +187,11 @@ Predicted x-coordinates. 
""" - return self.predict(y, axis=1, params=params)[:, 0] + x = self.predict(y, axis=1, params=params)[:, 0] + return x def predict_y(self, x, params=None): - """Predict y-coordinates for 2D lines using the estimated model. + """Predict y-coordinates for 2D lines using the estimated model. Alias for:: @@ -316,7 +210,8 @@ Predicted y-coordinates. """ - return self.predict(x, axis=0, params=params)[:, 1] + y = self.predict(x, axis=0, params=params)[:, 1] + return y class CircleModel(BaseModel): @@ -339,6 +234,20 @@ params : tuple Circle model parameters in the following order `xc`, `yc`, `r`. + Examples + -------- + >>> t = np.linspace(0, 2 * np.pi, 25) + >>> xy = CircleModel().predict_xy(t, params=(2, 3, 4)) + >>> model = CircleModel() + >>> model.estimate(xy) + True + >>> tuple(np.round(model.params, 5)) + (2.0, 3.0, 4.0) + >>> res = model.residuals(xy) + >>> np.abs(np.round(res, 9)) + array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) + """ def estimate(self, data): @@ -360,34 +269,25 @@ x = data[:, 0] y = data[:, 1] - # pre-allocate jacobian for all iterations - A = np.zeros((3, data.shape[0]), dtype=np.double) - # same for all iterations: r - A[2, :] = -1 - - def dist(xc, yc): - return np.sqrt((x - xc)**2 + (y - yc)**2) - - def fun(params): - xc, yc, r = params - return dist(xc, yc) - r - - def Dfun(params): - xc, yc, r = params - d = dist(xc, yc) - A[0, :] = -(x - xc) / d - A[1, :] = -(y - yc) / d - # same for all iterations, so not changed in each iteration - #A[2, :] = -1 - return A - - xc0 = x.mean() - yc0 = y.mean() - r0 = dist(xc0, yc0).mean() - params0 = (xc0, yc0, r0) - params, _ = optimize.leastsq(fun, params0, Dfun=Dfun, col_deriv=True) - self.params = params + # http://www.had2know.com/academics/best-fit-circle-least-squares.html + x2y2 = (x ** 2 + y ** 2) + sum_x = np.sum(x) + sum_y = np.sum(y) + sum_xy = np.sum(x * y) + m1 = np.array([[np.sum(x ** 2), sum_xy, sum_x], + [sum_xy, np.sum(y ** 
2), sum_y], + [sum_x, sum_y, float(len(x))]]) + m2 = np.array([[np.sum(x * x2y2), + np.sum(y * x2y2), + np.sum(x2y2)]]).T + a, b, c = np.linalg.pinv(m1).dot(m2) + a, b, c = a[0], b[0], c[0] + xc = a / 2 + yc = b / 2 + r = np.sqrt(4 * c + a ** 2 + b ** 2) / 2 + + self.params = (xc, yc, r) return True @@ -797,7 +697,7 @@ If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. - + Returns ------- @@ -880,7 +780,7 @@ best_inlier_num = 0 best_inlier_residuals_sum = np.inf best_inliers = None - + random_state = check_random_state(random_state) if min_samples < 0: diff -Nru skimage-0.13.1/skimage/measure/__init__.py skimage-0.14.0/skimage/measure/__init__.py --- skimage-0.13.1/skimage/measure/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,16 +1,18 @@ from ._find_contours import find_contours -from ._marching_cubes_lewiner import marching_cubes, marching_cubes_lewiner +from ._marching_cubes_lewiner import marching_cubes_lewiner from ._marching_cubes_classic import (marching_cubes_classic, mesh_surface_area, correct_mesh_orientation) from ._regionprops import regionprops, perimeter from .simple_metrics import compare_mse, compare_nrmse, compare_psnr -from ._structural_similarity import compare_ssim, structural_similarity +from ._structural_similarity import compare_ssim from ._polygon import approximate_polygon, subdivide_polygon from .pnpoly import points_in_poly, grid_points_in_poly -from ._moments import moments, moments_central, moments_normalized, moments_hu +from ._moments import (moments, moments_central, moments_coords, + moments_coords_central, moments_normalized, centroid, + moments_hu, inertia_tensor, inertia_tensor_eigvals) from .profile import profile_line -from .fit import LineModel, LineModelND, CircleModel, EllipseModel, ransac +from .fit import LineModelND, CircleModel, 
EllipseModel, ransac from .block import block_reduce from ._label import label from .entropy import shannon_entropy @@ -21,7 +23,6 @@ 'perimeter', 'approximate_polygon', 'subdivide_polygon', - 'LineModel', 'LineModelND', 'CircleModel', 'EllipseModel', @@ -29,9 +30,10 @@ 'block_reduce', 'moments', 'moments_central', + 'moments_coords', + 'moments_coords_central' 'moments_normalized', 'moments_hu', - 'marching_cubes', 'marching_cubes_lewiner', 'marching_cubes_classic', 'mesh_surface_area', @@ -40,10 +42,9 @@ 'label', 'points_in_poly', 'grid_points_in_poly', - 'structural_similarity', 'compare_ssim', 'compare_mse', 'compare_nrmse', 'compare_psnr', 'shannon_entropy', - ] +] diff -Nru skimage-0.13.1/skimage/measure/_marching_cubes_lewiner_luts.py skimage-0.14.0/skimage/measure/_marching_cubes_lewiner_luts.py --- skimage-0.13.1/skimage/measure/_marching_cubes_lewiner_luts.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/_marching_cubes_lewiner_luts.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- - -# This file was auto-generated from LookUpTable.h by createluts.py. -# The luts are Copyright (C) 2002 by Thomas Lewiner +# This file was auto-generated from `mc_meta/LookUpTable.h` by +# `mc_meta/createluts.py`. #static const char casesClassic[256][16] CASESCLASSIC = (256, 16), """ diff -Nru skimage-0.13.1/skimage/measure/_marching_cubes_lewiner.py skimage-0.14.0/skimage/measure/_marching_cubes_lewiner.py --- skimage-0.13.1/skimage/measure/_marching_cubes_lewiner.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/_marching_cubes_lewiner.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,61 +1,21 @@ import sys import base64 -import dis -import inspect import numpy as np +from . import _marching_cubes_lewiner_luts as mcluts +from . 
import _marching_cubes_lewiner_cy + + if sys.version_info >= (3, ): base64decode = base64.decodebytes - ordornot = lambda x: x else: base64decode = base64.decodestring - ordornot = ord - -from . import _marching_cubes_lewiner_luts as mcluts -from . import _marching_cubes_lewiner_cy -from .._shared.utils import skimage_deprecation, warn - -def _expected_output_args(): - """ Get number of expected output args. - Please don't use this to influence the algorithmic bahaviour of a function. - For ``a, b, rest*, c = ...`` syntax, returns n + 0.1 (3.1 in this example). - """ - offset = 2 if sys.version_info >= (3, 6) else 3 - f = inspect.currentframe().f_back.f_back - i = f.f_lasti + offset - bytecode = f.f_code.co_code - instruction = ordornot(bytecode[i]) - while True: - if instruction == dis.opmap['DUP_TOP']: - if ordornot(bytecode[i + 1]) == dis.opmap['UNPACK_SEQUENCE']: - return ordornot(bytecode[i + 2]) - i += 4 - instruction = ordornot(bytecode[i]) - continue - if instruction == dis.opmap['STORE_NAME']: - return 1 - if instruction == dis.opmap['UNPACK_SEQUENCE']: - return ordornot(bytecode[i + 1]) - if instruction == dis.opmap.get('UNPACK_EX', -1): # py3k - if ordornot(bytecode[i + 2]) < 10: - return ordornot(bytecode[i + 1]) + ordornot(bytecode[i + 2]) + 0.1 - else: # 3.6 - return ordornot(bytecode[i + 1]) + 0.1 - if instruction == dis.opmap.get('EXTENDED_ARG', -1): # py 3.6 - if ordornot(bytecode[i + 2]) == dis.opmap.get('UNPACK_EX', -1): - return ordornot(bytecode[i + 1]) + ordornot(bytecode[i + 3]) + 0.1 - i += 4 - instruction = ordornot(bytecode[i]) - continue - return 0 - - -def marching_cubes(volume, level=None, spacing=(1., 1., 1.), - gradient_direction='descent', step_size=1, - allow_degenerate=True, use_classic=False): +def marching_cubes_lewiner(volume, level=None, spacing=(1., 1., 1.), + gradient_direction='descent', step_size=1, + allow_degenerate=True, use_classic=False): """ Lewiner marching cubes algorithm to find surfaces in 3d volumetric data. 
@@ -130,7 +90,7 @@ named `myvolume` about the level 0.0, using the ``mayavi`` package:: >>> from mayavi import mlab # doctest: +SKIP - >>> verts, faces, normals, values = marching_cubes(myvolume, 0.0) # doctest: +SKIP + >>> verts, faces, normals, values = marching_cubes_lewiner(myvolume, 0.0) # doctest: +SKIP >>> mlab.triangular_mesh([vert[0] for vert in verts], ... [vert[1] for vert in verts], ... [vert[2] for vert in verts], @@ -140,7 +100,7 @@ Similarly using the ``visvis`` package:: >>> import visvis as vv # doctest: +SKIP - >>> verts, faces, normals, values = marching_cubes_classic(myvolume, 0.0) # doctest: +SKIP + >>> verts, faces, normals, values = marching_cubes_lewiner(myvolume, 0.0) # doctest: +SKIP >>> vv.mesh(np.fliplr(verts), faces, normals, values) # doctest: +SKIP >>> vv.use().Run() # doctest: +SKIP @@ -158,30 +118,6 @@ skimage.measure.mesh_surface_area """ - # This signature (output args) of this func changed after 0.12 - try: - nout = _expected_output_args() - except Exception: - nout = 0 # always warn if, for whaterver reason, the black magic in above call fails - if nout <= 2: - warn(skimage_deprecation('`marching_cubes` now uses a better and ' - 'faster algorithm, and returns four instead ' - 'of two outputs (see docstring for details). ' - 'Backwards compatibility with 0.12 and prior ' - 'is available with `marching_cubes_classic`. ' - 'This function will be removed in 0.14, ' - 'consider switching to `marching_cubes_lewiner`.')) - - return marching_cubes_lewiner(volume, level, spacing, gradient_direction, - step_size, allow_degenerate, use_classic) - - -def marching_cubes_lewiner(volume, level=None, spacing=(1., 1., 1.), - gradient_direction='descent', step_size=1, - allow_degenerate=True, use_classic=False): - """ Alias for ``marching_cubes()``. 
- """ - # Check volume and ensure its in the format that the alg needs if not isinstance(volume, np.ndarray) or (volume.ndim != 3): raise ValueError('Input volume should be a 3D numpy array.') @@ -229,14 +165,14 @@ elif not gradient_direction == 'ascent': raise ValueError("Incorrect input %s in `gradient_direction`, see " "docstring." % (gradient_direction)) - if spacing != (1, 1, 1): + if not np.array_equal(spacing, (1, 1, 1)): vertices = vertices * np.r_[spacing] if allow_degenerate: return vertices, faces, normals, values else: fun = _marching_cubes_lewiner_cy.remove_degenerate_faces - return fun(vertices, faces, normals, values) + return fun(vertices.astype(np.float32), faces, normals, values) def _to_array(args): diff -Nru skimage-0.13.1/skimage/measure/mc_meta/createluts.py skimage-0.14.0/skimage/measure/mc_meta/createluts.py --- skimage-0.13.1/skimage/measure/mc_meta/createluts.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/mc_meta/createluts.py 2018-05-29 01:27:44.000000000 +0000 @@ -8,6 +8,8 @@ """ +import numpy as np + import sys import base64 @@ -19,6 +21,7 @@ base64encode = base64.encodestring base64decode = base64.decodestring + def create_luts(fname): # Get the lines in the C header file @@ -151,22 +154,10 @@ import os fname = os.path.join(os.getcwd(), 'LookUpTable.h') - if True: - with open(os.path.join(os.getcwd(), 'mcluts.py'), 'w') as f: - f.write('# -*- coding: utf-8 -*-\n') - f.write('# Copyright (C) 2012, Almar Klein\n# Copyright (C) 2002, Thomas Lewiner\n\n') - f.write('# This file was auto-generated from LookUpTable.h by createluts.py.\n\n') - f.write(create_luts(fname)) - - else: - for prefix in ['TILING', 'TEST']: - tmp = ['luts.'+a for a in getLutNames(prefix)] - print(', '.join(tmp)) - print('') - for name in getLutNames(prefix): - print('self.%s = Lut(%s)' % (name, name)) - print('') - for name in getLutNames(prefix): - print('cdef Lut %s' % name) - print('') - print('') + with open(os.path.join(os.getcwd(), 
'mcluts.py'), 'w') as f: + f.write('# -*- coding: utf-8 -*-\n') + f.write( + '# This file was auto-generated from `mc_meta/LookUpTable.h` by\n' + '# `mc_meta/createluts.py`.\n\n' + ) + f.write(create_luts(fname)) diff -Nru skimage-0.13.1/skimage/measure/mc_meta/visual_test.py skimage-0.14.0/skimage/measure/mc_meta/visual_test.py --- skimage-0.13.1/skimage/measure/mc_meta/visual_test.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/mc_meta/visual_test.py 2018-05-29 01:27:44.000000000 +0000 @@ -53,11 +53,11 @@ # Get surface meshes t0 = time.time() -vertices1, faces1, *_ = marching_cubes_lewiner(vol, isovalue, gradient_direction=gradient_dir, use_classic=False) +vertices1, faces1, _ = marching_cubes_lewiner(vol, isovalue, gradient_direction=gradient_dir, use_classic=False) print('finding surface lewiner took %1.0f ms' % (1000*(time.time()-t0)) ) t0 = time.time() -vertices2, faces2, *_ = marching_cubes_classic(vol, isovalue, gradient_direction=gradient_dir) +vertices2, faces2, _ = marching_cubes_classic(vol, isovalue, gradient_direction=gradient_dir) print('finding surface classic took %1.0f ms' % (1000*(time.time()-t0)) ) # Show diff -Nru skimage-0.13.1/skimage/measure/_moments_cy.pyx skimage-0.14.0/skimage/measure/_moments_cy.pyx --- skimage-0.13.1/skimage/measure/_moments_cy.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/_moments_cy.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -1,45 +1,7 @@ #cython: cdivision=True -#cython: boundscheck=False #cython: nonecheck=False #cython: wraparound=False import numpy as np -import cython - - -ctypedef fused image_t: - cython.uchar[:, :] - cython.double[:, :] - - -cpdef moments_central(image_t image, double cr, double cc, Py_ssize_t order): - cdef Py_ssize_t p, q, r, c - cdef double[:, ::1] mu = np.zeros((order + 1, order + 1), dtype=np.double) - cdef double val, dr, dc, dcp, drq - for r in range(image.shape[0]): - dr = r - cr - for c in range(image.shape[1]): - dc = c - cc - val 
= image[r, c] - dcp = 1 - for p in range(order + 1): - drq = 1 - for q in range(order + 1): - mu[p, q] += val * drq * dcp - drq *= dr - dcp *= dc - return np.asarray(mu) - - -def moments_normalized(double[:, :] mu, Py_ssize_t order=3): - cdef Py_ssize_t p, q - cdef double[:, ::1] nu = np.zeros((order + 1, order + 1), dtype=np.double) - for p in range(order + 1): - for q in range(order + 1): - if p + q >= 2: - nu[p, q] = mu[p, q] / mu[0, 0] ** ((p + q) / 2 + 1) - else: - nu[p, q] = np.nan - return np.asarray(nu) def moments_hu(double[:, :] nu): diff -Nru skimage-0.13.1/skimage/measure/_moments.py skimage-0.14.0/skimage/measure/_moments.py --- skimage-0.13.1/skimage/measure/_moments.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/_moments.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,21 +1,167 @@ # coding: utf-8 +from __future__ import division import numpy as np +from .._shared.utils import assert_nD from . import _moments_cy +import itertools +from warnings import warn + + +def moments_coords(coords, order=3): + """Calculate all raw image moments up to a certain order. + + The following properties can be calculated from raw image moments: + * Area as: ``M[0, 0]``. + * Centroid as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}. + + Note that raw moments are neither translation, scale nor rotation + invariant. + + Parameters + ---------- + coords : (N, D) double or uint8 array + Array of N points that describe an image of D dimensionality in + Cartesian space. + order : int, optional + Maximum order of moments. Default is 3. + + Returns + ------- + M : (``order + 1``, ``order + 1``, ...) array + Raw image moments. (D dimensions) + + References + ---------- + .. [1] Johannes Kilian. Simple Image Analysis By Moments. Durham + University, version 0.2, Durham, 2001. + + Examples + -------- + >>> coords = np.array([[row, col] + ... for row in range(13, 17) + ... 
for col in range(14, 18)], dtype=np.double) + >>> M = moments_coords(coords) + >>> centroid_row = M[1, 0] / M[0, 0] + >>> centroid_col = M[0, 1] / M[0, 0] + >>> centroid_row, centroid_col + (14.5, 15.5) + """ + return moments_coords_central(coords, 0, order=order) + + +def moments_coords_central(coords, center=None, order=3): + """Calculate all central image moments up to a certain order. + + The following properties can be calculated from raw image moments: + * Area as: ``M[0, 0]``. + * Centroid as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}. + + Note that raw moments are neither translation, scale nor rotation + invariant. + + Parameters + ---------- + coords : (N, D) double or uint8 array + Array of N points that describe an image of D dimensionality in + Cartesian space. A tuple of coordinates as returned by + ``np.nonzero`` is also accepted as input. + center : tuple of float, optional + Coordinates of the image centroid. This will be computed if it + is not provided. + order : int, optional + Maximum order of moments. Default is 3. + + Returns + ------- + Mc : (``order + 1``, ``order + 1``, ...) array + Central image moments. (D dimensions) + + References + ---------- + .. [1] Johannes Kilian. Simple Image Analysis By Moments. Durham + University, version 0.2, Durham, 2001. + + Examples + -------- + >>> coords = np.array([[row, col] + ... for row in range(13, 17) + ... for col in range(14, 18)]) + >>> moments_coords_central(coords) + array([[ 16., 0., 20., 0.], + [ 0., 0., 0., 0.], + [ 20., 0., 25., 0.], + [ 0., 0., 0., 0.]]) + + As seen above, for symmetric objects, odd-order moments (columns 1 and 3, + rows 1 and 3) are zero when centered on the centroid, or center of mass, + of the object (the default). If we break the symmetry by adding a new + point, this no longer holds: + + >>> coords2 = np.concatenate((coords, [[17, 17]]), axis=0) + >>> np.round(moments_coords_central(coords2), 2) + array([[ 17. , 0. , 22.12, -2.49], + [ 0. 
, 3.53, 1.73, 7.4 ], + [ 25.88, 6.02, 36.63, 8.83], + [ 4.15, 19.17, 14.8 , 39.6 ]]) + + Image moments and central image moments are equivalent (by definition) + when the center is (0, 0): + + >>> np.allclose(moments_coords(coords), + ... moments_coords_central(coords, (0, 0))) + True + """ + if isinstance(coords, tuple): + # This format corresponds to coordinate tuples as returned by + # e.g. np.nonzero: (row_coords, column_coords). + # We represent them as an npoints x ndim array. + coords = np.transpose(coords) + assert_nD(coords, 2) + ndim = coords.shape[1] + if center is None: + center = np.mean(coords, axis=0) + + # center the coordinates + coords = coords.astype(float) - center + + # generate all possible exponents for each axis in the given set of points + # produces a matrix of shape (N, D, order + 1) + coords = coords[..., np.newaxis] ** np.arange(order + 1) + + # add extra dimensions for proper broadcasting + coords = coords.reshape(coords.shape + (1,) * (ndim - 1)) + + calc = 1 + + for axis in range(ndim): + # isolate each point's axis + isolated_axis = coords[:, axis] + + # rotate orientation of matrix for proper broadcasting + isolated_axis = np.moveaxis(isolated_axis, 1, 1 + axis) + + # calculate the moments for each point, one axis at a time + calc = calc * isolated_axis + + # sum all individual point moments to get our final answer + Mc = np.sum(calc, axis=0) + + return Mc def moments(image, order=3): """Calculate all raw image moments up to a certain order. The following properties can be calculated from raw image moments: - * Area as: ``m[0, 0]``. - * Centroid as: {``m[0, 1] / m[0, 0]``, ``m[1, 0] / m[0, 0]``}. + * Area as: ``M[0, 0]``. + * Centroid as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}. Note that raw moments are neither translation, scale nor rotation invariant. Parameters ---------- - image : 2D double or uint8 array + image : nD double or uint8 array Rasterized shape as image. order : int, optional Maximum order of moments. 
Default is 3. @@ -40,35 +186,40 @@ -------- >>> image = np.zeros((20, 20), dtype=np.double) >>> image[13:17, 13:17] = 1 - >>> m = moments(image) - >>> cr = m[0, 1] / m[0, 0] - >>> cc = m[1, 0] / m[0, 0] + >>> M = moments(image) + >>> cr = M[1, 0] / M[0, 0] + >>> cc = M[0, 1] / M[0, 0] >>> cr, cc (14.5, 14.5) - """ - return _moments_cy.moments_central(image, 0, 0, order) + return moments_central(image, (0,) * image.ndim, order=order) -def moments_central(image, cr, cc, order=3): +def moments_central(image, center=None, cc=None, order=3, **kwargs): """Calculate all central image moments up to a certain order. The center coordinates (cr, cc) can be calculated from the raw moments as: - {``m[0, 1] / m[0, 0]``, ``m[1, 0] / m[0, 0]``}. + {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}. Note that central moments are translation invariant but not scale and rotation invariant. Parameters ---------- - image : 2D double or uint8 array + image : nD double or uint8 array Rasterized shape as image. + center : tuple of float, optional + Coordinates of the image centroid. This will be computed if it + is not provided. + order : int, optional + The maximum order of moments computed. + + Other Parameters + ---------------- cr : double - Center row coordinate. + DEPRECATED: Center row coordinate for 2D image. cc : double - Center column coordinate. - order : int, optional - Maximum order of moments. Default is 3. + DEPRECATED: Center column coordinate for 2D image. 
Returns ------- @@ -90,17 +241,36 @@ -------- >>> image = np.zeros((20, 20), dtype=np.double) >>> image[13:17, 13:17] = 1 - >>> m = moments(image) - >>> cr = m[0, 1] / m[0, 0] - >>> cc = m[1, 0] / m[0, 0] - >>> moments_central(image, cr, cc) + >>> M = moments(image) + >>> cr = M[1, 0] / M[0, 0] + >>> cc = M[0, 1] / M[0, 0] + >>> moments_central(image, (cr, cc)) array([[ 16., 0., 20., 0.], [ 0., 0., 0., 0.], [ 20., 0., 25., 0.], [ 0., 0., 0., 0.]]) """ - - return _moments_cy.moments_central(image, cr, cc, order) + if cc is not None: # using deprecated interface + message = ('Using deprecated 2D-only, xy-coordinate interface to ' + 'moments_central. This interface will be removed in ' + 'scikit-image 0.16. Use ' + 'moments_central(image, center=(cr, cc), order=3).') + warn(message) + if 'cr' in kwargs and center is None: + center = (kwargs['cr'], cc) + else: + center = (center, cc) + return moments_central(image, center=center, order=order).T + if center is None: + center = centroid(image) + calc = image.astype(float) + for dim, dim_length in enumerate(image.shape): + delta = np.arange(dim_length, dtype=float) - center[dim] + powers_of_delta = delta[:, np.newaxis] ** np.arange(order + 1) + calc = np.rollaxis(calc, dim, image.ndim) + calc = np.dot(calc, powers_of_delta) + calc = np.rollaxis(calc, -1, dim) + return calc def moments_normalized(mu, order=3): @@ -111,14 +281,15 @@ Parameters ---------- - mu : (M, M) array - Central image moments, where M must be > ``order``. + mu : (M,[ ...,] M) array + Central image moments, where M must be greater than or equal + to ``order``. order : int, optional Maximum order of moments. Default is 3. Returns ------- - nu : (``order + 1``, ``order + 1``) array + nu : (``order + 1``,[ ...,] ``order + 1``) array Normalized central image moments. References @@ -147,15 +318,20 @@ [ 0. , 0. , 0. , 0. 
]]) """ - if mu.ndim != 2: - raise TypeError("Image moments must be 2-dimension") - if mu.shape[0] <= order or mu.shape[1] <= order: - raise TypeError("Shape of image moments must be >= `order`") - return _moments_cy.moments_normalized(mu.astype(np.double), order) + if np.any(np.array(mu.shape) <= order): + raise ValueError("Shape of image moments must be >= `order`") + nu = np.zeros_like(mu) + mu0 = mu.ravel()[0] + for powers in itertools.product(range(order + 1), repeat=mu.ndim): + if sum(powers) < 2: + nu[powers] = np.nan + else: + nu[powers] = mu[powers] / (mu0 ** (sum(powers) / nu.ndim + 1)) + return nu def moments_hu(nu): - """Calculate Hu's set of image moments. + """Calculate Hu's set of image moments (2D-only). Note that this set of moments is proofed to be translation, scale and rotation invariant. @@ -167,7 +343,7 @@ Returns ------- - nu : (7, 1) array + nu : (7,) array Hu's set of image moments. References @@ -186,3 +362,106 @@ """ return _moments_cy.moments_hu(nu.astype(np.double)) + + +def centroid(image): + """Return the (weighted) centroid of an image. + + Parameters + ---------- + image : array + The input image. + + Returns + ------- + center : tuple of float, length ``image.ndim`` + The centroid of the (nonzero) pixels in ``image``. + """ + M = moments_central(image, center=(0,) * image.ndim, order=1) + center = (M[tuple(np.eye(image.ndim, dtype=int))] # array of weighted sums + # for each axis + / M[(0,) * image.ndim]) # weighted sum of all points + return center + + +def inertia_tensor(image, mu=None): + """Compute the inertia tensor of the input image. + + Parameters + ---------- + image : array + The input image. + mu : array, optional + The pre-computed central moments of ``image``. The inertia tensor + computation requires the central moments of the image. 
If an + application requires both the central moments and the inertia tensor + (for example, `skimage.measure.regionprops`), then it is more + efficient to pre-compute them and pass them to the inertia tensor + call. + + Returns + ------- + T : array, shape ``(image.ndim, image.ndim)`` + The inertia tensor of the input image. :math:`T_{i, j}` contains + the covariance of image intensity along axes :math:`i` and :math:`j`. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Moment_of_inertia#Inertia_tensor + .. [2] Bernd Jähne. Spatio-Temporal Image Processing: Theory and + Scientific Applications. (Chapter 8: Tensor Methods) Springer, 1993. + """ + if mu is None: + mu = moments_central(image) + mu0 = mu[(0,) * image.ndim] + result = np.zeros((image.ndim, image.ndim)) + + # nD expression to get coordinates ([2, 0], [0, 2]) (2D), + # ([2, 0, 0], [0, 2, 0], [0, 0, 2]) (3D), etc. + corners2 = tuple(2 * np.eye(image.ndim, dtype=int)) + d = np.diag(result) + d.flags.writeable = True + d[:] = mu[corners2] / mu0 + + for dims in itertools.combinations(range(image.ndim), 2): + mu_index = np.zeros(image.ndim, dtype=int) + mu_index[list(dims)] = 1 + result[dims] = -mu[tuple(mu_index)] / mu0 + result.T[dims] = -mu[tuple(mu_index)] / mu0 + return result + + +def inertia_tensor_eigvals(image, mu=None, T=None): + """Compute the eigenvalues of the inertia tensor of the image. + + The inertia tensor measures covariance of the image intensity along + the image axes. (See `inertia_tensor`.) The relative magnitude of the + eigenvalues of the tensor is thus a measure of the elongation of a + (bright) object in the image. + + Parameters + ---------- + image : array + The input image. + mu : array, optional + The pre-computed central moments of ``image``. + T : array, shape ``(image.ndim, image.ndim)`` + The pre-computed inertia tensor. If ``T`` is given, ``mu`` and + ``image`` are ignored. 
+ + Returns + ------- + eigvals : list of float, length ``image.ndim`` + The eigenvalues of the inertia tensor of ``image``, in descending + order. + + Notes + ----- + Computing the eigenvalues requires the inertia tensor of the input image. + This is much faster if the central moments (``mu``) are provided, or, + alternatively, one can provide the inertia tensor (``T``) directly. + """ + if T is None: + T = inertia_tensor(image, mu) + eigvals = np.linalg.eigvalsh(T) + return sorted(eigvals, reverse=True) diff -Nru skimage-0.13.1/skimage/measure/profile.py skimage-0.14.0/skimage/measure/profile.py --- skimage-0.13.1/skimage/measure/profile.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/profile.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,13 +2,13 @@ from scipy import ndimage as ndi -def profile_line(img, src, dst, linewidth=1, +def profile_line(image, src, dst, linewidth=1, order=1, mode='constant', cval=0.0): """Return the intensity profile of an image measured along a scan line. Parameters ---------- - img : numeric array, shape (M, N[, C]) + image : numeric array, shape (M, N[, C]) The image, either grayscale (2D array) or multichannel (3D array, where the final axis contains the channel information). 
@@ -57,13 +57,13 @@ array([ 1., 1., 1., 2., 2., 2.]) """ perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth) - if img.ndim == 3: - pixels = [ndi.map_coordinates(img[..., i], perp_lines, + if image.ndim == 3: + pixels = [ndi.map_coordinates(image[..., i], perp_lines, order=order, mode=mode, cval=cval) - for i in range(img.shape[2])] + for i in range(image.shape[2])] pixels = np.transpose(np.asarray(pixels), (1, 2, 0)) else: - pixels = ndi.map_coordinates(img, perp_lines, + pixels = ndi.map_coordinates(image, perp_lines, order=order, mode=mode, cval=cval) intensities = pixels.mean(axis=1) diff -Nru skimage-0.13.1/skimage/measure/_regionprops.py skimage-0.14.0/skimage/measure/_regionprops.py --- skimage-0.13.1/skimage/measure/_regionprops.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/_regionprops.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,8 @@ # coding: utf-8 from __future__ import division from math import sqrt, atan2, pi as PI +import itertools +from warnings import warn import numpy as np from scipy import ndimage as ndi @@ -13,6 +15,13 @@ __all__ = ['regionprops', 'perimeter'] +XY_TO_RC_DEPRECATION_MESSAGE = ( + 'regionprops and image moments (including moments, normalized moments, ' + 'central moments, and inertia tensor) of 2D images will change from xy ' + 'coordinates to rc coordinates in version 0.16.\nSee ' + 'http://scikit-image.org/docs/0.14.x/release_notes_and_installation.html#deprecations ' + 'for details on how to avoid this message.' +) STREL_4 = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8) @@ -91,7 +100,7 @@ """ def __init__(self, slice, label, label_image, intensity_image, - cache_active): + cache_active, coordinates): if intensity_image is not None: if not intensity_image.shape == label_image.shape: @@ -107,6 +116,21 @@ self._cache_active = cache_active self._cache = {} self._ndim = label_image.ndim + # Note: in PR 2603, we added support for nD moments in regionprops. 
+ # Many properties used xy coordinates, instead of rc. This attribute + # helps with the deprecation process and should be removed in 0.16. + if label_image.ndim > 2 or coordinates == 'rc': + self._use_xy_warning = False + self._transpose_moments = False + elif coordinates == 'xy': + self._use_xy_warning = False # don't warn if 'xy' given explicitly + self._transpose_moments = True + elif coordinates is None: + self._use_xy_warning = True + self._transpose_moments = True + else: + raise ValueError('Incorrect value for regionprops coordinates: %s.' + ' Possible values are: "rc", "xy", or None') @_cached def area(self): @@ -123,17 +147,16 @@ [self._slice[i].stop for i in range(self._ndim)]) def bbox_area(self): - return self._label_image.size + return self.image.size def centroid(self): return tuple(self.coords.mean(axis=0)) - @only2d + @_cached def convex_area(self): return np.sum(self.convex_image) @_cached - @only2d def convex_image(self): from ..morphology.convex_hull import convex_hull_image return convex_hull_image(self.image) @@ -156,10 +179,9 @@ elif self._ndim == 3: return (6 * self.area / PI) ** (1. 
/ 3) - @only2d def euler_number(self): euler_array = self.filled_image != self.image - _, num = label(euler_array, neighbors=8, return_num=True, + _, num = label(euler_array, connectivity=self._ndim, return_num=True, background=0) return -num + 1 @@ -171,7 +193,7 @@ @_cached def filled_image(self): - structure = STREL_8 if self._ndim == 2 else STREL_26_3D + structure = np.ones((3,) * self._ndim) return ndi.binary_fill_holes(self.image, structure) @_cached @@ -179,22 +201,14 @@ return self._label_image[self._slice] == self.label @_cached - @only2d def inertia_tensor(self): mu = self.moments_central - a = mu[2, 0] / mu[0, 0] - b = -mu[1, 1] / mu[0, 0] - c = mu[0, 2] / mu[0, 0] - return np.array([[a, b], [b, c]]) + return _moments.inertia_tensor(self.image, mu) @_cached - @only2d def inertia_tensor_eigvals(self): - a, b, b, c = self.inertia_tensor.flat - # eigen values of inertia tensor - l1 = (a + c) / 2 + sqrt(4 * b ** 2 + (a - c) ** 2) / 2 - l2 = (a + c) / 2 - sqrt(4 * b ** 2 + (a - c) ** 2) / 2 - return l1, l2 + return _moments.inertia_tensor_eigvals(self.image, + T=self.inertia_tensor) @_cached def intensity_image(self): @@ -205,12 +219,12 @@ def _intensity_image_double(self): return self.intensity_image.astype(np.double) - @only2d def local_centroid(self): - m = self.moments - row = m[0, 1] / m[0, 0] - col = m[1, 0] / m[0, 0] - return row, col + M = self.moments + if self._transpose_moments: + M = M.T + return tuple(M[tuple(np.eye(self._ndim, dtype=int))] / + M[(0,) * self._ndim]) def max_intensity(self): return np.max(self.intensity_image[self.image]) @@ -221,88 +235,84 @@ def min_intensity(self): return np.min(self.intensity_image[self.image]) - @only2d def major_axis_length(self): - l1, _ = self.inertia_tensor_eigvals + l1 = self.inertia_tensor_eigvals[0] return 4 * sqrt(l1) - @only2d def minor_axis_length(self): - _, l2 = self.inertia_tensor_eigvals + l2 = self.inertia_tensor_eigvals[-1] return 4 * sqrt(l2) @_cached - @only2d def moments(self): - return 
_moments.moments(self.image.astype(np.uint8), 3) + M = _moments.moments(self.image.astype(np.uint8), 3) + if self._use_xy_warning: + warn(XY_TO_RC_DEPRECATION_MESSAGE) + if self._transpose_moments: + M = M.T + return M @_cached - @only2d def moments_central(self): - row, col = self.local_centroid - return _moments.moments_central(self.image.astype(np.uint8), - row, col, 3) + mu = _moments.moments_central(self.image.astype(np.uint8), + self.local_centroid, order=3) + if self._use_xy_warning: + warn(XY_TO_RC_DEPRECATION_MESSAGE) + if self._transpose_moments: + mu = mu.T + return mu @only2d def moments_hu(self): return _moments.moments_hu(self.moments_normalized) @_cached - @only2d def moments_normalized(self): return _moments.moments_normalized(self.moments_central, 3) @only2d def orientation(self): a, b, b, c = self.inertia_tensor.flat - b = -b if a - c == 0: - if b > 0: + if b < 0: return -PI / 4. else: return PI / 4. else: - return - 0.5 * atan2(2 * b, (a - c)) + return -0.5 * atan2(-2 * b, (a - c)) @only2d def perimeter(self): return perimeter(self.image, 4) - @only2d def solidity(self): - return self.moments[0, 0] / np.sum(self.convex_image) + return self.area / self.convex_area - @only2d def weighted_centroid(self): - row, col = self.weighted_local_centroid - return row + self._slice[0].start, col + self._slice[1].start + ctr = self.weighted_local_centroid + return tuple(idx + slc.start + for idx, slc in zip(ctr, self._slice)) - @only2d def weighted_local_centroid(self): - m = self.weighted_moments - row = m[0, 1] / m[0, 0] - col = m[1, 0] / m[0, 0] - return row, col + M = self.weighted_moments + return (M[tuple(np.eye(self._ndim, dtype=int))] / + M[(0,) * self._ndim]) @_cached - @only2d def weighted_moments(self): - return _moments.moments_central(self._intensity_image_double(), - 0, 0, 3) + return _moments.moments(self._intensity_image_double(), 3) @_cached - @only2d def weighted_moments_central(self): - row, col = self.weighted_local_centroid + ctr = 
self.weighted_local_centroid return _moments.moments_central(self._intensity_image_double(), - row, col, 3) + center=ctr, order=3) @only2d def weighted_moments_hu(self): return _moments.moments_hu(self.weighted_moments_normalized) @_cached - @only2d def weighted_moments_normalized(self): return _moments.moments_normalized(self.weighted_moments_central, 3) @@ -347,7 +357,8 @@ return True -def regionprops(label_image, intensity_image=None, cache=True): +def regionprops(label_image, intensity_image=None, cache=True, + coordinates=None): """Measure properties of labeled image regions. Parameters @@ -361,6 +372,9 @@ Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. + coordinates : 'rc' or 'xy', optional + Coordinate conventions for 2D images. (Only 'rc' coordinates are + supported for 3D images.) Returns ------- @@ -536,7 +550,7 @@ raise TypeError('Only 2-D and 3-D images supported.') if not np.issubdtype(label_image.dtype, np.integer): - raise TypeError('Label image must be of integral type.') + raise TypeError('Label image must be of integer type.') regions = [] @@ -548,7 +562,7 @@ label = i + 1 props = _RegionProperties(sl, label, label_image, intensity_image, - cache) + cache, coordinates=coordinates) regions.append(props) return regions diff -Nru skimage-0.13.1/skimage/measure/simple_metrics.py skimage-0.14.0/skimage/measure/simple_metrics.py --- skimage-0.13.1/skimage/measure/simple_metrics.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/simple_metrics.py 2018-05-29 01:27:44.000000000 +0000 @@ -12,8 +12,6 @@ def _assert_compatible(im1, im2): """Raise an error if the shape and dtype do not match.""" - if not im1.dtype == im2.dtype: - raise ValueError('Input images must have the same dtype.') if not im1.shape == im2.shape: raise ValueError('Input images must have the same dimensions.') return @@ -22,10 +20,8 @@ def _as_floats(im1, im2): 
"""Promote im1, im2 to nearest appropriate floating point precision.""" float_type = np.result_type(im1.dtype, im2.dtype, np.float32) - if im1.dtype != float_type: - im1 = im1.astype(float_type) - if im2.dtype != float_type: - im2 = im2.astype(float_type) + im1 = np.asarray(im1, dtype=float_type) + im2 = np.asarray(im2, dtype=float_type) return im1, im2 @@ -63,9 +59,18 @@ NRMSE. There is no standard method of normalization across the literature [1]_. The methods available here are as follows: - - 'Euclidean' : normalize by the Euclidean norm of ``im_true``. + - 'Euclidean' : normalize by the averaged Euclidean norm of + ``im_true``:: + + NRMSE = RMSE * sqrt(N) / || im_true || + + where || . || denotes the Frobenius norm and ``N = im_true.size``. + This result is equivalent to:: + + NRMSE = || im_true - im_test || / || im_true ||. + - 'min-max' : normalize by the intensity range of ``im_true``. - - 'mean' : normalize by the mean of ``im_true``. + - 'mean' : normalize by the mean of ``im_true`` Returns ------- @@ -124,6 +129,9 @@ data_range = dynamic_range if data_range is None: + if im_true.dtype != im_test.dtype: + warn("Inputs have mismatched dtype. 
Setting data_range based on " + "im_true.") dmin, dmax = dtype_range[im_true.dtype.type] true_min, true_max = np.min(im_true), np.max(im_true) if true_max > dmax or true_min < dmin: diff -Nru skimage-0.13.1/skimage/measure/_structural_similarity.py skimage-0.14.0/skimage/measure/_structural_similarity.py --- skimage-0.13.1/skimage/measure/_structural_similarity.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/_structural_similarity.py 2018-05-29 01:27:44.000000000 +0000 @@ -5,15 +5,14 @@ from ..util.dtype import dtype_range from ..util.arraycrop import crop -from .._shared.utils import deprecated -from .._shared.utils import skimage_deprecation, warn +from .._shared.utils import warn __all__ = ['compare_ssim'] def compare_ssim(X, Y, win_size=None, gradient=False, data_range=None, multichannel=False, gaussian_weights=False, - full=False, dynamic_range=None, **kwargs): + full=False, **kwargs): """Compute the mean structural similarity index between two images. Parameters @@ -25,8 +24,8 @@ odd value. If `gaussian_weights` is True, this is ignored and the window size will depend on `sigma`. gradient : bool, optional - If True, also return the gradient. - data_range : int, optional + If True, also return the gradient with respect to Y. + data_range : float, optional The data range of the input image (distance between minimum and maximum possible values). By default, this is estimated from the image data-type. @@ -74,7 +73,7 @@ structural similarity. IEEE Transactions on Image Processing, 13, 600-612. https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf, - DOI:10.1.1.11.2477 + DOI:10.1109/TIP.2003.819861 .. [2] Avanaki, A. N. (2009). Exact global histogram specification optimized for structural similarity. Optical Review, 16, 613-621. 
@@ -82,18 +81,9 @@ DOI:10.1007/s10043-009-0119-z """ - if not X.dtype == Y.dtype: - raise ValueError('Input images must have the same dtype.') - if not X.shape == Y.shape: raise ValueError('Input images must have the same dimensions.') - if dynamic_range is not None: - warn('`dynamic_range` has been deprecated in favor of ' - '`data_range`. The `dynamic_range` keyword argument ' - 'will be removed in v0.14', skimage_deprecation) - data_range = dynamic_range - if multichannel: # loop over channels args = dict(win_size=win_size, @@ -155,6 +145,9 @@ raise ValueError('Window size must be odd.') if data_range is None: + if X.dtype != Y.dtype: + warn("Inputs have mismatched dtype. Setting data_range based on " + "X.dtype.") dmin, dmax = dtype_range[X.dtype.type] data_range = dmax - dmin @@ -228,14 +221,3 @@ return mssim, S else: return mssim - - -@deprecated('compare_ssim', removed_version='0.14') -def structural_similarity(X, Y, win_size=None, gradient=False, - dynamic_range=None, multichannel=False, - gaussian_weights=False, full=False, **kwargs): - """""" + compare_ssim.__doc__ - return compare_ssim(X, Y, win_size=win_size, gradient=gradient, - data_range=dynamic_range, - multichannel=multichannel, - gaussian_weights=gaussian_weights, full=full, **kwargs) diff -Nru skimage-0.13.1/skimage/measure/tests/test_block.py skimage-0.14.0/skimage/measure/tests/test_block.py --- skimage-0.13.1/skimage/measure/tests/test_block.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_block.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,9 @@ import numpy as np -from numpy.testing import assert_equal, assert_raises from skimage.measure import block_reduce +from skimage._shared import testing +from skimage._shared.testing import assert_equal + def test_block_reduce_sum(): image1 = np.arange(4 * 6).reshape(4, 6) @@ -80,9 +82,7 @@ def test_invalid_block_size(): image = np.arange(4 * 6).reshape(4, 6) - assert_raises(ValueError, block_reduce, image, [1, 2, 
3]) - assert_raises(ValueError, block_reduce, image, [1, 0.5]) - - -if __name__ == "__main__": - np.testing.run_module_suite() + with testing.raises(ValueError): + block_reduce(image, [1, 2, 3]) + with testing.raises(ValueError): + block_reduce(image, [1, 0.5]) diff -Nru skimage-0.13.1/skimage/measure/tests/test_entropy.py skimage-0.14.0/skimage/measure/tests/test_entropy.py --- skimage-0.13.1/skimage/measure/tests/test_entropy.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_entropy.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,15 +1,16 @@ -from numpy.testing import assert_almost_equal import numpy as np - from skimage.measure import shannon_entropy +from skimage._shared.testing import assert_almost_equal + def test_shannon_ones(): img = np.ones((10, 10)) res = shannon_entropy(img, base=np.e) - assert_almost_equal(res, np.log(10*10)) + assert_almost_equal(res, 0.0) -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() +def test_shannon_all_unique(): + img = np.arange(64) + res = shannon_entropy(img, base=2) + assert_almost_equal(res, np.log(64) / np.log(2)) diff -Nru skimage-0.13.1/skimage/measure/tests/test_find_contours.py skimage-0.14.0/skimage/measure/tests/test_find_contours.py --- skimage-0.13.1/skimage/measure/tests/test_find_contours.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_find_contours.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,10 @@ import numpy as np -from numpy.testing import (assert_raises, - assert_array_equal, - ) - from skimage.measure import find_contours +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal + + a = np.ones((8, 8), dtype=np.float32) a[1:-1, 1] = 0 a[1, 1:-1] = 0 @@ -45,8 +45,6 @@ assert_array_equal(contours[0][::-1], ref) - - def test_float(): contours = find_contours(r, 0.5) assert len(contours) == 1 @@ -67,10 +65,7 @@ def test_invalid_input(): - 
assert_raises(ValueError, find_contours, r, 0.5, 'foo', 'bar') - assert_raises(ValueError, find_contours, r[..., None], 0.5) - - -if __name__ == '__main__': - from numpy.testing import run_module_suite - run_module_suite() + with testing.raises(ValueError): + find_contours(r, 0.5, 'foo', 'bar') + with testing.raises(ValueError): + find_contours(r[..., None], 0.5) diff -Nru skimage-0.13.1/skimage/measure/tests/test_fit.py skimage-0.14.0/skimage/measure/tests/test_fit.py --- skimage-0.13.1/skimage/measure/tests/test_fit.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_fit.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,14 +1,16 @@ import numpy as np -from numpy.testing import assert_equal, assert_almost_equal, assert_array_less -from numpy.testing import assert_raises, assert_array_less from skimage.measure import LineModelND, CircleModel, EllipseModel, ransac from skimage.transform import AffineTransform from skimage.measure.fit import _dynamic_max_trials -from skimage._shared._warnings import expected_warnings + +from skimage._shared import testing +from skimage._shared.testing import (assert_equal, assert_almost_equal, + assert_array_less) def test_line_model_invalid_input(): - assert_raises(ValueError, LineModelND().estimate, np.empty((1, 3))) + with testing.raises(ValueError): + LineModelND().estimate(np.empty((1, 3))) def test_line_model_predict(): @@ -19,62 +21,50 @@ assert_almost_equal(x, model.predict_x(y)) -def test_line_model_estimate(): - # generate original data without noise - model0 = LineModelND() - model0.params = ((0, 0), (1, 1)) - x0 = np.arange(-100, 100) - y0 = model0.predict_y(x0) +def test_line_model_nd_invalid_input(): + with testing.raises(AssertionError): + LineModelND().predict_x(np.zeros(1)) - data = np.column_stack([x0, y0]) + with testing.raises(AssertionError): + LineModelND().predict_y(np.zeros(1)) - # estimate parameters of noisy data - model_est = LineModelND() - model_est.estimate(data) + with 
testing.raises(ValueError): + LineModelND().predict_x(np.zeros(1), np.zeros(1)) - # test whether estimated parameters almost equal original parameters - random_state = np.random.RandomState(1234) - x = random_state.rand(100, 2) - assert_almost_equal(model0.predict(x), model_est.predict(x), 1) + with testing.raises(AssertionError): + LineModelND().predict_y(np.zeros(1)) + with testing.raises(ValueError): + LineModelND().predict_y(np.zeros(1), np.zeros(1)) -def test_line_model_residuals(): - model = LineModelND() - model.params = (np.array([0, 0]), np.array([0, 1])) - assert_equal(model.residuals(np.array([[0, 0]])), 0) - assert_equal(model.residuals(np.array([[0, 10]])), 0) - assert_equal(model.residuals(np.array([[10, 0]])), 10) - model.params = (np.array([-2, 0]), np.array([1, 1]) / np.sqrt(2)) - assert_equal(model.residuals(np.array([[0, 0]])), np.sqrt(2)) - assert_almost_equal(model.residuals(np.array([[-4, 0]])), np.sqrt(2)) + with testing.raises(ValueError): + LineModelND().estimate(np.empty((1, 3))) + with testing.raises(AssertionError): + LineModelND().residuals(np.empty((1, 3))) -def test_line_model_under_determined(): data = np.empty((1, 2)) - assert_raises(ValueError, LineModelND().estimate, data) - + with testing.raises(ValueError): + LineModelND().estimate(data) -def test_line_modelND_invalid_input(): - assert_raises(ValueError, LineModelND().estimate, np.empty((5, 1))) - -def test_line_modelND_predict(): +def test_line_model_nd_predict(): model = LineModelND() - model.params = (np.array([0, 0]), np.array([0.2, 0.98])) + model.params = (np.array([0, 0]), np.array([0.2, 0.8])) x = np.arange(-10, 10) y = model.predict_y(x) assert_almost_equal(x, model.predict_x(y)) -def test_line_modelND_estimate(): +def test_line_model_nd_estimate(): # generate original data without noise model0 = LineModelND() - model0.params = (np.array([0,0,0], dtype='float'), - np.array([1,1,1], dtype='float')/np.sqrt(3)) + model0.params = (np.array([0, 0, 0], dtype='float'), + 
np.array([1, 1, 1], dtype='float')/np.sqrt(3)) # we scale the unit vector with a factor 10 when generating points on the # line in order to compensate for the scale of the random noise data0 = (model0.params[0] + - 10 * np.arange(-100,100)[...,np.newaxis] * model0.params[1]) + 10 * np.arange(-100, 100)[..., np.newaxis] * model0.params[1]) # add gaussian noise to data random_state = np.random.RandomState(1234) @@ -83,6 +73,7 @@ # estimate parameters of noisy data model_est = LineModelND() model_est.estimate(data) + # assert_almost_equal(model_est.residuals(data0), np.zeros(len(data)), 1) # test whether estimated parameters are correct # we use the following geometric property: two aligned vectors have @@ -97,21 +88,27 @@ assert_almost_equal(np.linalg.norm(np.cross(model0.params[1], a)), 0, 1) -def test_line_modelND_residuals(): +def test_line_model_nd_residuals(): model = LineModelND() model.params = (np.array([0, 0, 0]), np.array([0, 0, 1])) assert_equal(abs(model.residuals(np.array([[0, 0, 0]]))), 0) assert_equal(abs(model.residuals(np.array([[0, 0, 1]]))), 0) assert_equal(abs(model.residuals(np.array([[10, 0, 0]]))), 10) + # test params argument in model.rediduals + data = np.array([[10, 0, 0]]) + params = (np.array([0, 0, 0]), np.array([2, 0, 0])) + assert_equal(abs(model.residuals(data, params=params)), 30) def test_line_modelND_under_determined(): data = np.empty((1, 3)) - assert_raises(ValueError, LineModelND().estimate, data) + with testing.raises(ValueError): + LineModelND().estimate(data) def test_circle_model_invalid_input(): - assert_raises(ValueError, CircleModel().estimate, np.empty((5, 3))) + with testing.raises(ValueError): + CircleModel().estimate(np.empty((5, 3))) def test_circle_model_predict(): @@ -140,7 +137,7 @@ model_est.estimate(data) # test whether estimated parameters almost equal original parameters - assert_almost_equal(model0.params, model_est.params, 1) + assert_almost_equal(model0.params, model_est.params, 0) def 
test_circle_model_residuals(): @@ -153,7 +150,8 @@ def test_ellipse_model_invalid_input(): - assert_raises(ValueError, EllipseModel().estimate, np.empty((5, 3))) + with testing.raises(ValueError): + EllipseModel().estimate(np.empty((5, 3))) def test_ellipse_model_predict(): @@ -256,7 +254,7 @@ random_state=1) # test whether estimated parameters equal original parameters - assert_equal(model0.params, model_est.params) + assert_almost_equal(model0.params, model_est.params) for outlier in outliers: assert outlier not in inliers @@ -286,8 +284,8 @@ def test_ransac_is_data_valid(): - - is_data_valid = lambda data: data.shape[0] > 2 + def is_data_valid(data): + return data.shape[0] > 2 model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf, is_data_valid=is_data_valid, random_state=1) assert_equal(model, None) @@ -295,7 +293,6 @@ def test_ransac_is_model_valid(): - def is_model_valid(model, data): return False model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf, @@ -337,13 +334,12 @@ def test_ransac_invalid_input(): - assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2, - residual_threshold=0, max_trials=-1) - assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2, - residual_threshold=0, stop_probability=-1) - assert_raises(ValueError, ransac, np.zeros((10, 2)), None, min_samples=2, - residual_threshold=0, stop_probability=1.01) - - -if __name__ == "__main__": - np.testing.run_module_suite() + with testing.raises(ValueError): + ransac(np.zeros((10, 2)), None, min_samples=2, + residual_threshold=0, max_trials=-1) + with testing.raises(ValueError): + ransac(np.zeros((10, 2)), None, min_samples=2, + residual_threshold=0, stop_probability=-1) + with testing.raises(ValueError): + ransac(np.zeros((10, 2)), None, min_samples=2, + residual_threshold=0, stop_probability=1.01) diff -Nru skimage-0.13.1/skimage/measure/tests/test_marching_cubes.py skimage-0.14.0/skimage/measure/tests/test_marching_cubes.py --- 
skimage-0.13.1/skimage/measure/tests/test_marching_cubes.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_marching_cubes.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,40 +1,9 @@ -import sys import numpy as np -from numpy.testing import assert_raises - from skimage.draw import ellipsoid, ellipsoid_stats -from skimage.measure import (marching_cubes, - marching_cubes_classic, marching_cubes_lewiner, +from skimage.measure import (marching_cubes_classic, marching_cubes_lewiner, mesh_surface_area, correct_mesh_orientation) -from skimage.measure._marching_cubes_lewiner import _expected_output_args - -def func_that_knows_about_its_outputs(r): - # Must be defined in global scope to avoid syntax error on Python 2 *sigh* - nout = _expected_output_args() - print(nout) - r.append(nout) - return [nout] * int(nout) - - -def test_expected_output_args(): - - foo = func_that_knows_about_its_outputs - - res = [] - foo(res) - a = foo(res) - a, b = foo(res) - a, b, c = foo(res) - assert res == [0, 1, 2, 3] or res == [0, 0, 2, 3] - # ``a = foo()`` somehow yields 0 in test, which is ok for us; - # we only want to distinguish between > 2 args or not - - if sys.version_info >= (3, 3): - res = [] - exec('*a, b, c = foo(res)') - exec('a, b, c, *d = foo(res)') - exec('a, b, *c, d, e = foo(res)') - assert res == [2.1, 3.1, 4.1] +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal def test_marching_cubes_isotropic(): @@ -55,7 +24,8 @@ def test_marching_cubes_anisotropic(): - spacing = (1., 10 / 6., 16 / 6.) 
+ # test spacing as numpy array (and not just tuple) + spacing = np.array([1., 10 / 6., 16 / 6.]) ellipsoid_anisotropic = ellipsoid(6, 10, 16, spacing=spacing, levelset=True) _, surf = ellipsoid_stats(6, 10, 16) @@ -68,26 +38,38 @@ assert surf > surf_calc and surf_calc > surf * 0.985 # Lewiner - verts, faces = marching_cubes_lewiner(ellipsoid_anisotropic, 0., spacing=spacing)[:2] + verts, faces = marching_cubes_lewiner( + ellipsoid_anisotropic, 0., spacing=spacing)[:2] surf_calc = mesh_surface_area(verts, faces) # Test within 1.5% tolerance for anisotropic. Will always underestimate. assert surf > surf_calc and surf_calc > surf * 0.985 + # Test spacing together with allow_degenerate=False + marching_cubes_lewiner(ellipsoid_anisotropic, 0, spacing=spacing, + allow_degenerate=False) + def test_invalid_input(): # Classic - assert_raises(ValueError, marching_cubes_classic, np.zeros((2, 2, 1)), 0) - assert_raises(ValueError, marching_cubes_classic, np.zeros((2, 2, 1)), 1) - assert_raises(ValueError, marching_cubes_classic, np.ones((3, 3, 3)), 1, - spacing=(1, 2)) - assert_raises(ValueError, marching_cubes_classic, np.zeros((20, 20)), 0) + with testing.raises(ValueError): + marching_cubes_classic(np.zeros((2, 2, 1)), 0) + with testing.raises(ValueError): + marching_cubes_classic(np.zeros((2, 2, 1)), 1) + with testing.raises(ValueError): + marching_cubes_classic(np.ones((3, 3, 3)), 1, spacing=(1, 2)) + with testing.raises(ValueError): + marching_cubes_classic(np.zeros((20, 20)), 0) # Lewiner - assert_raises(ValueError, marching_cubes_lewiner, np.zeros((2, 2, 1)), 0) - assert_raises(ValueError, marching_cubes_lewiner, np.zeros((2, 2, 1)), 1) - assert_raises(ValueError, marching_cubes_lewiner, np.ones((3, 3, 3)), 1, - spacing=(1, 2)) - assert_raises(ValueError, marching_cubes_lewiner, np.zeros((20, 20)), 0) + with testing.raises(ValueError): + marching_cubes_lewiner(np.zeros((2, 2, 1)), 0) + with testing.raises(ValueError): + marching_cubes_lewiner(np.zeros((2, 2, 1)), 1) + 
with testing.raises(ValueError): + marching_cubes_lewiner(np.ones((3, 3, 3)), 1, + spacing=(1, 2)) + with testing.raises(ValueError): + marching_cubes_lewiner(np.zeros((20, 20)), 0) def test_correct_mesh_orientation(): @@ -118,13 +100,13 @@ gradient_direction='ascent') # Ensure ascent is opposite of descent for all faces - np.testing.assert_array_equal(corrected_faces1, corrected_faces2[:, ::-1]) + assert_array_equal(corrected_faces1, corrected_faces2[:, ::-1]) # Ensure correct faces have been reversed: 1, 4, and 5 idx = [1, 4, 5] expected = faces.copy() expected[idx] = expected[idx, ::-1] - np.testing.assert_array_equal(expected, corrected_faces1) + assert_array_equal(expected, corrected_faces1) def test_both_algs_same_result_ellipse(): @@ -133,8 +115,10 @@ sphere_small = ellipsoid(1, 1, 1, levelset=True) vertices1, faces1 = marching_cubes_classic(sphere_small, 0)[:2] - vertices2, faces2 = marching_cubes_lewiner(sphere_small, 0, allow_degenerate=False)[:2] - vertices3, faces3 = marching_cubes_lewiner(sphere_small, 0, allow_degenerate=False, use_classic=True)[:2] + vertices2, faces2 = marching_cubes_lewiner( + sphere_small, 0, allow_degenerate=False)[:2] + vertices3, faces3 = marching_cubes_lewiner( + sphere_small, 0, allow_degenerate=False, use_classic=True)[:2] # Order is different, best we can do is test equal shape and same vertices present assert _same_mesh(vertices1, faces1, vertices2, faces2) @@ -149,22 +133,23 @@ triangles1 = vertices1[np.array(faces1)] triangles2 = vertices2[np.array(faces2)] # Sort vertices within each triangle - triang1 = [np.concatenate(sorted(t, key=lambda x:tuple(x))) for t in triangles1] - triang2 = [np.concatenate(sorted(t, key=lambda x:tuple(x))) for t in triangles2] + triang1 = [np.concatenate(sorted(t, key=lambda x:tuple(x))) + for t in triangles1] + triang2 = [np.concatenate(sorted(t, key=lambda x:tuple(x))) + for t in triangles2] # Sort the resulting 9-element "tuples" triang1 = np.array(sorted([tuple(x) for x in triang1])) 
triang2 = np.array(sorted([tuple(x) for x in triang2])) - return triang1.shape == triang2.shape and np.allclose(triang1, triang2, 0, tol) + return (triang1.shape == triang2.shape and + np.allclose(triang1, triang2, 0, tol)) def test_both_algs_same_result_donut(): # Performing this test on data that does not have ambiguities - n = 48 a, b = 2.5/n, -1.25 - isovalue = 0.0 - - vol = np.empty((n,n,n), 'float32') + + vol = np.empty((n, n, n), 'float32') for iz in range(vol.shape[0]): for iy in range(vol.shape[1]): for ix in range(vol.shape[2]): @@ -188,7 +173,3 @@ assert not _same_mesh(vertices2, faces2, vertices3, faces3) # Would have been nice if old and new classic would have been the same # assert _same_mesh(vertices1, faces1, vertices3, faces3, 5) - - -if __name__ == '__main__': - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/measure/tests/test_moments.py skimage-0.14.0/skimage/measure/tests/test_moments.py --- skimage-0.13.1/skimage/measure/tests/test_moments.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_moments.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,8 +1,14 @@ -from numpy.testing import assert_equal, assert_almost_equal, assert_raises +from __future__ import division import numpy as np - -from skimage.measure import (moments, moments_central, moments_normalized, - moments_hu) +from skimage import draw +from skimage.measure import (moments, moments_central, moments_coords, + moments_coords_central, moments_normalized, + moments_hu, centroid) + +from skimage._shared import testing +from skimage._shared.testing import (assert_equal, assert_almost_equal, + assert_allclose) +from skimage._shared._warnings import expected_warnings def test_moments(): @@ -13,8 +19,8 @@ image[15, 14] = 0.5 m = moments(image) assert_equal(m[0, 0], 3) - assert_almost_equal(m[0, 1] / m[0, 0], 14.5) assert_almost_equal(m[1, 0] / m[0, 0], 14.5) + assert_almost_equal(m[0, 1] / m[0, 0], 14.5) def test_moments_central(): @@ -23,7 
+29,11 @@ image[15, 15] = 1 image[14, 15] = 0.5 image[15, 14] = 0.5 - mu = moments_central(image, 14.5, 14.5) + mu = moments_central(image, (14.5, 14.5)) + + # check for proper centroid computation + mu_calc_centroid = moments_central(image) + assert_equal(mu, mu_calc_centroid) # shift image by dx=2, dy=2 image2 = np.zeros((20, 20), dtype=np.double) @@ -31,49 +41,113 @@ image2[17, 17] = 1 image2[16, 17] = 0.5 image2[17, 16] = 0.5 - mu2 = moments_central(image2, 14.5 + 2, 14.5 + 2) + mu2 = moments_central(image2, (14.5 + 2, 14.5 + 2)) # central moments must be translation invariant assert_equal(mu, mu2) +def test_moments_central_deprecated(): + image = np.zeros((20, 20), dtype=np.double) + image[5:-5, 5:-5] = np.random.random((10, 10)) + center = moments(image, 1)[[1, 0], [0, 1]] + cr, cc = center + with expected_warnings(['deprecated 2D-only']): + mu0 = moments_central(image, cr, cc) + mu1 = moments_central(image, cr=cr, cc=cc) + mu_ref = moments_central(image, center) + assert_almost_equal(mu0.T, mu_ref) + assert_almost_equal(mu1.T, mu_ref) + + +def test_moments_coords(): + image = np.zeros((20, 20), dtype=np.double) + image[13:17, 13:17] = 1 + mu_image = moments(image) + + coords = np.array([[r, c] for r in range(13, 17) + for c in range(13, 17)], dtype=np.double) + mu_coords = moments_coords(coords) + assert_almost_equal(mu_coords, mu_image) + + +def test_moments_central_coords(): + image = np.zeros((20, 20), dtype=np.double) + image[13:17, 13:17] = 1 + mu_image = moments_central(image, (14.5, 14.5)) + + coords = np.array([[r, c] for r in range(13, 17) + for c in range(13, 17)], dtype=np.double) + mu_coords = moments_coords_central(coords, (14.5, 14.5)) + assert_almost_equal(mu_coords, mu_image) + + # ensure that center is being calculated normally + mu_coords_calc_centroid = moments_coords_central(coords) + assert_almost_equal(mu_coords_calc_centroid, mu_coords) + + # shift image by dx=3 dy=3 + image = np.zeros((20, 20), dtype=np.double) + image[16:20, 16:20] = 
1 + mu_image = moments_central(image, (14.5, 14.5)) + + coords = np.array([[r, c] for r in range(16, 20) + for c in range(16, 20)], dtype=np.double) + mu_coords = moments_coords_central(coords, (14.5, 14.5)) + assert_almost_equal(mu_coords, mu_image) + + def test_moments_normalized(): image = np.zeros((20, 20), dtype=np.double) image[13:17, 13:17] = 1 - mu = moments_central(image, 14.5, 14.5) + mu = moments_central(image, (14.5, 14.5)) nu = moments_normalized(mu) # shift image by dx=-3, dy=-3 and scale by 0.5 image2 = np.zeros((20, 20), dtype=np.double) image2[11:13, 11:13] = 1 - mu2 = moments_central(image2, 11.5, 11.5) + mu2 = moments_central(image2, (11.5, 11.5)) nu2 = moments_normalized(mu2) # central moments must be translation and scale invariant assert_almost_equal(nu, nu2, decimal=1) +def test_moments_normalized_3d(): + image = draw.ellipsoid(1, 1, 10) + mu_image = moments_central(image) + nu = moments_normalized(mu_image) + assert nu[0, 0, 2] > nu[0, 2, 0] + assert_almost_equal(nu[0, 2, 0], nu[2, 0, 0]) + + coords = np.where(image) + mu_coords = moments_coords_central(coords) + assert_almost_equal(mu_coords, mu_image) + + def test_moments_normalized_invalid(): - assert_raises(TypeError, moments_normalized, np.zeros((3, 3, 3))) - assert_raises(TypeError, moments_normalized, np.zeros((3,))) - assert_raises(TypeError, moments_normalized, np.zeros((3, 3)), 3) - assert_raises(TypeError, moments_normalized, np.zeros((3, 3)), 4) + with testing.raises(ValueError): + moments_normalized(np.zeros((3, 3)), 3) + with testing.raises(ValueError): + moments_normalized(np.zeros((3, 3)), 4) def test_moments_hu(): image = np.zeros((20, 20), dtype=np.double) image[13:15, 13:17] = 1 - mu = moments_central(image, 13.5, 14.5) + mu = moments_central(image, (13.5, 14.5)) nu = moments_normalized(mu) hu = moments_hu(nu) # shift image by dx=2, dy=3, scale by 0.5 and rotate by 90deg image2 = np.zeros((20, 20), dtype=np.double) image2[11, 11:13] = 1 image2 = image2.T - mu2 = 
moments_central(image2, 11.5, 11) + mu2 = moments_central(image2, (11.5, 11)) nu2 = moments_normalized(mu2) hu2 = moments_hu(nu2) # central moments must be translation and scale invariant assert_almost_equal(hu, hu2, decimal=1) -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() +def test_centroid(): + image = np.zeros((20, 20), dtype=np.double) + image[14, 14:16] = 1 + image[15, 14:16] = 1/3 + image_centroid = centroid(image) + assert_allclose(image_centroid, (14.25, 14.5)) diff -Nru skimage-0.13.1/skimage/measure/tests/test_pnpoly.py skimage-0.14.0/skimage/measure/tests/test_pnpoly.py --- skimage-0.13.1/skimage/measure/tests/test_pnpoly.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_pnpoly.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,10 @@ import numpy as np -from numpy.testing import assert_array_equal - from skimage.measure import points_in_poly, grid_points_in_poly +from skimage._shared.testing import assert_array_equal + -class test_npnpoly(): +class TestNpnpoly(): def test_square(self): v = np.array([[0, 0], [0, 1], @@ -32,8 +32,4 @@ expected = np.tril(np.ones((5, 5), dtype=bool)) - assert_array_equal(grid_points_in_poly((5, 5), v), - expected) - -if __name__ == "__main__": - np.testing.run_module_suite() + assert_array_equal(grid_points_in_poly((5, 5), v), expected) diff -Nru skimage-0.13.1/skimage/measure/tests/test_polygon.py skimage-0.14.0/skimage/measure/tests/test_polygon.py --- skimage-0.13.1/skimage/measure/tests/test_polygon.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_polygon.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,6 +2,10 @@ from skimage.measure import approximate_polygon, subdivide_polygon from skimage.measure._polygon import _SUBDIVISION_MASKS +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal, assert_equal + + square = np.array([ [0, 0], [0, 1], [0, 2], [0, 3], [1, 3], [2, 
3], [3, 3], @@ -12,18 +16,18 @@ def test_approximate_polygon(): out = approximate_polygon(square, 0.1) - np.testing.assert_array_equal(out, square[(0, 3, 6, 9, 12), :]) + assert_array_equal(out, square[(0, 3, 6, 9, 12), :]) out = approximate_polygon(square, 2.2) - np.testing.assert_array_equal(out, square[(0, 6, 12), :]) + assert_array_equal(out, square[(0, 6, 12), :]) out = approximate_polygon(square[(0, 1, 3, 4, 5, 6, 7, 9, 11, 12), :], 0.1) - np.testing.assert_array_equal(out, square[(0, 3, 6, 9, 12), :]) + assert_array_equal(out, square[(0, 3, 6, 9, 12), :]) out = approximate_polygon(square, -1) - np.testing.assert_array_equal(out, square) + assert_array_equal(out, square) out = approximate_polygon(square, 0) - np.testing.assert_array_equal(out, square) + assert_array_equal(out, square) def test_subdivide_polygon(): @@ -38,25 +42,23 @@ mask_len = len(_SUBDIVISION_MASKS[degree][0]) # test circular new_square1 = subdivide_polygon(square1, degree) - np.testing.assert_array_equal(new_square1[-1], new_square1[0]) - np.testing.assert_equal(new_square1.shape[0], - 2 * square1.shape[0] - 1) + assert_array_equal(new_square1[-1], new_square1[0]) + assert_equal(new_square1.shape[0], + 2 * square1.shape[0] - 1) # test non-circular new_square2 = subdivide_polygon(square2, degree) - np.testing.assert_equal(new_square2.shape[0], - 2 * (square2.shape[0] - mask_len + 1)) + assert_equal(new_square2.shape[0], + 2 * (square2.shape[0] - mask_len + 1)) # test non-circular, preserve_ends new_square3 = subdivide_polygon(square3, degree, True) - np.testing.assert_equal(new_square3[0], square3[0]) - np.testing.assert_equal(new_square3[-1], square3[-1]) + assert_equal(new_square3[0], square3[0]) + assert_equal(new_square3[-1], square3[-1]) - np.testing.assert_equal(new_square3.shape[0], - 2 * (square3.shape[0] - mask_len + 2)) + assert_equal(new_square3.shape[0], + 2 * (square3.shape[0] - mask_len + 2)) # not supported B-Spline degree - np.testing.assert_raises(ValueError, 
subdivide_polygon, square, 0) - np.testing.assert_raises(ValueError, subdivide_polygon, square, 8) - - -if __name__ == "__main__": - np.testing.run_module_suite() + with testing.raises(ValueError): + subdivide_polygon(square, 0) + with testing.raises(ValueError): + subdivide_polygon(square, 8) diff -Nru skimage-0.13.1/skimage/measure/tests/test_profile.py skimage-0.14.0/skimage/measure/tests/test_profile.py --- skimage-0.13.1/skimage/measure/tests/test_profile.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_profile.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,12 @@ -from numpy.testing import assert_equal, assert_almost_equal import numpy as np - from skimage.measure import profile_line +from skimage._shared.testing import assert_equal, assert_almost_equal + + image = np.arange(100).reshape((10, 10)).astype(np.float) + def test_horizontal_rightward(): prof = profile_line(image, (0, 2), (0, 8), order=0) expected_prof = np.arange(2, 9) @@ -75,6 +77,7 @@ expected_prof = np.linspace(11, 79, 11) assert_almost_equal(prof, expected_prof) + pyth_image = np.zeros((6, 7), np.float) line = ((1, 2, 2, 3, 3, 4), (1, 2, 3, 3, 4, 5)) below = ((2, 2, 3, 4, 4, 5), (0, 1, 2, 3, 4, 4)) @@ -102,9 +105,3 @@ linewidth=3, order=0) expected_prof = np.ones(6) assert_almost_equal(prof, expected_prof) - - -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() - diff -Nru skimage-0.13.1/skimage/measure/tests/test_regionprops.py skimage-0.14.0/skimage/measure/tests/test_regionprops.py --- skimage-0.13.1/skimage/measure/tests/test_regionprops.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_regionprops.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,15 @@ -from numpy.testing import assert_array_equal, assert_almost_equal, \ - assert_array_almost_equal, assert_raises, assert_equal -import numpy as np import math +import functools + +import numpy as np +from 
skimage.measure._regionprops import (regionprops as regionprops_default, + PROPS, perimeter, _parse_docs) +from skimage._shared import testing +from skimage._shared.testing import (assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_equal) -from skimage.measure._regionprops import (regionprops, PROPS, perimeter, - _parse_docs) + +regionprops = functools.partial(regionprops_default, coordinates='rc') SAMPLE = np.array( @@ -27,6 +32,7 @@ SAMPLE_3D[3, 2, 2] = 1 INTENSITY_SAMPLE_3D = SAMPLE_3D.copy() + def test_all_props(): region = regionprops(SAMPLE, INTENSITY_SAMPLE)[0] for prop in PROPS: @@ -41,13 +47,14 @@ except NotImplementedError: pass + def test_dtype(): regionprops(np.zeros((10, 10), dtype=np.int)) regionprops(np.zeros((10, 10), dtype=np.uint)) - assert_raises((TypeError), regionprops, - np.zeros((10, 10), dtype=np.float)) - assert_raises((TypeError), regionprops, - np.zeros((10, 10), dtype=np.double)) + with testing.raises(TypeError): + regionprops(np.zeros((10, 10), dtype=np.float)) + with testing.raises(TypeError): + regionprops(np.zeros((10, 10), dtype=np.double)) def test_ndim(): @@ -55,7 +62,8 @@ regionprops(np.zeros((10, 10, 1), dtype=np.int)) regionprops(np.zeros((10, 10, 1, 1), dtype=np.int)) regionprops(np.zeros((10, 10, 10), dtype=np.int)) - assert_raises(TypeError, regionprops, np.zeros((10, 10, 10, 2), dtype=np.int)) + with testing.raises(TypeError): + regionprops(np.zeros((10, 10, 10, 2), dtype=np.int)) def test_area(): @@ -77,21 +85,24 @@ bbox = regionprops(SAMPLE_3D)[0].bbox assert_array_almost_equal(bbox, (1, 1, 1, 4, 3, 3)) + def test_bbox_area(): - bbox_area = regionprops(SAMPLE)[0].bbox_area - assert_array_almost_equal(bbox_area, SAMPLE.shape[0] * SAMPLE.shape[1]) + padded = np.pad(SAMPLE, 5, mode='constant') + bbox_area = regionprops(padded)[0].bbox_area + assert_array_almost_equal(bbox_area, SAMPLE.size) + def test_moments_central(): mu = regionprops(SAMPLE)[0].moments_central # determined with OpenCV - 
assert_almost_equal(mu[0,2], 436.00000000000045) + assert_almost_equal(mu[2, 0], 436.00000000000045) # different from OpenCV results, bug in OpenCV - assert_almost_equal(mu[0,3], -737.333333333333) - assert_almost_equal(mu[1,1], -87.33333333333303) - assert_almost_equal(mu[1,2], -127.5555555555593) - assert_almost_equal(mu[2,0], 1259.7777777777774) - assert_almost_equal(mu[2,1], 2000.296296296291) - assert_almost_equal(mu[3,0], -760.0246913580195) + assert_almost_equal(mu[3, 0], -737.333333333333) + assert_almost_equal(mu[1, 1], -87.33333333333303) + assert_almost_equal(mu[2, 1], -127.5555555555593) + assert_almost_equal(mu[0, 2], 1259.7777777777774) + assert_almost_equal(mu[1, 2], 2000.296296296291) + assert_almost_equal(mu[0, 3], -760.0246913580195) def test_centroid(): @@ -143,6 +154,7 @@ prop_coords = regionprops(sample)[0].coords assert_array_equal(prop_coords, coords) + def test_eccentricity(): eps = regionprops(SAMPLE)[0].eccentricity assert_almost_equal(eps, 0.814629313427) @@ -182,8 +194,8 @@ 2.35390060e-02, 1.23151193e-03, 1.38882330e-06, - -2.72586158e-05, - 6.48350653e-06 + -2.72586158e-05, + -6.48350653e-06 ]) # bug in OpenCV caused in Central Moments calculation? 
assert_array_almost_equal(hu, ref) @@ -253,37 +265,39 @@ def test_moments(): - m = regionprops(SAMPLE)[0].moments + m = regionprops(SAMPLE)[0].moments.T # test was written with x/y coords # determined with OpenCV - assert_almost_equal(m[0,0], 72.0) - assert_almost_equal(m[0,1], 408.0) - assert_almost_equal(m[0,2], 2748.0) - assert_almost_equal(m[0,3], 19776.0) - assert_almost_equal(m[1,0], 680.0) - assert_almost_equal(m[1,1], 3766.0) - assert_almost_equal(m[1,2], 24836.0) - assert_almost_equal(m[2,0], 7682.0) - assert_almost_equal(m[2,1], 43882.0) - assert_almost_equal(m[3,0], 95588.0) + assert_almost_equal(m[0, 0], 72.0) + assert_almost_equal(m[0, 1], 408.0) + assert_almost_equal(m[0, 2], 2748.0) + assert_almost_equal(m[0, 3], 19776.0) + assert_almost_equal(m[1, 0], 680.0) + assert_almost_equal(m[1, 1], 3766.0) + assert_almost_equal(m[1, 2], 24836.0) + assert_almost_equal(m[2, 0], 7682.0) + assert_almost_equal(m[2, 1], 43882.0) + assert_almost_equal(m[3, 0], 95588.0) def test_moments_normalized(): - nu = regionprops(SAMPLE)[0].moments_normalized + # test was written with x/y coords + nu = regionprops(SAMPLE)[0].moments_normalized.T + # determined with OpenCV - assert_almost_equal(nu[0,2], 0.08410493827160502) - assert_almost_equal(nu[1,1], -0.016846707818929982) - assert_almost_equal(nu[1,2], -0.002899800614433943) - assert_almost_equal(nu[2,0], 0.24301268861454037) - assert_almost_equal(nu[2,1], 0.045473992910668816) - assert_almost_equal(nu[3,0], -0.017278118992041805) + assert_almost_equal(nu[0, 2], 0.08410493827160502) + assert_almost_equal(nu[1, 1], -0.016846707818929982) + assert_almost_equal(nu[1, 2], -0.002899800614433943) + assert_almost_equal(nu[2, 0], 0.24301268861454037) + assert_almost_equal(nu[2, 1], 0.045473992910668816) + assert_almost_equal(nu[3, 0], -0.017278118992041805) def test_orientation(): - orientation = regionprops(SAMPLE)[0].orientation + orientation = regionprops(SAMPLE.T)[0].orientation # determined with MATLAB 
assert_almost_equal(orientation, 0.10446844651921) # test correct quadrant determination - orientation2 = regionprops(SAMPLE.T)[0].orientation + orientation2 = regionprops(SAMPLE)[0].orientation assert_almost_equal(orientation2, math.pi / 2 - orientation) # test diagonal regions diag = np.eye(10, dtype=int) @@ -313,7 +327,7 @@ def test_weighted_moments_central(): wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE - )[0].weighted_moments_central + )[0].weighted_moments_central.T # test used x/y coords ref = np.array( [[ 7.4000000000e+01, -2.1316282073e-13, 4.7837837838e+02, -7.5943608473e+02], @@ -344,14 +358,14 @@ 1.2565683360e-03, 8.3014209421e-07, -3.5073773473e-05, - 6.7936409056e-06 + -6.7936409056e-06 ]) assert_array_almost_equal(whu, ref) def test_weighted_moments(): wm = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE - )[0].weighted_moments + )[0].weighted_moments.T # test used x/y coords ref = np.array( [[ 7.4000000000e+01, 4.1000000000e+02, 2.7500000000e+03, 1.9778000000e+04], @@ -367,7 +381,7 @@ def test_weighted_moments_normalized(): wnu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE - )[0].weighted_moments_normalized + )[0].weighted_moments_normalized.T # test used x/y coord ref = np.array( [[ np.nan, np.nan, 0.0873590903, -0.0161217406], [ np.nan, -0.0160405109, -0.0031421072, -0.0031376984], @@ -397,12 +411,14 @@ def get_intensity_image(): ps[0].intensity_image - assert_raises(AttributeError, get_intensity_image) + with testing.raises(AttributeError): + get_intensity_image() def test_invalid_size(): wrong_intensity_sample = np.array([[1], [1]]) - assert_raises(ValueError, regionprops, SAMPLE, wrong_intensity_sample) + with testing.raises(ValueError): + regionprops(SAMPLE, wrong_intensity_sample) def test_equals(): @@ -462,6 +478,6 @@ assert len(ds.split('\n')) > 3 -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() +def test_incorrect_coordinate_convention(): + with 
testing.raises(ValueError): + regionprops_default(SAMPLE, coordinates='xyz')[0] diff -Nru skimage-0.13.1/skimage/measure/tests/test_simple_metrics.py skimage-0.14.0/skimage/measure/tests/test_simple_metrics.py --- skimage-0.13.1/skimage/measure/tests/test_simple_metrics.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_simple_metrics.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,11 +1,13 @@ import numpy as np -from numpy.testing import (run_module_suite, assert_equal, assert_raises, - assert_almost_equal) -from skimage.measure import compare_psnr, compare_nrmse, compare_mse import skimage.data +from skimage.measure import compare_psnr, compare_nrmse, compare_mse + +from skimage._shared import testing +from skimage._shared.testing import assert_equal, assert_almost_equal from skimage._shared._warnings import expected_warnings + np.random.seed(5) cam = skimage.data.camera() sigma = 20.0 @@ -23,9 +25,20 @@ def test_PSNR_float(): p_uint8 = compare_psnr(cam, cam_noisy) - p_float64 = compare_psnr(cam/255., cam_noisy/255., data_range=1) + p_float64 = compare_psnr(cam / 255., cam_noisy / 255., + data_range=1) assert_almost_equal(p_uint8, p_float64, decimal=5) + # mixed precision inputs + p_mixed = compare_psnr(cam / 255., np.float32(cam_noisy / 255.), + data_range=1) + assert_almost_equal(p_mixed, p_float64, decimal=5) + + # mismatched dtype results in a warning if data_range is unspecified + with expected_warnings(['Inputs have mismatched dtype']): + p_mixed = compare_psnr(cam / 255., np.float32(cam_noisy / 255.)) + assert_almost_equal(p_mixed, p_float64, decimal=5) + def test_PSNR_dynamic_range_and_data_range(): # Tests deprecation of "dynamic_range" in favor of "data_range" @@ -39,8 +52,9 @@ def test_PSNR_errors(): - assert_raises(ValueError, compare_psnr, cam, cam.astype(np.float32)) - assert_raises(ValueError, compare_psnr, cam, cam[:-1, :]) + # shape mismatch + with testing.raises(ValueError): + compare_psnr(cam, cam[:-1, :]) def 
test_NRMSE(): @@ -50,6 +64,10 @@ assert_equal(compare_nrmse(y, x, 'Euclidean'), 1/np.sqrt(3)) assert_equal(compare_nrmse(y, x, 'min-max'), 1/(y.max()-y.min())) + # mixed precision inputs are allowed + assert_almost_equal(compare_nrmse(y, np.float32(x), 'min-max'), + 1 / (y.max() - y.min())) + def test_NRMSE_no_int_overflow(): camf = cam.astype(np.float32) @@ -62,12 +80,9 @@ def test_NRMSE_errors(): x = np.ones(4) - assert_raises(ValueError, compare_nrmse, - x.astype(np.uint8), x.astype(np.float32)) - assert_raises(ValueError, compare_nrmse, x[:-1], x) + # shape mismatch + with testing.raises(ValueError): + compare_nrmse(x[:-1], x) # invalid normalization name - assert_raises(ValueError, compare_nrmse, x, x, 'foo') - - -if __name__ == "__main__": - run_module_suite() + with testing.raises(ValueError): + compare_nrmse(x, x, 'foo') diff -Nru skimage-0.13.1/skimage/measure/tests/test_structural_similarity.py skimage-0.14.0/skimage/measure/tests/test_structural_similarity.py --- skimage-0.13.1/skimage/measure/tests/test_structural_similarity.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/measure/tests/test_structural_similarity.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,17 +1,16 @@ import os import numpy as np -import scipy.io -from numpy.testing import (assert_equal, assert_raises, assert_almost_equal, - assert_array_almost_equal) +from skimage import data, data_dir from skimage.measure import compare_ssim as ssim -import skimage.data -from skimage.io import imread -from skimage import data_dir + +from skimage._shared import testing from skimage._shared._warnings import expected_warnings +from skimage._shared.testing import (assert_equal, assert_almost_equal, + assert_array_almost_equal) np.random.seed(5) -cam = skimage.data.camera() +cam = data.camera() sigma = 20.0 cam_noisy = np.clip(cam + sigma * np.random.randn(*cam.shape), 0, 255) cam_noisy = cam_noisy.astype(cam.dtype) @@ -19,14 +18,6 @@ np.random.seed(1234) -# This test to be removed in 
0.14, along with the structural_similarity alias -# for compare_ssim -def test_old_name_deprecated(): - from skimage.measure import structural_similarity - with expected_warnings('deprecated'): - ssim_result = structural_similarity(cam, cam_noisy, win_size=31) - - def test_ssim_patch_range(): N = 51 X = (np.random.rand(N, N) * 255).astype(np.uint8) @@ -48,7 +39,7 @@ assert(S1 < 0.3) S2 = ssim(X, Y, win_size=11, gaussian_weights=True) - assert(S1 < 0.3) + assert(S2 < 0.3) mssim0, S3 = ssim(X, Y, full=True) assert_equal(S3.shape, X.shape) @@ -77,24 +68,6 @@ assert np.all(grad < 0.05) -# NOTE: This test is known to randomly fail on some systems (Mac OS X 10.6) -def test_ssim_dynamic_range_and_data_range(): - # Tests deprecation of "dynamic_range" in favor of "data_range" - N = 30 - X = np.random.rand(N, N) * 255 - Y = np.random.rand(N, N) * 255 - - with expected_warnings( - '`dynamic_range` has been deprecated in favor of ' - '`data_range`. The `dynamic_range` keyword argument ' - 'will be removed in v0.14'): - out2 = ssim(X, Y, dynamic_range=255) - - out1 = ssim(X, Y, data_range=255) - - assert_equal(out1, out2) - - def test_ssim_dtype(): N = 30 X = np.random.rand(N, N) @@ -138,7 +111,8 @@ assert_equal(S3.shape, Xc.shape) # fail if win_size exceeds any non-channel dimension - assert_raises(ValueError, ssim, Xc, Yc, win_size=7, multichannel=False) + with testing.raises(ValueError): + ssim(Xc, Yc, win_size=7, multichannel=False) def test_ssim_nD(): @@ -155,7 +129,7 @@ def test_ssim_multichannel_chelsea(): # color image example - Xc = skimage.data.chelsea() + Xc = data.chelsea() sigma = 15.0 Yc = np.clip(Xc + sigma * np.random.randn(*Xc.shape), 0, 255) Yc = Yc.astype(Xc.dtype) @@ -219,24 +193,30 @@ assert_almost_equal(mssim, mssim_skimage_0pt11) -def test_invalid_input(): - X = np.zeros((3, 3), dtype=np.double) - Y = np.zeros((3, 3), dtype=np.int) - assert_raises(ValueError, ssim, X, Y) - - Y = np.zeros((4, 4), dtype=np.double) - assert_raises(ValueError, ssim, X, Y) - 
- assert_raises(ValueError, ssim, X, X, win_size=8) - - # do not allow both image content weighting and gradient calculation - assert_raises(ValueError, ssim, X, X, image_content_weighting=True, - gradient=True) - # some kwarg inputs must be non-negative - assert_raises(ValueError, ssim, X, X, K1=-0.1) - assert_raises(ValueError, ssim, X, X, K2=-0.1) - assert_raises(ValueError, ssim, X, X, sigma=-1.0) +def test_mssim_mixed_dtype(): + mssim = ssim(cam, cam_noisy) + with expected_warnings(['Inputs have mismatched dtype']): + mssim_mixed = ssim(cam, cam_noisy.astype(np.float32)) + assert_almost_equal(mssim, mssim_mixed) + + # no warning when user supplies data_range + mssim_mixed = ssim(cam, cam_noisy.astype(np.float32), data_range=255) + assert_almost_equal(mssim, mssim_mixed) -if __name__ == "__main__": - np.testing.run_module_suite() +def test_invalid_input(): + # size mismatch + X = np.zeros((9, 9), dtype=np.double) + Y = np.zeros((8, 8), dtype=np.double) + with testing.raises(ValueError): + ssim(X, Y) + # win_size exceeds image extent + with testing.raises(ValueError): + ssim(X, X, win_size=X.shape[0] + 1) + # some kwarg inputs must be non-negative + with testing.raises(ValueError): + ssim(X, X, K1=-0.1) + with testing.raises(ValueError): + ssim(X, X, K2=-0.1) + with testing.raises(ValueError): + ssim(X, X, sigma=-1.0) diff -Nru skimage-0.13.1/skimage/morphology/binary.py skimage-0.14.0/skimage/morphology/binary.py --- skimage-0.13.1/skimage/morphology/binary.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/binary.py 2018-05-29 01:27:44.000000000 +0000 @@ -39,7 +39,7 @@ """ if out is None: out = np.empty(image.shape, dtype=np.bool) - ndi.binary_erosion(image, structure=selem, output=out) + ndi.binary_erosion(image, structure=selem, output=out, border_value=True) return out diff -Nru skimage-0.13.1/skimage/morphology/convex_hull.py skimage-0.14.0/skimage/morphology/convex_hull.py --- skimage-0.13.1/skimage/morphology/convex_hull.py 
2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/convex_hull.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,19 +1,24 @@ """Convex Hull.""" +from itertools import product import numpy as np +from scipy.spatial import ConvexHull from ..measure.pnpoly import grid_points_in_poly from ._convex_hull import possible_hull from ..measure._label import label from ..util import unique_rows +from .._shared.utils import warn __all__ = ['convex_hull_image', 'convex_hull_object'] -try: - from scipy.spatial import Delaunay -except ImportError: - Delaunay = None +def _offsets_diamond(ndim): + offsets = np.zeros((2 * ndim, ndim)) + for vertex, (axis, offset) in enumerate(product(range(ndim), (-0.5, 0.5))): + offsets[vertex, axis] = offset + return offsets -def convex_hull_image(image): + +def convex_hull_image(image, offset_coordinates=True, tolerance=1e-10): """Compute the convex hull image of a binary image. The convex hull is the set of pixels included in the smallest convex @@ -21,8 +26,16 @@ Parameters ---------- - image : (M, N) array + image : array Binary input image. This array is cast to bool before processing. + offset_coordinates : bool, optional + If ``True``, a pixel at coordinate, e.g., (4, 7) will be represented + by coordinates (3.5, 7), (4.5, 7), (4, 6.5), and (4, 7.5). This adds + some "extent" to a pixel when computing the hull. + tolerance : float, optional + Tolerance when determining whether a point is inside the hull. Due + to numerical floating point errors, a tolerance of 0 can result in + some points erroneously being classified as being outside the hull. Returns ------- @@ -34,48 +47,48 @@ .. 
[1] http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/ """ - if image.ndim > 2: - raise ValueError("Input must be a 2D image") - - if Delaunay is None: - raise ImportError("Could not import scipy.spatial.Delaunay, " - "only available in scipy >= 0.9.") - - # Here we do an optimisation by choosing only pixels that are + ndim = image.ndim + if np.count_nonzero(image) == 0: + warn("Input image is entirely zero, no valid convex hull. " + "Returning empty image", UserWarning) + return np.zeros(image.shape, dtype=np.bool_) + # In 2D, we do an optimisation by choosing only pixels that are # the starting or ending pixel of a row or column. This vastly # limits the number of coordinates to examine for the virtual hull. - coords = possible_hull(image.astype(np.uint8)) - N = len(coords) + if ndim == 2: + coords = possible_hull(image.astype(np.uint8)) + else: + coords = np.transpose(np.nonzero(image)) + if offset_coordinates: + # when offsetting, we multiply number of vertices by 2 * ndim. + # therefore, we reduce the number of coordinates by using a + # convex hull on the original set, before offsetting. + hull0 = ConvexHull(coords) + coords = hull0.points[hull0.vertices] # Add a vertex for the middle of each pixel edge - coords_corners = np.empty((N * 4, 2)) - for i, (x_offset, y_offset) in enumerate(zip((0, 0, -0.5, 0.5), - (-0.5, 0.5, 0, 0))): - coords_corners[i * N:(i + 1) * N] = coords + [x_offset, y_offset] + if offset_coordinates: + offsets = _offsets_diamond(image.ndim) + coords = (coords[:, np.newaxis, :] + offsets).reshape(-1, ndim) # repeated coordinates can *sometimes* cause problems in - # scipy.spatial.Delaunay, so we remove them. - coords = unique_rows(coords_corners) - - # Subtract offset - offset = coords.mean(axis=0) - coords -= offset + # scipy.spatial.ConvexHull, so we remove them. 
+ coords = unique_rows(coords) # Find the convex hull - chull = Delaunay(coords).convex_hull - v = coords[np.unique(chull)] + hull = ConvexHull(coords) + vertices = hull.points[hull.vertices] - # Sort vertices clock-wise - v_centred = v - v.mean(axis=0) - angles = np.arctan2(v_centred[:, 0], v_centred[:, 1]) - v = v[np.argsort(angles)] - - # Add back offset - v += offset - - # For each pixel coordinate, check whether that pixel - # lies inside the convex hull - mask = grid_points_in_poly(image.shape[:2], v) + # If 2D, use fast Cython function to locate convex hull pixels + if ndim == 2: + mask = grid_points_in_poly(image.shape, vertices) + else: + gridcoords = np.reshape(np.mgrid[tuple(map(slice, image.shape))], + (ndim, -1)) + # A point is in the hull if it satisfies all of the hull's inequalities + coords_in_hull = np.all(hull.equations[:, :ndim].dot(gridcoords) + + hull.equations[:, ndim:] < tolerance, axis=0) + mask = np.reshape(coords_in_hull, image.shape) return mask diff -Nru skimage-0.13.1/skimage/morphology/extrema.py skimage-0.14.0/skimage/morphology/extrema.py --- skimage-0.13.1/skimage/morphology/extrema.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/extrema.py 2018-05-29 01:27:44.000000000 +0000 @@ -17,35 +17,35 @@ from ..util import dtype_limits -def _add_constant_clip(img, const_value): +def _add_constant_clip(image, const_value): """Add constant to the image while handling overflow issues gracefully. 
""" - min_dtype, max_dtype = dtype_limits(img, clip_negative=False) + min_dtype, max_dtype = dtype_limits(image, clip_negative=False) if const_value > (max_dtype - min_dtype): raise ValueError("The added constant is not compatible" "with the image data type.") - result = img + const_value - result[img > max_dtype-const_value] = max_dtype + result = image + const_value + result[image > max_dtype-const_value] = max_dtype return(result) -def _subtract_constant_clip(img, const_value): +def _subtract_constant_clip(image, const_value): """Subtract constant from image while handling underflow issues. """ - min_dtype, max_dtype = dtype_limits(img, clip_negative=False) + min_dtype, max_dtype = dtype_limits(image, clip_negative=False) if const_value > (max_dtype-min_dtype): raise ValueError("The subtracted constant is not compatible" "with the image data type.") - result = img - const_value - result[img < (const_value + min_dtype)] = min_dtype + result = image - const_value + result[image < (const_value + min_dtype)] = min_dtype return(result) -def h_maxima(img, h, selem=None): +def h_maxima(image, h, selem=None): """Determine all maxima of the image with height >= h. The local maxima are defined as connected sets of pixels with equal @@ -60,7 +60,7 @@ Parameters ---------- - img : ndarray + image : ndarray The input image for which the maxima are to be calculated. h : unsigned integer The minimal height of all extracted maxima. @@ -108,25 +108,25 @@ The resulting image will contain 4 local maxima. 
""" - if np.issubdtype(img.dtype, 'half'): - resolution = 2 * np.finfo(img.dtype).resolution + if np.issubdtype(image.dtype, np.floating): + resolution = 2 * np.finfo(image.dtype).resolution if h < resolution: h = resolution h_corrected = h - resolution / 2.0 - shifted_img = img - h + shifted_img = image - h else: - shifted_img = _subtract_constant_clip(img, h) + shifted_img = _subtract_constant_clip(image, h) h_corrected = h - rec_img = greyreconstruct.reconstruction(shifted_img, img, + rec_img = greyreconstruct.reconstruction(shifted_img, image, method='dilation', selem=selem) - residue_img = img - rec_img - h_max = np.zeros(img.shape, dtype=np.uint8) + residue_img = image - rec_img + h_max = np.zeros(image.shape, dtype=np.uint8) h_max[residue_img >= h_corrected] = 1 return h_max -def h_minima(img, h, selem=None): +def h_minima(image, h, selem=None): """Determine all minima of the image with depth >= h. The local minima are defined as connected sets of pixels with equal @@ -141,7 +141,7 @@ Parameters ---------- - img : ndarray + image : ndarray The input image for which the minima are to be calculated. h : unsigned integer The minimal depth of all extracted minima. @@ -189,34 +189,38 @@ The resulting image will contain 4 local minima. 
""" - if np.issubdtype(img.dtype, 'half'): - resolution = 2 * np.finfo(img.dtype).resolution + if np.issubdtype(image.dtype, np.floating): + resolution = 2 * np.finfo(image.dtype).resolution if h < resolution: h = resolution h_corrected = h - resolution / 2.0 - shifted_img = img + h + shifted_img = image + h else: - shifted_img = _add_constant_clip(img, h) + shifted_img = _add_constant_clip(image, h) h_corrected = h - rec_img = greyreconstruct.reconstruction(shifted_img, img, + rec_img = greyreconstruct.reconstruction(shifted_img, image, method='erosion', selem=selem) - residue_img = rec_img - img - h_min = np.zeros(img.shape, dtype=np.uint8) + residue_img = rec_img - image + h_min = np.zeros(image.shape, dtype=np.uint8) h_min[residue_img >= h_corrected] = 1 return h_min -def _find_min_diff(img): +def _find_min_diff(image): """ Find the minimal difference of grey levels inside the image. """ - img_vec = np.unique(img.flatten()) + img_vec = np.unique(image.flatten()) + + if img_vec.size == 1: + return 0 + min_diff = np.min(img_vec[1:] - img_vec[:-1]) return min_diff -def local_maxima(img, selem=None): +def local_maxima(image, selem=None): """Determine all local maxima of the image. The local maxima are defined as connected sets of pixels with equal @@ -229,7 +233,7 @@ Parameters ---------- - img : ndarray + image : ndarray The input image for which the maxima are to be calculated. selem : ndarray, optional The neighborhood expressed as an n-D array of 1's and 0's. @@ -275,16 +279,17 @@ The resulting image will contain all 6 local maxima. 
""" - if np.issubdtype(img.dtype, 'half'): - # find the minimal grey level difference - h = _find_min_diff(img) - else: + # find the minimal grey level difference + h = _find_min_diff(image) + if h == 0: + return np.zeros(image.shape, np.uint8) + if not np.issubdtype(image.dtype, np.floating): h = 1 - local_max = h_maxima(img, h, selem=selem) + local_max = h_maxima(image, h, selem=selem) return local_max -def local_minima(img, selem=None): +def local_minima(image, selem=None): """Determine all local minima of the image. The local minima are defined as connected sets of pixels with equal @@ -297,7 +302,7 @@ Parameters ---------- - img : ndarray + image : ndarray The input image for which the minima are to be calculated. selem : ndarray, optional The neighborhood expressed as an n-D array of 1's and 0's. @@ -343,10 +348,11 @@ The resulting image will contain all 6 local minima. """ - if np.issubdtype(img.dtype, 'half'): - # find the minimal grey level difference - h = _find_min_diff(img) - else: + # find the minimal grey level difference + h = _find_min_diff(image) + if h == 0: + return np.zeros(image.shape, np.uint8) + if not np.issubdtype(image.dtype, np.floating): h = 1 - local_min = h_minima(img, h, selem=selem) + local_min = h_minima(image, h, selem=selem) return local_min diff -Nru skimage-0.13.1/skimage/morphology/grey.py skimage-0.14.0/skimage/morphology/grey.py --- skimage-0.13.1/skimage/morphology/grey.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/grey.py 2018-05-29 01:27:44.000000000 +0000 @@ -29,8 +29,8 @@ out : 2D array, shape (M + int(shift_x), N + int(shift_y)) The shifted structuring element. 
""" - if selem.ndim > 2: - # do nothing for 3D or higher structuring elements + if selem.ndim != 2: + # do nothing for 1D or 3D or higher structuring elements return selem m, n = selem.shape if m % 2 == 0: @@ -397,11 +397,24 @@ selem = np.array(selem) if out is image: opened = opening(image, selem) - out -= opened + if np.issubdtype(opened.dtype, np.bool_): + np.logical_xor(out, opened, out=out) + else: + out -= opened return out elif out is None: out = np.empty_like(image) - out = ndi.white_tophat(image, footprint=selem, output=out) + # work-around for NumPy deprecation warning for arithmetic + # operations on bool arrays + if isinstance(image, np.ndarray) and image.dtype == np.bool: + image_ = image.view(dtype=np.uint8) + else: + image_ = image + if isinstance(out, np.ndarray) and out.dtype == np.bool: + out_ = out.view(dtype=np.uint8) + else: + out_ = out + out_ = ndi.white_tophat(image_, footprint=selem, output=out_) return out @@ -453,5 +466,8 @@ else: original = image out = closing(image, selem, out=out) - out -= original + if np.issubdtype(out.dtype, np.bool_): + np.logical_xor(out, original, out=out) + else: + out -= original return out diff -Nru skimage-0.13.1/skimage/morphology/misc.py skimage-0.14.0/skimage/morphology/misc.py --- skimage-0.13.1/skimage/morphology/misc.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/misc.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,3 +1,4 @@ +"""Miscellaneous morphology functions.""" import numpy as np import functools from scipy import ndimage as ndi @@ -29,6 +30,7 @@ func_out : function The function, using a default structuring element of same dimension as the input image with connectivity 1. 
+ """ @functools.wraps(func) def func_out(image, selem=None, *args, **kwargs): @@ -38,12 +40,14 @@ return func_out + def _check_dtype_supported(ar): # Should use `issubdtype` for bool below, but there's a bug in numpy 1.7 if not (ar.dtype == bool or np.issubdtype(ar.dtype, np.integer)): raise TypeError("Only bool or integer image types are supported. " "Got %s." % ar.dtype) + def remove_small_objects(ar, min_size=64, connectivity=1, in_place=False): """Remove connected components smaller than the specified size. @@ -92,6 +96,7 @@ >>> d = morphology.remove_small_objects(a, 6, in_place=True) >>> d is a True + """ # Raising type error if not int or bool _check_dtype_supported(ar) @@ -118,7 +123,7 @@ "relabeling the input with `scipy.ndimage.label` or " "`skimage.morphology.label`.") - if len(component_sizes) == 2: + if len(component_sizes) == 2 and out.dtype != bool: warn("Only one label was provided to `remove_small_objects`. " "Did you mean to use a boolean array?") @@ -128,21 +133,25 @@ return out -def remove_small_holes(ar, min_size=64, connectivity=1, in_place=False): + +def remove_small_holes(ar, area_threshold=64, connectivity=1, in_place=False, + min_size=None): """Remove continguous holes smaller than the specified size. Parameters ---------- ar : ndarray (arbitrary shape, int or bool type) The array containing the connected components of interest. - min_size : int, optional (default: 64) - The hole component size. + area_threshold : int, optional (default: 64) + The maximum area, in pixels, of a contiguous hole that will be filled. + Replaces `min_size`. connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1) The connectivity defining the neighborhood of a pixel. in_place : bool, optional (default: False) If `True`, remove the connected components in the input array itself. Otherwise, make a copy. + Raises ------ TypeError @@ -180,35 +189,40 @@ Notes ----- - If the array type is int, it is assumed that it contains already-labeled objects. 
The labels are not kept in the output image (this function always outputs a bool image). It is suggested that labeling is completed after using this function. + """ _check_dtype_supported(ar) - #Creates warning if image is an integer image + # Creates warning if image is an integer image if ar.dtype != bool: warn("Any labeled images will be returned as a boolean array. " "Did you mean to use a boolean array?", UserWarning) + if min_size is not None: + warn("the min_size argument is deprecated and will be removed in " + + "0.16. Use area_threshold instead.") + area_threshold = min_size + if in_place: out = ar else: out = ar.copy() - #Creating the inverse of ar + # Creating the inverse of ar if in_place: - out = np.logical_not(out,out) + out = np.logical_not(out, out) else: out = np.logical_not(out) - #removing small objects from the inverse of ar - out = remove_small_objects(out, min_size, connectivity, in_place) + # removing small objects from the inverse of ar + out = remove_small_objects(out, area_threshold, connectivity, in_place) if in_place: - out = np.logical_not(out,out) + out = np.logical_not(out, out) else: out = np.logical_not(out) diff -Nru skimage-0.13.1/skimage/morphology/_skeletonize.py skimage-0.14.0/skimage/morphology/_skeletonize.py --- skimage-0.13.1/skimage/morphology/_skeletonize.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/_skeletonize.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,7 +2,6 @@ Algorithms for computing the skeleton of a binary image """ -import sys from six.moves import range import numpy as np from scipy import ndimage as ndi @@ -249,8 +248,6 @@ [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], dtype=uint8) """ - # check parameters - max_iter = max_iter or sys.maxsize # check that image is 2d assert_nD(image, 2) @@ -258,13 +255,17 @@ skel = np.asanyarray(image, dtype=bool).astype(np.uint8) # neighborhood mask - mask = np.array([[ 8, 4, 2], - [16, 0, 1], - [32, 64,128]], dtype=np.uint8) + mask = 
np.array([[ 8, 4, 2], + [16, 0, 1], + [32, 64, 128]], dtype=np.uint8) # iterate until convergence, up to the iteration limit - for i in range(max_iter): - before = np.sum(skel) # count points before thinning + max_iter = max_iter or np.inf + n_iter = 0 + n_pts_old, n_pts_new = np.inf, np.sum(skel) + while n_pts_old != n_pts_new and n_iter < max_iter: + n_pts_old = n_pts_new + # perform the two "subiterations" described in the paper for lut in [G123_LUT, G123P_LUT]: # correlate image with neighborhood mask @@ -274,11 +275,8 @@ # perform deletion skel[D] = 0 - after = np.sum(skel) # count points after thinning - - if before == after: - # iteration had no effect: finish - break + n_pts_new = np.sum(skel) # count points after thinning + n_iter += 1 return skel.astype(np.bool) @@ -429,7 +427,7 @@ _skeletonize_loop(result, i, j, order, table) result = result.astype(bool) - if not mask is None: + if mask is not None: result[~mask] = image[~mask] if return_distance: return result, store_distance diff -Nru skimage-0.13.1/skimage/morphology/tests/test_binary.py skimage-0.14.0/skimage/morphology/tests/test_binary.py --- skimage-0.13.1/skimage/morphology/tests/test_binary.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_binary.py 2018-05-29 01:27:44.000000000 +0000 @@ -6,9 +6,10 @@ from skimage.morphology import binary, grey, selem from scipy import ndimage as ndi +import pytest img = color.rgb2gray(data.astronaut()) -bw_img = img > 100 +bw_img = img > 100 / 255. 
def test_non_square_image(): @@ -65,9 +66,13 @@ testing.assert_(np.any(out != out_saved)) testing.assert_array_equal(out, func(img, strel)) -def test_default_selem(): - functions = [binary.binary_erosion, binary.binary_dilation, - binary.binary_opening, binary.binary_closing] + +binary_functions = [binary.binary_erosion, binary.binary_dilation, + binary.binary_opening, binary.binary_closing] + + +@pytest.mark.parametrize("function", binary_functions) +def test_default_selem(function): strel = selem.diamond(radius=1) image = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -82,10 +87,9 @@ [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], np.uint8) - for function in functions: - im_expected = function(image, strel) - im_test = function(image) - yield testing.assert_array_equal, im_expected, im_test + im_expected = function(image, strel) + im_test = function(image) + testing.assert_array_equal(im_expected, im_test) def test_3d_fallback_default_selem(): # 3x3x3 cube inside a 7x7x7 image: @@ -99,16 +103,20 @@ image_expected[2:5, 2:5, 2:5] = ndi.generate_binary_structure(3, 1) testing.assert_array_equal(opened, image_expected) -def test_3d_fallback_cube_selem(): + +binary_3d_fallback_functions = [binary.binary_opening, binary.binary_closing] + + +@pytest.mark.parametrize("function", binary_3d_fallback_functions) +def test_3d_fallback_cube_selem(function): # 3x3x3 cube inside a 7x7x7 image: image = np.zeros((7, 7, 7), np.bool) image[2:-2, 2:-2, 2:-2] = 1 cube = np.ones((3, 3, 3), dtype=np.uint8) - for function in [binary.binary_closing, binary.binary_opening]: - new_image = function(image, cube) - yield testing.assert_array_equal, new_image, image + new_image = function(image, cube) + testing.assert_array_equal(new_image, image) def test_2d_ndimage_equivalence(): image = np.zeros((9, 9), np.uint16) diff -Nru skimage-0.13.1/skimage/morphology/tests/test_ccomp.py 
skimage-0.14.0/skimage/morphology/tests/test_ccomp.py --- skimage-0.13.1/skimage/morphology/tests/test_ccomp.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_ccomp.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,12 +1,13 @@ import numpy as np -from numpy.testing import assert_array_equal, run_module_suite from skimage.measure import label import skimage.measure._ccomp as ccomp -from skimage._shared._warnings import expected_warnings -# Background value -BG = 0 +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal + + +BG = 0 # background value class TestConnectedComponents: @@ -261,7 +262,8 @@ def test_nd(self): x = np.ones((1, 2, 3, 4)) - np.testing.assert_raises(NotImplementedError, label, x) + with testing.raises(NotImplementedError): + label(x) class TestSupport: @@ -281,7 +283,3 @@ back = ccomp.undo_reshape_array(fixed, swaps) # check that the undo works as expected assert_array_equal(inp, back) - - -if __name__ == "__main__": - run_module_suite() diff -Nru skimage-0.13.1/skimage/morphology/tests/test_convex_hull.py skimage-0.14.0/skimage/morphology/tests/test_convex_hull.py --- skimage-0.13.1/skimage/morphology/tests/test_convex_hull.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_convex_hull.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,17 +1,11 @@ import numpy as np -from numpy.testing import assert_array_equal, assert_raises -from numpy.testing.decorators import skipif from skimage.morphology import convex_hull_image, convex_hull_object from skimage.morphology._convex_hull import possible_hull -try: - import scipy.spatial - scipy_spatial = True -except ImportError: - scipy_spatial = False +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal, expected_warnings -@skipif(not scipy_spatial) def test_basic(): image = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -31,12 +25,13 @@ 
assert_array_equal(convex_hull_image(image), expected) - # Test that an error is raised on passing a 3D image: - image3d = np.empty((5, 5, 5)) - assert_raises(ValueError, convex_hull_image, image3d) + +def test_empty_image(): + image = np.zeros((6, 6), dtype=bool) + with expected_warnings(['entirely zero']): + assert_array_equal(convex_hull_image(image), image) -@skipif(not scipy_spatial) def test_qhull_offset_example(): nonzeros = (([1367, 1368, 1368, 1368, 1369, 1369, 1369, 1369, 1369, 1370, 1370, 1370, 1370, 1370, 1370, 1370, 1371, 1371, 1371, 1371, @@ -55,7 +50,6 @@ assert_array_equal(convex_hull_image(image), expected) -@skipif(not scipy_spatial) def test_pathological_qhull_example(): image = np.array( [[0, 0, 0, 0, 1, 0, 0], @@ -68,7 +62,6 @@ assert_array_equal(convex_hull_image(image), expected) -@skipif(not scipy_spatial) def test_possible_hull(): image = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -102,7 +95,6 @@ assert_array_equal(ph, expected) -@skipif(not scipy_spatial) def test_object(): image = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -141,11 +133,20 @@ assert_array_equal(convex_hull_object(image, 8), expected8) - assert_raises(ValueError, convex_hull_object, image, 7) + with testing.raises(ValueError): + convex_hull_object(image, 7) - # Test that an error is raised on passing a 3D image: - image3d = np.empty((5, 5, 5)) - assert_raises(ValueError, convex_hull_object, image3d) -if __name__ == "__main__": - np.testing.run_module_suite() +@testing.fixture +def images2d3d(): + from ...measure.tests.test_regionprops import SAMPLE as image + image3d = np.stack((image, image, image)) + return image, image3d + + +def test_consistent_2d_3d_hulls(images2d3d): + image, image3d = images2d3d + chimage = convex_hull_image(image) + chimage[8, 0] = True # correct for single point exactly on hull edge + chimage3d = convex_hull_image(image3d) + assert_array_equal(chimage3d[1], chimage) diff -Nru skimage-0.13.1/skimage/morphology/tests/test_extrema.py 
skimage-0.14.0/skimage/morphology/tests/test_extrema.py --- skimage-0.13.1/skimage/morphology/tests/test_extrema.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_extrema.py 2018-05-29 01:27:44.000000000 +0000 @@ -192,6 +192,28 @@ assert error < eps assert out.dtype == expected_result.dtype + def test_local_extrema_uniform(self): + "local extrema tests for uniform arrays with various data types" + + data = np.full((7, 6), 42, dtype=np.uint8) + + expected_result = np.zeros((7, 6), dtype=np.uint8) + + for dtype in [np.uint8, np.uint64, np.int8, np.int64]: + data = data.astype(dtype) + + # test for local maxima + out = extrema.local_maxima(data) + error = diff(expected_result, out) + assert error < eps + assert out.dtype == expected_result.dtype + + # test for local minima + out = extrema.local_minima(data) + error = diff(expected_result, out) + assert error < eps + assert out.dtype == expected_result.dtype + def test_extrema_float(self): "specific tests for float type" data = np.array([[0.10, 0.11, 0.13, 0.14, 0.14, 0.15, 0.14, diff -Nru skimage-0.13.1/skimage/morphology/tests/test_grey.py skimage-0.14.0/skimage/morphology/tests/test_grey.py --- skimage-0.13.1/skimage/morphology/tests/test_grey.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_grey.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,16 +1,18 @@ -import os.path +import os import numpy as np -from numpy import testing from scipy import ndimage as ndi from skimage import color, data, transform from skimage import img_as_uint, img_as_ubyte, data_dir from skimage.morphology import grey, selem from skimage._shared._warnings import expected_warnings +from skimage._shared import testing +from skimage._shared.testing import (assert_array_equal, assert_equal, + TestCase, parametrize) -class TestMorphology(): +class TestMorphology(TestCase): # These expected outputs were generated with skimage v0.12.1 # using: @@ -20,7 +22,6 @@ # output = 
TestMorphology()._build_expected_output() # np.savez_compressed('gray_morph_output.npz', **output) - def _build_expected_output(self): funcs = (grey.erosion, grey.dilation, grey.opening, grey.closing, grey.white_tophat, grey.black_tophat) @@ -35,19 +36,21 @@ for n in range(1, 4): for strel in selems_2D: for func in funcs: - key = '{0}_{1}_{2}'.format(strel.__name__, n, func.__name__) + key = '{0}_{1}_{2}'.format( + strel.__name__, n, func.__name__) output[key] = func(image, strel(n)) return output def test_gray_morphology(self): - expected = dict(np.load(os.path.join(data_dir, 'gray_morph_output.npz'))) + expected = dict(np.load( + os.path.join(data_dir, 'gray_morph_output.npz'))) calculated = self._build_expected_output() - testing.assert_equal(expected, calculated) - + assert_equal(expected, calculated) -class TestEccentricStructuringElements(): +class TestEccentricStructuringElements(TestCase): + @testing.fixture(autouse=True) def setUp(self): self.black_pixel = 255 * np.ones((4, 4), dtype=np.uint8) self.black_pixel[1, 1] = 0 @@ -100,10 +103,13 @@ assert np.all(tophat == 0) -def test_default_selem(): - functions = [grey.erosion, grey.dilation, - grey.opening, grey.closing, - grey.white_tophat, grey.black_tophat] +grey_functions = [grey.erosion, grey.dilation, + grey.opening, grey.closing, + grey.white_tophat, grey.black_tophat] + + +@parametrize("function", grey_functions) +def test_default_selem(function): strel = selem.diamond(radius=1) image = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -118,10 +124,9 @@ [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], np.uint8) - for function in functions: - im_expected = function(image, strel) - im_test = function(image) - yield testing.assert_array_equal, im_expected, im_test + im_expected = function(image, strel) + im_test = function(image) + testing.assert_array_equal(im_expected, im_test) def test_3d_fallback_default_selem(): @@ -134,19 
+139,22 @@ # expect a "hyper-cross" centered in the 5x5x5: image_expected = np.zeros((7, 7, 7), dtype=bool) image_expected[2:5, 2:5, 2:5] = ndi.generate_binary_structure(3, 1) - testing.assert_array_equal(opened, image_expected) + assert_array_equal(opened, image_expected) -def test_3d_fallback_cube_selem(): +grey_3d_fallback_functions = [grey.closing, grey.opening] + + +@parametrize("function", grey_3d_fallback_functions) +def test_3d_fallback_cube_selem(function): # 3x3x3 cube inside a 7x7x7 image: image = np.zeros((7, 7, 7), np.bool) image[2:-2, 2:-2, 2:-2] = 1 cube = np.ones((3, 3, 3), dtype=np.uint8) - for function in [grey.closing, grey.opening]: - new_image = function(image, cube) - yield testing.assert_array_equal, new_image, image + new_image = function(image, cube) + testing.assert_array_equal(new_image, image) def test_3d_fallback_white_tophat(): @@ -157,10 +165,11 @@ with expected_warnings(['operator.*deprecated|\A\Z']): new_image = grey.white_tophat(image) - footprint = ndi.generate_binary_structure(3,1) + footprint = ndi.generate_binary_structure(3, 1) with expected_warnings(['operator.*deprecated|\A\Z']): - image_expected = ndi.white_tophat(image,footprint=footprint) - testing.assert_array_equal(new_image, image_expected) + image_expected = ndi.white_tophat( + image.view(dtype=np.uint8), footprint=footprint) + assert_array_equal(new_image, image_expected) def test_3d_fallback_black_tophat(): @@ -171,10 +180,11 @@ with expected_warnings(['operator.*deprecated|\A\Z']): new_image = grey.black_tophat(image) - footprint = ndi.generate_binary_structure(3,1) + footprint = ndi.generate_binary_structure(3, 1) with expected_warnings(['operator.*deprecated|\A\Z']): - image_expected = ndi.black_tophat(image,footprint=footprint) - testing.assert_array_equal(new_image, image_expected) + image_expected = ndi.black_tophat( + image.view(dtype=np.uint8), footprint=footprint) + assert_array_equal(new_image, image_expected) def test_2d_ndimage_equivalence(): @@ -190,8 
+200,8 @@ ndimage_opened = ndi.grey_opening(image, footprint=selem) ndimage_closed = ndi.grey_closing(image, footprint=selem) - testing.assert_array_equal(opened, ndimage_opened) - testing.assert_array_equal(closed, ndimage_closed) + assert_array_equal(opened, ndimage_opened) + assert_array_equal(closed, ndimage_closed) # float test images @@ -225,6 +235,7 @@ [ 0.79, 0.79, 0.83, 0.78, 0.87], [ 0.98, 0.83, 0.78, 0.78, 0.78]]) + def test_float(): np.testing.assert_allclose(grey.erosion(im), eroded) np.testing.assert_allclose(grey.dilation(im), dilated) @@ -259,10 +270,13 @@ [0, 0, 0, 0, 0], [3, 0, 1, 0, 1]], np.uint8) grey.dilation(image, out=out_array) - testing.assert_array_equal(out_array_big, expected_dilation) + assert_array_equal(out_array_big, expected_dilation) grey.erosion(image, out=out_array) testing.assert_array_equal(out_array_big, expected_erosion) -if __name__ == '__main__': - testing.run_module_suite() +def test_1d_erosion(): + image = np.array([1, 2, 3, 2, 1]) + expected = np.array([1, 1, 2, 1, 1]) + eroded = grey.erosion(image) + testing.assert_array_equal(eroded, expected) diff -Nru skimage-0.13.1/skimage/morphology/tests/test_misc.py skimage-0.14.0/skimage/morphology/tests/test_misc.py --- skimage-0.13.1/skimage/morphology/tests/test_misc.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_misc.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,8 +1,10 @@ import numpy as np -from numpy.testing import (assert_array_equal, assert_equal, assert_raises, - assert_warns) from skimage.morphology import remove_small_objects, remove_small_holes -from ..._shared._warnings import expected_warnings + +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal, assert_equal +from skimage._shared._warnings import expected_warnings + test_image = np.array([[0, 0, 0, 1, 0], [1, 1, 1, 0, 0], @@ -28,7 +30,7 @@ def test_in_place(): observed = remove_small_objects(test_image, min_size=6, in_place=True) 
assert_equal(observed is test_image, True, - "remove_small_objects in_place argument failed.") + "remove_small_objects in_place argument failed.") def test_labeled_image(): @@ -67,109 +69,122 @@ def test_float_input(): float_test = np.random.rand(5, 5) - assert_raises(TypeError, remove_small_objects, float_test) + with testing.raises(TypeError): + remove_small_objects(float_test) def test_negative_input(): negative_int = np.random.randint(-4, -1, size=(5, 5)) - assert_raises(ValueError, remove_small_objects, negative_int) + with testing.raises(ValueError): + remove_small_objects(negative_int) + + +test_holes_image = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 0, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], np.bool_) -test_holes_image = np.array([[0,0,0,0,0,0,1,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,0,0,1,1,0,0,0,0], - [0,1,1,1,0,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,0,0,0,0,0,0,1,1,1], - [0,0,0,0,0,0,0,1,0,1], - [0,0,0,0,0,0,0,1,1,1]], bool) def test_one_connectivity_holes(): - expected = np.array([[0,0,0,0,0,0,1,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,0,0,0,0,0,0,1,1,1], - [0,0,0,0,0,0,0,1,1,1], - [0,0,0,0,0,0,0,1,1,1]], bool) - observed = remove_small_holes(test_holes_image, min_size=3) + expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], np.bool_) + observed = remove_small_holes(test_holes_image, area_threshold=3) assert_array_equal(observed, expected) def test_two_connectivity_holes(): - expected = np.array([[0,0,0,0,0,0,1,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - 
[0,1,0,0,1,1,0,0,0,0], - [0,1,1,1,0,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,0,0,0,0,0,0,1,1,1], - [0,0,0,0,0,0,0,1,1,1], - [0,0,0,0,0,0,0,1,1,1]], bool) - observed = remove_small_holes(test_holes_image, min_size=3, connectivity=2) + expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], np.bool_) + observed = remove_small_holes(test_holes_image, area_threshold=3, + connectivity=2) assert_array_equal(observed, expected) + def test_in_place_holes(): - observed = remove_small_holes(test_holes_image, min_size=3, in_place=True) + observed = remove_small_holes(test_holes_image, area_threshold=3, + in_place=True) assert_equal(observed is test_holes_image, True, - "remove_small_holes in_place argument failed.") + "remove_small_holes in_place argument failed.") + def test_labeled_image_holes(): - labeled_holes_image = np.array([[0,0,0,0,0,0,1,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,0,0,1,1,0,0,0,0], - [0,1,1,1,0,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,0,0,0,0,0,0,2,2,2], - [0,0,0,0,0,0,0,2,0,2], - [0,0,0,0,0,0,0,2,2,2]], dtype=int) - expected = np.array([[0,0,0,0,0,0,1,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,0,0,0,0,0,0,1,1,1], - [0,0,0,0,0,0,0,1,1,1], - [0,0,0,0,0,0,0,1,1,1]], dtype=bool) - observed = remove_small_holes(labeled_holes_image, min_size=3) + labeled_holes_image = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 2, 2, 2], + [0, 0, 0, 0, 0, 0, 0, 2, 0, 2], + [0, 0, 0, 0, 0, 0, 0, 2, 2, 2]], + dtype=np.int_) + expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 
1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=np.bool_) + with expected_warnings(['returned as a boolean array']): + observed = remove_small_holes(labeled_holes_image, area_threshold=3) assert_array_equal(observed, expected) def test_uint_image_holes(): - labeled_holes_image = np.array([[0,0,0,0,0,0,1,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,0,0,1,1,0,0,0,0], - [0,1,1,1,0,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,0,0,0,0,0,0,2,2,2], - [0,0,0,0,0,0,0,2,0,2], - [0,0,0,0,0,0,0,2,2,2]], dtype=np.uint8) - expected = np.array([[0,0,0,0,0,0,1,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,0,0,0,0,0,0,1,1,1], - [0,0,0,0,0,0,0,1,1,1], - [0,0,0,0,0,0,0,1,1,1]], dtype=bool) - observed = remove_small_holes(labeled_holes_image, min_size=3) + labeled_holes_image = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 2, 2, 2], + [0, 0, 0, 0, 0, 0, 0, 2, 0, 2], + [0, 0, 0, 0, 0, 0, 0, 2, 2, 2]], + dtype=np.uint8) + expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=np.bool_) + with expected_warnings(['returned as a boolean array']): + observed = remove_small_holes(labeled_holes_image, area_threshold=3) assert_array_equal(observed, expected) def test_label_warning_holes(): - labeled_holes_image = np.array([[0,0,0,0,0,0,1,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,1,0,0,1,1,0,0,0,0], - [0,1,1,1,0,1,0,0,0,0], - [0,1,1,1,1,1,0,0,0,0], - [0,0,0,0,0,0,0,2,2,2], - [0,0,0,0,0,0,0,2,0,2], 
- [0,0,0,0,0,0,0,2,2,2]], dtype=int) + labeled_holes_image = np.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 2, 2, 2], + [0, 0, 0, 0, 0, 0, 0, 2, 0, 2], + [0, 0, 0, 0, 0, 0, 0, 2, 2, 2]], + dtype=np.int_) with expected_warnings(['use a boolean array?']): - remove_small_holes(labeled_holes_image, min_size=3) + remove_small_holes(labeled_holes_image, area_threshold=3) + remove_small_holes(labeled_holes_image.astype(bool), area_threshold=3) + def test_float_input_holes(): float_test = np.random.rand(5, 5) - assert_raises(TypeError, remove_small_holes, float_test) - -if __name__ == "__main__": - np.testing.run_module_suite() + with testing.raises(TypeError): + remove_small_holes(float_test) diff -Nru skimage-0.13.1/skimage/morphology/tests/test_reconstruction.py skimage-0.14.0/skimage/morphology/tests/test_reconstruction.py --- skimage-0.13.1/skimage/morphology/tests/test_reconstruction.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_reconstruction.py 2018-05-29 01:27:44.000000000 +0000 @@ -8,27 +8,29 @@ Original author: Lee Kamentsky """ import numpy as np -from numpy.testing import (assert_array_almost_equal as assert_close, - assert_raises) from skimage.morphology.greyreconstruct import reconstruction +from skimage._shared import testing +from skimage._shared.testing import assert_array_almost_equal def test_zeros(): """Test reconstruction with image and mask of zeros""" - assert_close(reconstruction(np.zeros((5, 7)), np.zeros((5, 7))), 0) + assert_array_almost_equal( + reconstruction(np.zeros((5, 7)), np.zeros((5, 7))), 0) def test_image_equals_mask(): """Test reconstruction where the image and mask are the same""" - assert_close(reconstruction(np.ones((7, 5)), np.ones((7, 5))), 1) + assert_array_almost_equal( + reconstruction(np.ones((7, 5)), np.ones((7, 5))), 1) def 
test_image_less_than_mask(): """Test reconstruction where the image is uniform and less than mask""" image = np.ones((5, 5)) mask = np.ones((5, 5)) * 2 - assert_close(reconstruction(image, mask), 1) + assert_array_almost_equal(reconstruction(image, mask), 1) def test_one_image_peak(): @@ -36,7 +38,7 @@ image = np.ones((5, 5)) image[2, 2] = 2 mask = np.ones((5, 5)) * 3 - assert_close(reconstruction(image, mask), 2) + assert_array_almost_equal(reconstruction(image, mask), 2) def test_two_image_peaks(): @@ -61,13 +63,13 @@ [1, 1, 1, 1, 1, 3, 3, 3], [1, 1, 1, 1, 1, 3, 3, 3], [1, 1, 1, 1, 1, 3, 3, 3]]) - assert_close(reconstruction(image, mask), expected) + assert_array_almost_equal(reconstruction(image, mask), expected) def test_zero_image_one_mask(): """Test reconstruction with an image of all zeros and a mask that's not""" result = reconstruction(np.zeros((10, 10)), np.ones((10, 10))) - assert_close(result, 0) + assert_array_almost_equal(result, 0) def test_fill_hole(): @@ -75,34 +77,34 @@ seed = np.array([0, 8, 8, 8, 8, 8, 8, 8, 8, 0]) mask = np.array([0, 3, 6, 2, 1, 1, 1, 4, 2, 0]) result = reconstruction(seed, mask, method='erosion') - assert_close(result, np.array([0, 3, 6, 4, 4, 4, 4, 4, 2, 0])) + assert_array_almost_equal(result, np.array([0, 3, 6, 4, 4, 4, 4, 4, 2, 0])) def test_invalid_seed(): seed = np.ones((5, 5)) mask = np.ones((5, 5)) - assert_raises(ValueError, reconstruction, seed * 2, mask, - method='dilation') - assert_raises(ValueError, reconstruction, seed * 0.5, mask, - method='erosion') + with testing.raises(ValueError): + reconstruction(seed * 2, mask, + method='dilation') + with testing.raises(ValueError): + reconstruction(seed * 0.5, mask, + method='erosion') def test_invalid_selem(): seed = np.ones((5, 5)) mask = np.ones((5, 5)) - assert_raises(ValueError, reconstruction, seed, mask, - selem=np.ones((4, 4))) - assert_raises(ValueError, reconstruction, seed, mask, - selem=np.ones((3, 4))) + with testing.raises(ValueError): + 
reconstruction(seed, mask, + selem=np.ones((4, 4))) + with testing.raises(ValueError): + reconstruction(seed, mask, + selem=np.ones((3, 4))) reconstruction(seed, mask, selem=np.ones((3, 3))) def test_invalid_method(): seed = np.array([0, 8, 8, 8, 8, 8, 8, 8, 8, 0]) mask = np.array([0, 3, 6, 2, 1, 1, 1, 4, 2, 0]) - assert_raises(ValueError, reconstruction, seed, mask, method='foo') - - -if __name__ == '__main__': - from numpy import testing - testing.run_module_suite() + with testing.raises(ValueError): + reconstruction(seed, mask, method='foo') diff -Nru skimage-0.13.1/skimage/morphology/tests/test_selem.py skimage-0.14.0/skimage/morphology/tests/test_selem.py --- skimage-0.13.1/skimage/morphology/tests/test_selem.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_selem.py 2018-05-29 01:27:44.000000000 +0000 @@ -7,11 +7,12 @@ import os.path import numpy as np -from numpy.testing import assert_equal from skimage import data_dir from skimage.morphology import selem +from skimage._shared.testing import assert_equal + class TestSElem(): @@ -135,7 +136,8 @@ [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]], + dtype=np.uint8) actual_mask1 = selem.star(4) expected_mask2 = np.array([[1, 1, 1], [1, 1, 1], @@ -143,7 +145,3 @@ actual_mask2 = selem.star(1) assert_equal(expected_mask1, actual_mask1) assert_equal(expected_mask2, actual_mask2) - - -if __name__ == '__main__': - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/morphology/tests/test_skeletonize_3d.py skimage-0.14.0/skimage/morphology/tests/test_skeletonize_3d.py --- skimage-0.13.1/skimage/morphology/tests/test_skeletonize_3d.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_skeletonize_3d.py 2018-05-29 01:27:44.000000000 +0000 @@ -4,27 +4,27 @@ import 
warnings import numpy as np -from numpy.testing import (assert_equal, run_module_suite, assert_raises, - assert_) - import scipy.ndimage as ndi -import skimage from skimage import io, draw, data_dir from skimage.data import binary_blobs from skimage.util import img_as_ubyte - from skimage.morphology import skeletonize_3d +from skimage._shared import testing +from skimage._shared.testing import assert_equal, assert_, parametrize +from skimage._shared._warnings import expected_warnings # basic behavior tests (mostly copied over from 2D skeletonize) def test_skeletonize_wrong_dim(): im = np.zeros(5, dtype=np.uint8) - assert_raises(ValueError, skeletonize_3d, im) + with testing.raises(ValueError): + skeletonize_3d(im) im = np.zeros((5, 5, 5, 5), dtype=np.uint8) - assert_raises(ValueError, skeletonize_3d, im) + with testing.raises(ValueError): + skeletonize_3d(im) def test_skeletonize_1D(): @@ -71,34 +71,41 @@ img[img < 0.5] = 0 orig = img.copy() - - with warnings.catch_warnings(): - # UserWarning for possible precision loss, expected - warnings.simplefilter('ignore', UserWarning) + with expected_warnings(['precision']): res = skeletonize_3d(img) + with expected_warnings(['precision']): + img_max = img_as_ubyte(img).max() assert_equal(res.dtype, np.uint8) - assert_equal(img, orig) # operation does not clobber the original - assert_equal(res.max(), - img_as_ubyte(img).max()) # the intensity range is preserved + assert_equal(img, orig) # operation does not clobber the original + assert_equal(res.max(), img_max) # the intensity range is preserved + + +@parametrize("img", [ + np.ones((8, 8), dtype=float), np.ones((4, 8, 8), dtype=float) +]) +def test_input_with_warning(img): + # check that the input is not clobbered + # for 2D and 3D images of varying dtypes + # Skeletonize changes it to uint8. Therefore, for images of type float, + # we can expect a warning. 
+ with expected_warnings(['precision']): + check_input(img) -def test_input(): +@parametrize("img", [ + np.ones((8, 8), dtype=np.uint8), np.ones((4, 8, 8), dtype=np.uint8), + np.ones((8, 8), dtype=bool), np.ones((4, 8, 8), dtype=bool) +]) +def test_input_without_warning(img): # check that the input is not clobbered # for 2D and 3D images of varying dtypes - imgs = [np.ones((8, 8), dtype=float), np.ones((4, 8, 8), dtype=float), - np.ones((8, 8), dtype=np.uint8), np.ones((4, 8, 8), dtype=np.uint8), - np.ones((8, 8), dtype=bool), np.ones((4, 8, 8), dtype=bool)] - for img in imgs: - yield check_input, img + check_input(img) def check_input(img): orig = img.copy() - with warnings.catch_warnings(): - # UserWarning for possible precision loss, expected - warnings.simplefilter('ignore', UserWarning) - res = skeletonize_3d(img) + skeletonize_3d(img) assert_equal(img, orig) @@ -125,7 +132,8 @@ circle2 = (ic - 135)**2 + (ir - 150)**2 < 20**2 image[circle1] = 1 image[circle2] = 0 - result = skeletonize_3d(image) + with expected_warnings(['precision']): + result = skeletonize_3d(image) # there should never be a 2x2 block of foreground pixels in a skeleton mask = np.array([[1, 1], @@ -150,7 +158,7 @@ [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], - dtype=np.uint8) + dtype=np.uint8) img_f = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0], @@ -165,7 +173,7 @@ [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], - dtype=np.uint8) + dtype=np.uint8) res = skeletonize_3d(img_o) assert_equal(res, img_f) @@ -180,7 +188,3 @@ img_s = skeletonize_3d(img) img_f = io.imread(os.path.join(data_dir, "_blobs_3d_fiji_skeleton.tif")) assert_equal(img_s, img_f) - - -if __name__ == 
'__main__': - run_module_suite() diff -Nru skimage-0.13.1/skimage/morphology/tests/test_skeletonize.py skimage-0.14.0/skimage/morphology/tests/test_skeletonize.py --- skimage-0.13.1/skimage/morphology/tests/test_skeletonize.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_skeletonize.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,39 +1,45 @@ +import os import numpy as np from skimage.morphology import skeletonize, medial_axis, thin from skimage.morphology._skeletonize import (_generate_thin_luts, G123_LUT, G123P_LUT) -import numpy.testing from skimage import draw from scipy.ndimage import correlate from skimage.io import imread from skimage import data_dir -import os.path + +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal class TestSkeletonize(): def test_skeletonize_no_foreground(self): im = np.zeros((5, 5)) result = skeletonize(im) - numpy.testing.assert_array_equal(result, np.zeros((5, 5))) + assert_array_equal(result, np.zeros((5, 5))) def test_skeletonize_wrong_dim1(self): im = np.zeros((5)) - numpy.testing.assert_raises(ValueError, skeletonize, im) + with testing.raises(ValueError): + skeletonize(im) def test_skeletonize_wrong_dim2(self): im = np.zeros((5, 5, 5)) - numpy.testing.assert_raises(ValueError, skeletonize, im) + with testing.raises(ValueError): + skeletonize(im) def test_skeletonize_not_binary(self): im = np.zeros((5, 5)) im[0, 0] = 1 im[0, 1] = 2 - numpy.testing.assert_raises(ValueError, skeletonize, im) + with testing.raises(ValueError): + skeletonize(im) def test_skeletonize_unexpected_value(self): im = np.zeros((5, 5)) im[0, 0] = 2 - numpy.testing.assert_raises(ValueError, skeletonize, im) + with testing.raises(ValueError): + skeletonize(im) def test_skeletonize_all_foreground(self): im = np.ones((3, 4)) @@ -43,7 +49,7 @@ im = np.zeros((5, 5), np.uint8) im[3, 3] = 1 result = skeletonize(im) - numpy.testing.assert_array_equal(result, im) + assert_array_equal(result, 
im) def test_skeletonize_already_thinned(self): im = np.zeros((5, 5), np.uint8) @@ -51,17 +57,17 @@ im[2, -1] = 1 im[4, 0] = 1 result = skeletonize(im) - numpy.testing.assert_array_equal(result, im) + assert_array_equal(result, im) def test_skeletonize_output(self): - im = imread(os.path.join(data_dir, "bw_text.png"), as_grey=True) + im = imread(os.path.join(data_dir, "bw_text.png"), as_gray=True) # make black the foreground im = (im == 0) result = skeletonize(im) expected = np.load(os.path.join(data_dir, "bw_text_skeleton.npy")) - numpy.testing.assert_array_equal(result, expected) + assert_array_equal(result, expected) def test_skeletonize_num_neighbours(self): # an empty image @@ -92,7 +98,7 @@ mask = np.array([[1, 1], [1, 1]], np.uint8) blocks = correlate(result, mask, mode='constant') - assert not numpy.any(blocks == 4) + assert not np.any(blocks == 4) def test_lut_fix(self): im = np.zeros((6, 6), np.uint8) @@ -138,7 +144,7 @@ [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) - numpy.testing.assert_array_equal(result, expected) + assert_array_equal(result, expected) def test_noiter(self): result = thin(self.input_image).astype(np.uint8) @@ -149,17 +155,18 @@ [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) - numpy.testing.assert_array_equal(result, expected) + assert_array_equal(result, expected) def test_baddim(self): for ii in [np.zeros((3)), np.zeros((3, 3, 3))]: - numpy.testing.assert_raises(ValueError, thin, ii) + with testing.raises(ValueError): + thin(ii) def test_lut_generation(self): g123, g123p = _generate_thin_luts() - numpy.testing.assert_array_equal(g123, G123_LUT) - numpy.testing.assert_array_equal(g123p, G123P_LUT) + assert_array_equal(g123, G123_LUT) + assert_array_equal(g123p, G123P_LUT) class TestMedialAxis(): @@ -171,7 +178,7 @@ def test_00_01_zeros_masked(self): '''Test skeletonize on an array that is completely masked''' result = medial_axis(np.zeros((10, 10), bool), - 
np.zeros((10, 10), bool)) + np.zeros((10, 10), bool)) assert np.all(result == False) def test_01_01_rectangle(self): @@ -191,7 +198,7 @@ [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], - bool) + dtype=np.bool_) result = medial_axis(image) assert np.all(result == expected) result, distance = medial_axis(image, return_distance=True) @@ -211,7 +218,7 @@ [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], - bool) + dtype=np.bool_) result = medial_axis(image) assert np.all(result == expected) @@ -221,7 +228,3 @@ image[:, 1:-1] = True result = medial_axis(image) assert np.all(result == image) - - -if __name__ == '__main__': - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/morphology/tests/test_watershed.py skimage-0.14.0/skimage/morphology/tests/test_watershed.py --- skimage-0.13.1/skimage/morphology/tests/test_watershed.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/tests/test_watershed.py 2018-05-29 01:27:44.000000000 +0000 @@ -44,7 +44,7 @@ import math import unittest - +import pytest import numpy as np from scipy import ndimage as ndi @@ -483,5 +483,11 @@ np.testing.assert_equal(compact, expected) +def test_incorrect_markers_shape(): + with pytest.raises(ValueError): + image = np.ones((5, 6)) + markers = np.ones((5, 7)) + output = watershed(image, markers) + if __name__ == "__main__": np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/morphology/watershed.py skimage-0.14.0/skimage/morphology/watershed.py --- skimage-0.13.1/skimage/morphology/watershed.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/watershed.py 2018-05-29 01:27:44.000000000 +0000 @@ -102,17 +102,21 @@ """ if connectivity is None: connectivity = 1 + if np.isscalar(connectivity): c_connectivity = ndi.generate_binary_structure(image_dim, 
connectivity) else: c_connectivity = np.array(connectivity, bool) if c_connectivity.ndim != image_dim: raise ValueError("Connectivity dimension must be same as image") + if offset is None: if any([x % 2 == 0 for x in c_connectivity.shape]): raise ValueError("Connectivity array must have an unambiguous " "center") + offset = np.array(c_connectivity.shape) // 2 + return c_connectivity, offset @@ -126,7 +130,7 @@ locations = np.transpose(np.nonzero(structure)) sqdistances = np.sum((locations - offset)**2, axis=1) neighborhood = (np.ravel_multi_index(locations.T, image.shape) - - np.ravel_multi_index(offset, image.shape)).astype(np.int32) + np.ravel_multi_index(offset, image.shape)) sorted_neighborhood = neighborhood[np.argsort(sqdistances)] return sorted_neighborhood @@ -250,8 +254,8 @@ output = np.pad(markers, pad_width, mode='constant') flat_neighborhood = _compute_neighbors(image, connectivity, offset) - marker_locations = np.flatnonzero(output).astype(np.int32) - image_strides = np.array(image.strides, dtype=np.int32) // image.itemsize + marker_locations = np.flatnonzero(output) + image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize _watershed.watershed_raveled(image.ravel(), marker_locations, flat_neighborhood, @@ -261,8 +265,4 @@ output = crop(output, pad_width, copy=True) - if watershed_line: - min_val = output.min() - output[output == min_val] = 0 - return output diff -Nru skimage-0.13.1/skimage/morphology/_watershed.pyx skimage-0.14.0/skimage/morphology/_watershed.pyx --- skimage-0.13.1/skimage/morphology/_watershed.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/morphology/_watershed.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -23,12 +23,13 @@ include "heap_watershed.pxi" +@cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) @cython.overflowcheck(False) @cython.unraisable_tracebacks(False) -cdef inline double _euclid_dist(cnp.int32_t pt0, cnp.int32_t pt1, - cnp.int32_t[::1] strides): +cdef inline 
double _euclid_dist(Py_ssize_t pt0, Py_ssize_t pt1, + cnp.intp_t[::1] strides): """Return the Euclidean distance between raveled points pt0 and pt1.""" cdef double result = 0 cdef double curr = 0 @@ -40,12 +41,47 @@ return sqrt(result) +@cython.wraparound(False) @cython.boundscheck(False) +@cython.cdivision(True) +@cython.unraisable_tracebacks(False) +cdef inline DTYPE_BOOL_t _diff_neighbors(DTYPE_INT32_t[::1] output, + cnp.intp_t[::1] structure, + DTYPE_BOOL_t[::1] mask, + Py_ssize_t index): + """ + Return ``True`` and set ``mask[index]`` to ``False`` if the neighbors of + ``index`` (as given by the offsets in ``structure``) have more than one + distinct nonzero label. + """ + cdef: + Py_ssize_t i, neighbor_index + DTYPE_INT32_t neighbor_label0, neighbor_label1 + Py_ssize_t nneighbors = structure.shape[0] + + if not mask[index]: + return True + + neighbor_label0, neighbor_label1 = 0, 0 + for i in range(nneighbors): + neighbor_index = structure[i] + index + if mask[neighbor_index]: # neighbor not a watershed line + if not neighbor_label0: + neighbor_label0 = output[neighbor_index] + else: + neighbor_label1 = output[neighbor_index] + if neighbor_label1 and neighbor_label1 != neighbor_label0: + mask[index] = False + return True + return False + +@cython.boundscheck(False) +@cython.wraparound(False) def watershed_raveled(cnp.float64_t[::1] image, - DTYPE_INT32_t[::1] marker_locations, - DTYPE_INT32_t[::1] structure, + cnp.intp_t[::1] marker_locations, + cnp.intp_t[::1] structure, DTYPE_BOOL_t[::1] mask, - cnp.int32_t[::1] strides, + cnp.intp_t[::1] strides, cnp.double_t compactness, DTYPE_INT32_t[::1] output, DTYPE_BOOL_t wsl): @@ -89,7 +125,8 @@ cdef Py_ssize_t i = 0 cdef Py_ssize_t age = 1 cdef Py_ssize_t index = 0 - cdef DTYPE_INT32_t wsl_label = -1 + cdef Py_ssize_t neighbor_index = 0 + cdef DTYPE_BOOL_t compact = (compactness > 0) cdef Heap *hp = heap_from_numpy2() @@ -100,52 +137,58 @@ elem.index = index elem.source = index heappush(hp, &elem) - if wsl and 
wsl_label >= output[index]: - wsl_label = output[index] - 1 while hp.items > 0: heappop(hp, &elem) - # this can happen if the same pixel entered the queue - # several times before being processed. - if wsl and output[elem.index] == wsl_label: - # wsl labels are not propagated. - continue - - if output[elem.index] and elem.index != elem.source: - # non-marker, already visited from another neighbor - continue + if compact or wsl: + # in the compact case, we need to label pixels as they come off + # the heap, because the same pixel can be pushed twice, *and* the + # later push can have lower cost because of the compactness. + # + # In the case of preserving watershed lines, a similar argument + # applies: we can only observe that all neighbors have been labeled + # as the pixel comes off the heap. Trying to do so at push time + # is a bug. + if output[elem.index] and elem.index != elem.source: + # non-marker, already visited from another neighbor + continue + if wsl: + # if the current element has different-labeled neighbors and we + # want to preserve watershed lines, we mask it and move on + if _diff_neighbors(output, structure, mask, elem.index): + continue + output[elem.index] = output[elem.source] - output[elem.index] = output[elem.source] for i in range(nneighbors): # get the flattened address of the neighbor - index = structure[i] + elem.index + neighbor_index = structure[i] + elem.index - if not mask[index]: + if not mask[neighbor_index]: + # this branch includes basin boundaries, aka watershed lines # neighbor is not in mask continue - if wsl and output[index] == wsl_label: - continue - - if output[index]: - # neighbor has a label (but not wsl_label): - # the neighbor is not added to the queue. - if wsl: - # if the label of the neighbor is different - # from the label of the pixel taken from the queue, - # the latter takes the WSL label. 
- if output[index] != output[elem.index]: - output[elem.index] = wsl_label + if output[neighbor_index]: + # pre-labeled neighbor is not added to the queue. continue age += 1 - new_elem.value = image[index] - if compactness > 0: + new_elem.value = image[neighbor_index] + if compact: new_elem.value += (compactness * - _euclid_dist(index, elem.source, strides)) + _euclid_dist(neighbor_index, elem.source, + strides)) + elif not wsl: + # in the simplest watershed case (no compactness and no + # watershed lines), we can label a pixel at the time that + # we push it onto the heap, because it can't be reached with + # lower cost later. + # This results in a very significant performance gain, see: + # https://github.com/scikit-image/scikit-image/issues/2636 + output[neighbor_index] = output[elem.index] new_elem.age = age - new_elem.index = index + new_elem.index = neighbor_index new_elem.source = elem.source heappush(hp, &new_elem) diff -Nru skimage-0.13.1/skimage/novice/__init__.py skimage-0.14.0/skimage/novice/__init__.py --- skimage-0.13.1/skimage/novice/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/novice/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -96,7 +96,11 @@ >>> picture.compare() # doctest: +SKIP """ +import warnings from ._novice import Picture, open, colors, color_dict +warnings.warn("The `skimage.novice` module was deprecated in version 0.14. " + "It will be removed in 0.16.") + __all__ = ['Picture', 'open', 'colors', 'color_dict'] diff -Nru skimage-0.13.1/skimage/novice/_novice.py skimage-0.14.0/skimage/novice/_novice.py --- skimage-0.13.1/skimage/novice/_novice.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/novice/_novice.py 2018-05-29 01:27:44.000000000 +0000 @@ -326,6 +326,9 @@ path : str Path (with file extension) where the picture is saved. 
""" + if (self.array.ndim == 3 and self.array.shape[-1] == 4 and + os.path.splitext(path)[-1].lower() in ['.jpg', '.jpeg']): + self.array = self.array[..., :-1] io.imsave(path, self.array) self._modified = False self._path = os.path.abspath(path) @@ -368,7 +371,8 @@ # skimage dimensions are flipped: y, x new_size = (int(value[1]), int(value[0])) new_array = resize(self.array, new_size, order=0, - preserve_range=True) + preserve_range=True, mode='constant', + anti_aliasing=False) self.array = new_array.astype(np.uint8) self._array_modified() diff -Nru skimage-0.13.1/skimage/novice/tests/test_novice.py skimage-0.14.0/skimage/novice/tests/test_novice.py --- skimage-0.13.1/skimage/novice/tests/test_novice.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/novice/tests/test_novice.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,14 +2,17 @@ import tempfile import numpy as np -from nose.tools import assert_true -from numpy.testing import assert_equal, raises, assert_allclose from skimage import novice from skimage.novice._novice import (array_to_xy_origin, xy_to_array_origin, rgb_transpose) from skimage import data_dir + +from skimage._shared import testing +from skimage._shared.testing import (assert_equal, assert_allclose, + expected_warnings) from skimage._shared.utils import all_warnings + IMAGE_PATH = os.path.join(data_dir, "chelsea.png") SMALL_IMAGE_PATH = os.path.join(data_dir, "block.png") @@ -95,7 +98,8 @@ pixel.rgba = np.arange(4) assert_equal(pixel.rgba, np.arange(4)) - for i, channel in enumerate((pixel.red, pixel.green, pixel.blue, pixel.alpha)): + pixel_channels = (pixel.red, pixel.green, pixel.blue, pixel.alpha) + for i, channel in enumerate(pixel_channels): assert_equal(channel, i) pixel.red = 3 @@ -104,7 +108,8 @@ pixel.alpha = 6 assert_equal(pixel.rgba, np.arange(4) + 3) - for i, channel in enumerate((pixel.red, pixel.green, pixel.blue, pixel.alpha)): + pixel_channels = (pixel.red, pixel.green, pixel.blue, pixel.alpha) + for i, channel in 
enumerate(pixel_channels): assert_equal(channel, i + 3) @@ -141,30 +146,42 @@ v = pic[0, 0] pic[0, 0] = (1, 1, 1) pic.reset() - assert_true(pic[0, 0] == v) + assert pic[0, 0] == v def test_update_on_save(): pic = novice.Picture(array=np.zeros((3, 3, 3))) - pic[0, 0] = (255, 255, 255) # prevent attempting to save low-contrast image + # prevent attempting to save low-contrast image + pic[0, 0] = (255, 255, 255) with all_warnings(): # precision loss pic.size = (6, 6) assert pic.modified assert pic.path is None - fd, filename = tempfile.mkstemp(suffix=".jpg") + fd, filename = tempfile.mkstemp(suffix=".png") os.close(fd) try: pic.save(filename) assert not pic.modified assert_equal(pic.path, os.path.abspath(filename)) - assert_equal(pic.format, "jpeg") + assert_equal(pic.format, "png") finally: os.unlink(filename) +def test_save_with_alpha_channel(): + # create an image with an alpha channel + pic = novice.Picture(array=np.zeros((3, 3, 4))) + + fd, filename = tempfile.mkstemp(suffix=".png") + os.close(fd) + with expected_warnings(['is a low contrast']): + pic.save(filename) + os.unlink(filename) + + def test_indexing(): array = 128 * np.ones((10, 10, 3), dtype=np.uint8) pic = novice.Picture(array=array) @@ -259,65 +276,61 @@ assert sliced_pic == novice.Picture(array=array[::2, ::2]) -@raises(IndexError) def test_1d_getitem_raises(): pic = novice.Picture.from_size((1, 1)) - pic[1] + with testing.raises(IndexError): + pic[1] -@raises(IndexError) def test_3d_getitem_raises(): pic = novice.Picture.from_size((1, 1)) - pic[1, 2, 3] + with testing.raises(IndexError): + pic[1, 2, 3] -@raises(IndexError) def test_1d_setitem_raises(): pic = novice.Picture.from_size((1, 1)) - pic[1] = 0 + with testing.raises(IndexError): + pic[1] = 0 -@raises(IndexError) def test_3d_setitem_raises(): pic = novice.Picture.from_size((1, 1)) - pic[1, 2, 3] = 0 + with testing.raises(IndexError): + pic[1, 2, 3] = 0 -@raises(IndexError) def test_out_of_bounds_indexing(): pic = 
novice.open(SMALL_IMAGE_PATH) - pic[pic.width, pic.height] + with testing.raises(IndexError): + pic[pic.width, pic.height] -@raises(ValueError) def test_pixel_rgb_raises(): pixel = novice.Picture.from_size((1, 1))[0, 0] - pixel.rgb = (-1, -1, -1) + with testing.raises(ValueError): + pixel.rgb = (-1, -1, -1) -@raises(ValueError) def test_pixel_red_raises(): pixel = novice.Picture.from_size((1, 1))[0, 0] - pixel.red = 256 + with testing.raises(ValueError): + pixel.red = 256 -@raises(ValueError) def test_pixel_green_raises(): pixel = novice.Picture.from_size((1, 1))[0, 0] - pixel.green = 256 + with testing.raises(ValueError): + pixel.green = 256 -@raises(ValueError) def test_pixel_blue_raises(): pixel = novice.Picture.from_size((1, 1))[0, 0] - pixel.blue = 256 + with testing.raises(ValueError): + pixel.blue = 256 -@raises(ValueError) def test_pixel_alpha_raises(): pixel = novice.Picture.from_size((1, 1))[0, 0] - pixel.alpha = 256 - - -if __name__ == '__main__': - np.testing.run_module_suite() + with testing.raises(ValueError): + pixel.alpha = 256 diff -Nru skimage-0.13.1/skimage/restoration/_cycle_spin.py skimage-0.14.0/skimage/restoration/_cycle_spin.py --- skimage-0.13.1/skimage/restoration/_cycle_spin.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/_cycle_spin.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,160 @@ +from __future__ import division + +from itertools import product + +import numpy as np +import dask + + +def _generate_shifts(ndim, multichannel, max_shifts, shift_steps=1): + """Returns all combinations of shifts in n dimensions over the specified + max_shifts and step sizes. 
+ + Examples + -------- + >>> s = list(_generate_shifts(2, False, max_shifts=(1, 2), shift_steps=1)) + >>> print(s) + [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)] + """ + mc = int(multichannel) + if np.isscalar(max_shifts): + max_shifts = (max_shifts, ) * (ndim - mc) + (0, ) * mc + elif multichannel and len(max_shifts) == ndim - 1: + max_shifts = tuple(max_shifts) + (0, ) + elif len(max_shifts) != ndim: + raise ValueError("max_shifts should have length ndim") + + if np.isscalar(shift_steps): + shift_steps = (shift_steps, ) * (ndim - mc) + (1, ) * mc + elif multichannel and len(shift_steps) == ndim - 1: + shift_steps = tuple(shift_steps) + (1, ) + elif len(shift_steps) != ndim: + raise ValueError("max_shifts should have length ndim") + + if np.any(np.asarray(shift_steps) < 1): + raise ValueError("shift_steps must all be >= 1") + + if multichannel and max_shifts[-1] != 0: + raise ValueError( + "Multichannel cycle spinning should not have shifts along the " + "last axis.") + + return product(*[range(0, s+1, t) for + s, t in zip(max_shifts, shift_steps)]) + + +def _roll_axes(x, rolls, axes=None): + """Apply np.roll along a set of axes. + + Parameters + ---------- + x : array-like + Array to roll. + rolls : int or sequence + The amount to roll along each axis in ``axes``. + axes : int or sequence, optional + The axes to roll. Default is the first ``len(rolls)`` axes of ``x``. + + Returns + ------- + x : array + Data with axes rolled. + """ + if axes is None: + axes = np.arange(len(rolls)) + # Replace this loop with x = np.roll(x, rolls, axes) when NumPy>=1.12 + # becomes a requirement. + for r, a in zip(rolls, axes): + x = np.roll(x, r, a) + return x + + +def cycle_spin(x, func, max_shifts, shift_steps=1, num_workers=None, + multichannel=False, func_kw={}): + """Cycle spinning (repeatedly apply func to shifted versions of x). + + Parameters + ---------- + x : array-like + Data for input to ``func``. 
+ func : function + A function to apply to circularly shifted versions of ``x``. Should + take ``x`` as its first argument. Any additional arguments can be + supplied via ``func_kw``. + max_shifts : int or tuple + If an integer, shifts in ``range(0, max_shifts+1)`` will be used along + each axis of ``x``. If a tuple, ``range(0, max_shifts[i]+1)`` will be + along axis i. + shift_steps : int or tuple, optional + The step size for the shifts applied along axis, i, are:: + ``range((0, max_shifts[i]+1, shift_steps[i]))``. If an integer is + provided, the same step size is used for all axes. + num_workers : int or None, optional + The number of parallel threads to use during cycle spinning. If set to + ``None``, the full set of available cores are used. + multichannel : bool, optional + Whether to treat the final axis as channels (no cycle shifts are + performed over the channels axis). + func_kw : dict, optional + Additional keyword arguments to supply to ``func``. + + Returns + ------- + avg_y : np.ndarray + The output of ``func(x, **func_kw)`` averaged over all combinations of + the specified axis shifts. + + Notes + ----- + Cycle spinning was proposed as a way to approach shift-invariance via + performing several circular shifts of a shift-variant transform [1]_. + + For a n-level discrete wavelet transforms, one may wish to perform all + shifts up to ``max_shifts = 2**n - 1``. In practice, much of the benefit + can often be realized with only a small number of shifts per axis. + + For transforms such as the blockwise discrete cosine transform, one may + wish to evaluate shifts up to the block size used by the transform. + + References + ---------- + .. [1] R.R. Coifman and D.L. Donoho. "Translation-Invariant De-Noising". + Wavelets and Statistics, Lecture Notes in Statistics, vol.103. + Springer, New York, 1995, pp.125-150. 
+ DOI:10.1007/978-1-4612-2544-7_9 + + Examples + -------- + >>> import skimage.data + >>> from skimage import img_as_float + >>> from skimage.restoration import denoise_wavelet, cycle_spin + >>> img = img_as_float(skimage.data.camera()) + >>> sigma = 0.1 + >>> img = img + sigma * np.random.standard_normal(img.shape) + >>> denoised = cycle_spin(img, func=denoise_wavelet, max_shifts=3) + + """ + x = np.asanyarray(x) + all_shifts = _generate_shifts(x.ndim, multichannel, max_shifts, + shift_steps) + all_shifts = list(all_shifts) + + def _run_one_shift(shift): + # shift, apply function, inverse shift + xs = _roll_axes(x, shift) + tmp = func(xs, **func_kw) + return _roll_axes(tmp, -np.asarray(shift)) + + # compute a running average across the cycle shifts + if num_workers == 1: + # serial processing + mean = _run_one_shift(all_shifts[0]) + for shift in all_shifts[1:]: + mean += _run_one_shift(shift) + mean /= len(all_shifts) + else: + # multithreaded via dask + futures = [dask.delayed(_run_one_shift)(s) for s in all_shifts] + mean = sum(futures) / len(futures) + mean = mean.compute(get=dask.threaded.get, num_workers=num_workers) + return mean diff -Nru skimage-0.13.1/skimage/restoration/deconvolution.py skimage-0.14.0/skimage/restoration/deconvolution.py --- skimage-0.13.1/skimage/restoration/deconvolution.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/deconvolution.py 2018-05-29 01:27:44.000000000 +0000 @@ -78,10 +78,10 @@ unknown original image, the Wiener filter is .. 
math:: - \hat x = F^\dag (|\Lambda_H|^2 + \lambda |\Lambda_D|^2) - \Lambda_H^\dag F y + \hat x = F^\dagger (|\Lambda_H|^2 + \lambda |\Lambda_D|^2) + \Lambda_H^\dagger F y - where :math:`F` and :math:`F^\dag` are the Fourier and inverse + where :math:`F` and :math:`F^\dagger` are the Fourier and inverse Fourier transfroms respectively, :math:`\Lambda_H` the transfer function (or the Fourier transfrom of the PSF, see [Hunt] below) and :math:`\Lambda_D` the filter to penalize the restored image @@ -186,12 +186,12 @@ samples, see Notes section). 1e-4 by default. burnin : int The number of sample to ignore to start computation of the - mean. 100 by default. + mean. 15 by default. min_iter : int The minimum number of iterations. 30 by default. max_iter : int The maximum number of iterations if ``threshold`` is not - satisfied. 150 by default. + satisfied. 200 by default. callback : callable (None by default) A user provided callable to which is passed, if the function exists, the current image sample for whatever purpose. 
The user diff -Nru skimage-0.13.1/skimage/restoration/_denoise_cy.pyx skimage-0.14.0/skimage/restoration/_denoise_cy.pyx --- skimage-0.13.1/skimage/restoration/_denoise_cy.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/_denoise_cy.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -11,8 +11,8 @@ from ..util import img_as_float -cdef inline double _gaussian_weight(double sigma, double value): - return exp(-0.5 * (value / sigma)**2) +cdef inline double _gaussian_weight(double sigma_sqr, double value): + return exp(-0.5 * value * value / sigma_sqr) cdef double[:] _compute_color_lut(Py_ssize_t bins, double sigma, double max_value): @@ -21,8 +21,11 @@ double[:] color_lut = np.empty(bins, dtype=np.double) Py_ssize_t b + sigma *= sigma + max_value /= bins + for b in range(bins): - color_lut[b] = _gaussian_weight(sigma, b * max_value / bins) + color_lut[b] = _gaussian_weight(sigma, b * max_value) return color_lut @@ -30,14 +33,18 @@ cdef double[:] _compute_range_lut(Py_ssize_t win_size, double sigma): cdef: - double[:] range_lut = np.empty(win_size**2, dtype=np.double) - Py_ssize_t kr, kc + double[:] range_lut = np.empty(win_size*win_size, dtype=np.double) + Py_ssize_t kr, kc, dr, dc Py_ssize_t window_ext = (win_size - 1) / 2 double dist + sigma *= sigma + for kr in range(win_size): for kc in range(win_size): - dist = sqrt((kr - window_ext)**2 + (kc - window_ext)**2) + dr = kr - window_ext + dc = kc - window_ext + dist = sqrt(dr * dr + dc * dc) range_lut[kr * win_size + kc] = _gaussian_weight(sigma, dist) return range_lut @@ -50,7 +57,7 @@ return value2 -def _denoise_bilateral(image, Py_ssize_t win_size, sigma_range, +def _denoise_bilateral(image, Py_ssize_t win_size, sigma_color, double sigma_spatial, Py_ssize_t bins, mode, double cval): cdef: @@ -87,17 +94,17 @@ double[:] range_lut Py_ssize_t r, c, d, wr, wc, kr, kc, rr, cc, pixel_addr, color_lut_bin - double value, weight, dist, total_weight, csigma_range, color_weight, \ - range_weight + double 
value, weight, dist, total_weight, csigma_color, color_weight, \ + range_weight, t double dist_scale double[:] values double[:] centres double[:] total_values - if sigma_range is None: - csigma_range = image.std() + if sigma_color is None: + csigma_color = image.std() else: - csigma_range = sigma_range + csigma_color = sigma_color if mode not in ('constant', 'wrap', 'symmetric', 'reflect', 'edge'): raise ValueError("Invalid mode specified. Please use `constant`, " @@ -107,7 +114,7 @@ cimage = np.ascontiguousarray(image) out = np.zeros((rows, cols, dims), dtype=np.double) - color_lut = _compute_color_lut(bins, csigma_range, max_value) + color_lut = _compute_color_lut(bins, csigma_color, max_value) range_lut = _compute_range_lut(win_size, sigma_spatial) dist_scale = bins / dims / max_value values = np.empty(dims, dtype=np.double) @@ -134,7 +141,8 @@ value = get_pixel3d(&cimage[0, 0, 0], rows, cols, dims, rr, cc, d, cmode, cval) values[d] = value - dist += (centres[d] - value)**2 + t = centres[d] - value + dist += t * t dist = sqrt(dist) range_weight = range_lut[kr * win_size + kc] @@ -179,7 +187,7 @@ double[:, :, ::1] bx = np.zeros(shape_ext, dtype=np.double) double[:, :, ::1] by = np.zeros(shape_ext, dtype=np.double) - double ux, uy, uprev, unew, bxx, byy, dxx, dyy, s + double ux, uy, uprev, unew, bxx, byy, dxx, dyy, s, tx, ty int i = 0 double lam = 2 * weight double rmse = DBL_MAX @@ -229,16 +237,19 @@ cu[r, c, k] = unew # update root mean square error - rmse += (unew - uprev)**2 + tx = unew - uprev + rmse += tx * tx bxx = bx[r, c, k] byy = by[r, c, k] # d_subproblem after reference [4] if isotropic: - s = sqrt((ux + bxx)**2 + (uy + byy)**2) - dxx = s * lam * (ux + bxx) / (s * lam + 1) - dyy = s * lam * (uy + byy) / (s * lam + 1) + tx = ux + bxx + ty = uy + byy + s = sqrt(tx * tx + ty * ty) + dxx = s * lam * tx / (s * lam + 1) + dyy = s * lam * ty / (s * lam + 1) else: s = ux + bxx diff -Nru skimage-0.13.1/skimage/restoration/_denoise.py 
skimage-0.14.0/skimage/restoration/_denoise.py --- skimage-0.13.1/skimage/restoration/_denoise.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/_denoise.py 2018-05-29 01:27:44.000000000 +0000 @@ -11,8 +11,7 @@ def denoise_bilateral(image, win_size=None, sigma_color=None, sigma_spatial=1, - bins=10000, mode='constant', cval=0, multichannel=None, - sigma_range=None): + bins=10000, mode='constant', cval=0, multichannel=None): """Denoise image using bilateral filter. This is an edge-preserving, denoising filter. It averages pixels based on @@ -111,14 +110,6 @@ "``multichannel=True`` for 2-D RGB " "images.".format(image.shape)) - if sigma_range is not None: - warn('`sigma_range` has been deprecated in favor of ' - '`sigma_color`. The `sigma_range` keyword argument ' - 'will be removed in v0.14', skimage_deprecation) - - # If sigma_range is provided, assign it to sigma_color - sigma_color = sigma_range - if win_size is None: win_size = max(5, 2 * int(ceil(3 * sigma_spatial)) + 1) @@ -173,12 +164,12 @@ return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic) -def _denoise_tv_chambolle_nd(im, weight=0.1, eps=2.e-4, n_iter_max=200): +def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.e-4, n_iter_max=200): """Perform total-variation denoising on n-dimensional images. Parameters ---------- - im : ndarray + image : ndarray n-D input data to be denoised. weight : float, optional Denoising weight. 
The greater `weight`, the more denoising (at @@ -203,10 +194,10 @@ """ - ndim = im.ndim - p = np.zeros((im.ndim, ) + im.shape, dtype=im.dtype) + ndim = image.ndim + p = np.zeros((image.ndim, ) + image.shape, dtype=image.dtype) g = np.zeros_like(p) - d = np.zeros_like(im) + d = np.zeros_like(image) i = 0 while i < n_iter_max: if i > 0: @@ -221,9 +212,9 @@ d[slices_d] += p[slices_p] slices_d[ax] = slice(None) slices_p[ax+1] = slice(None) - out = im + d + out = image + d else: - out = im + out = image E = (d ** 2).sum() # g stores the gradients of out along each axis @@ -242,7 +233,7 @@ norm += 1. p -= tau * g p /= norm - E /= float(im.size) + E /= float(image.size) if i == 0: E_init = E E_previous = E @@ -255,14 +246,14 @@ return out -def denoise_tv_chambolle(im, weight=0.1, eps=2.e-4, n_iter_max=200, +def denoise_tv_chambolle(image, weight=0.1, eps=2.e-4, n_iter_max=200, multichannel=False): """Perform total-variation denoising on n-dimensional images. Parameters ---------- - im : ndarray of ints, uints or floats - Input data to be denoised. `im` can be of any numeric type, + image : ndarray of ints, uints or floats + Input data to be denoised. `image` can be of any numeric type, but it is cast into an ndarray of floats for the computation of the denoised image. 
weight : float, optional @@ -327,17 +318,17 @@ """ - im_type = im.dtype + im_type = image.dtype if not im_type.kind == 'f': - im = img_as_float(im) + image = img_as_float(image) if multichannel: - out = np.zeros_like(im) - for c in range(im.shape[-1]): - out[..., c] = _denoise_tv_chambolle_nd(im[..., c], weight, eps, + out = np.zeros_like(image) + for c in range(image.shape[-1]): + out[..., c] = _denoise_tv_chambolle_nd(image[..., c], weight, eps, n_iter_max) else: - out = _denoise_tv_chambolle_nd(im, weight, eps, n_iter_max) + out = _denoise_tv_chambolle_nd(image, weight, eps, n_iter_max) return out @@ -350,6 +341,11 @@ return thresh +def _universal_thresh(img, sigma): + """ Universal threshold used by the VisuShrink method """ + return sigma*np.sqrt(2*np.log(img.size)) + + def _sigma_est_dwt(detail_coeffs, distribution='Gaussian'): """Calculate the robust median estimator of the noise standard deviation. @@ -385,27 +381,31 @@ return sigma -def _wavelet_threshold(img, wavelet, threshold=None, sigma=None, mode='soft', - wavelet_levels=None): +def _wavelet_threshold(image, wavelet, method=None, threshold=None, + sigma=None, mode='soft', wavelet_levels=None): """Perform wavelet thresholding. Parameters ---------- - img : ndarray (2d or 3d) of ints, uints or floats - Input data to be denoised. `img` can be of any numeric type, + image : ndarray (2d or 3d) of ints, uints or floats + Input data to be denoised. `image` can be of any numeric type, but it is cast into an ndarray of floats for the computation of the denoised image. wavelet : string The type of wavelet to perform. Can be any of the options pywt.wavelist outputs. For example, this may be any of ``{db1, db2, db3, db4, haar}``. + method : {'BayesShrink', 'VisuShrink'}, optional + Thresholding method to be used. The currently supported methods are + "BayesShrink" [1]_ and "VisuShrink" [2]_. If it is set to None, a + user-specified ``threshold`` must be supplied instead. 
+ threshold : float, optional + The thresholding value to apply during wavelet coefficient + thresholding. The default value (None) uses the selected ``method`` to + estimate appropriate threshold(s) for noise removal. sigma : float, optional The standard deviation of the noise. The noise is estimated when sigma is None (the default) by the method in [2]_. - threshold : float, optional - The thresholding value. All wavelet coefficients less than this value - are set to 0. The default value (None) uses the BayesShrink method - found in [1]_ to remove noise. mode : {'soft', 'hard'}, optional An optional argument to choose the type of denoising performed. It noted that choosing soft thresholding given additive noise finds the @@ -435,32 +435,45 @@ # original_extent is used to workaround PyWavelets issue #80 # odd-sized input results in an image with 1 extra sample after waverecn - original_extent = [slice(s) for s in img.shape] + original_extent = [slice(s) for s in image.shape] # Determine the number of wavelet decomposition levels if wavelet_levels is None: - # Determine the maximum number of possible levels for img + # Determine the maximum number of possible levels for image dlen = wavelet.dec_len wavelet_levels = np.min( - [pywt.dwt_max_level(s, dlen) for s in img.shape]) + [pywt.dwt_max_level(s, dlen) for s in image.shape]) # Skip coarsest wavelet scales (see Notes in docstring). wavelet_levels = max(wavelet_levels - 3, 1) - coeffs = pywt.wavedecn(img, wavelet=wavelet, level=wavelet_levels) + coeffs = pywt.wavedecn(image, wavelet=wavelet, level=wavelet_levels) # Detail coefficients at each decomposition level dcoeffs = coeffs[1:] if sigma is None: # Estimate the noise via the method in [2]_ - detail_coeffs = dcoeffs[-1]['d' * img.ndim] + detail_coeffs = dcoeffs[-1]['d' * image.ndim] sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian') + if method is not None and threshold is not None: + warn(("Thresholding method {} selected. 
The user-specified threshold " + "will be ignored.").format(method)) + if threshold is None: - # The BayesShrink thresholds from [1]_ in docstring var = sigma**2 - threshold = [{key: _bayes_thresh(level[key], var) for key in level} - for level in dcoeffs] + if method is None: + raise ValueError( + "If method is None, a threshold must be provided.") + elif method == "BayesShrink": + # The BayesShrink thresholds from [1]_ in docstring + threshold = [{key: _bayes_thresh(level[key], var) for key in level} + for level in dcoeffs] + elif method == "VisuShrink": + # The VisuShrink thresholds from [2]_ in docstring + threshold = _universal_thresh(image, sigma) + else: + raise ValueError("Unrecognized method: {}".format(method)) if np.isscalar(threshold): # A single threshold for all coefficient arrays @@ -478,22 +491,21 @@ return pywt.waverecn(denoised_coeffs, wavelet)[original_extent] -def denoise_wavelet(img, sigma=None, wavelet='db1', mode='soft', +def denoise_wavelet(image, sigma=None, wavelet='db1', mode='soft', wavelet_levels=None, multichannel=False, - convert2ycbcr=False): + convert2ycbcr=False, method='BayesShrink'): """Perform wavelet denoising on an image. Parameters ---------- - img : ndarray ([M[, N[, ...P]][, C]) of ints, uints or floats - Input data to be denoised. `img` can be of any numeric type, + image : ndarray ([M[, N[, ...P]][, C]) of ints, uints or floats + Input data to be denoised. `image` can be of any numeric type, but it is cast into an ndarray of floats for the computation of the denoised image. sigma : float or list, optional - The noise standard deviation used when computing the threshold - adaptively as described in [1]_ for each color channel. When None - (default), the noise standard deviation is estimated via the method in - [2]_. + The noise standard deviation used when computing the wavelet detail + coefficient threshold(s). When None (default), the noise standard + deviation is estimated via the method in [2]_. 
wavelet : string, optional The type of wavelet to perform and can be any of the options ``pywt.wavelist`` outputs. The default is `'db1'`. For example, @@ -512,6 +524,9 @@ If True and multichannel True, do the wavelet denoising in the YCbCr colorspace instead of the RGB color space. This typically results in better performance for RGB images. + method : {'BayesShrink', 'VisuShrink'}, optional + Thresholding method to be used. The currently supported methods are + "BayesShrink" [1]_ and "VisuShrink" [2]_. Defaults to "BayesShrink". Returns ------- @@ -534,6 +549,16 @@ When YCbCr conversion is done, every color channel is scaled between 0 and 1, and `sigma` values are applied to these scaled color channels. + Many wavelet coefficient thresholding approaches have been proposed. By + default, ``denoise_wavelet`` applies BayesShrink, which is an adaptive + thresholding method that computes separate thresholds for each wavelet + sub-band as described in [1]_. + + If ``method == "VisuShrink"``, a single "universal threshold" is applied to + all wavelet detail coefficients as described in [2]_. This threshold + is designed to remove all Gaussian noise at a given ``sigma`` with high + probability, but tends to produce images that appear overly smooth. + References ---------- .. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet @@ -554,49 +579,57 @@ >>> denoised_img = denoise_wavelet(img, sigma=0.1) """ - img = img_as_float(img) + if method not in ["BayesShrink", "VisuShrink"]: + raise ValueError( + ('Invalid method: {}. 
The currently supported methods are ' + '"BayesShrink" and "VisuShrink"').format(method)) + + image = img_as_float(image) if multichannel: if isinstance(sigma, numbers.Number) or sigma is None: - sigma = [sigma] * img.shape[-1] + sigma = [sigma] * image.shape[-1] if multichannel: if convert2ycbcr: - out = color.rgb2ycbcr(img) + out = color.rgb2ycbcr(image) for i in range(3): # renormalizing this color channel to live in [0, 1] min, max = out[..., i].min(), out[..., i].max() channel = out[..., i] - min channel /= max - min - out[..., i] = denoise_wavelet(channel, sigma=sigma[i], - wavelet=wavelet, mode=mode, + out[..., i] = denoise_wavelet(channel, wavelet=wavelet, + method=method, sigma=sigma[i], + mode=mode, wavelet_levels=wavelet_levels) out[..., i] = out[..., i] * (max - min) out[..., i] += min out = color.ycbcr2rgb(out) else: - out = np.empty_like(img) - for c in range(img.shape[-1]): - out[..., c] = _wavelet_threshold(img[..., c], wavelet=wavelet, - mode=mode, sigma=sigma[c], + out = np.empty_like(image) + for c in range(image.shape[-1]): + out[..., c] = _wavelet_threshold(image[..., c], + wavelet=wavelet, + method=method, + sigma=sigma[c], mode=mode, wavelet_levels=wavelet_levels) - else: - out = _wavelet_threshold(img, wavelet=wavelet, mode=mode, sigma=sigma, + out = _wavelet_threshold(image, wavelet=wavelet, method=method, + sigma=sigma, mode=mode, wavelet_levels=wavelet_levels) - clip_range = (-1, 1) if img.min() < 0 else (0, 1) + clip_range = (-1, 1) if image.min() < 0 else (0, 1) return np.clip(out, *clip_range) -def estimate_sigma(im, average_sigmas=False, multichannel=False): +def estimate_sigma(image, average_sigmas=False, multichannel=False): """ Robust wavelet-based estimator of the (Gaussian) noise standard deviation. Parameters ---------- - im : ndarray + image : ndarray Image for which to estimate the noise standard deviation. average_sigmas : bool, optional If true, average the channel estimates of `sigma`. 
Otherwise return @@ -634,17 +667,17 @@ >>> sigma_hat = estimate_sigma(img, multichannel=False) """ if multichannel: - nchannels = im.shape[-1] + nchannels = image.shape[-1] sigmas = [estimate_sigma( - im[..., c], multichannel=False) for c in range(nchannels)] + image[..., c], multichannel=False) for c in range(nchannels)] if average_sigmas: sigmas = np.mean(sigmas) return sigmas - elif im.shape[-1] <= 4: + elif image.shape[-1] <= 4: msg = ("image is size {0} on the last axis, but multichannel is " "False. If this is a color image, please set multichannel " "to True for proper noise estimation.") - warn(msg.format(im.shape[-1])) - coeffs = pywt.dwtn(im, wavelet='db2') - detail_coeffs = coeffs['d' * im.ndim] + warn(msg.format(image.shape[-1])) + coeffs = pywt.dwtn(image, wavelet='db2') + detail_coeffs = coeffs['d' * image.ndim] return _sigma_est_dwt(detail_coeffs, distribution='Gaussian') diff -Nru skimage-0.13.1/skimage/restoration/__init__.py skimage-0.14.0/skimage/restoration/__init__.py --- skimage-0.13.1/skimage/restoration/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -5,14 +5,11 @@ from .deconvolution import wiener, unsupervised_wiener, richardson_lucy from .unwrap import unwrap_phase -from ._denoise import denoise_tv_chambolle, denoise_tv_bregman, \ - denoise_bilateral, denoise_wavelet, estimate_sigma +from ._denoise import (denoise_tv_chambolle, denoise_tv_bregman, + denoise_bilateral, denoise_wavelet, estimate_sigma) +from ._cycle_spin import cycle_spin from .non_local_means import denoise_nl_means from .inpaint import inpaint_biharmonic -from .._shared.utils import copy_func, deprecated - -nl_means_denoising = copy_func(denoise_nl_means, name='nl_means_denoising') -nl_means_denoising = deprecated('skimage.restoration.denoise_nl_means')(nl_means_denoising) __all__ = ['wiener', @@ -24,7 +21,6 @@ 'denoise_bilateral', 'denoise_wavelet', 'denoise_nl_means', - 
'nl_means_denoising', - 'inpaint_biharmonic'] - -del copy_func, deprecated + 'estimate_sigma', + 'inpaint_biharmonic', + 'cycle_spin'] diff -Nru skimage-0.13.1/skimage/restoration/inpaint.py skimage-0.14.0/skimage/restoration/inpaint.py --- skimage-0.13.1/skimage/restoration/inpaint.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/inpaint.py 2018-05-29 01:27:44.000000000 +0000 @@ -6,7 +6,7 @@ import scipy.ndimage as ndi from scipy.ndimage.filters import laplace import skimage -from skimage.measure import label +from ..measure import label def _get_neighborhood(nd_idx, radius, nd_shape): @@ -15,7 +15,7 @@ return bounds_lo, bounds_hi -def _inpaint_biharmonic_single_channel(img, mask, out, limits): +def _inpaint_biharmonic_single_channel(mask, out, limits): # Initialize sparse matrices matrix_unknown = sparse.lil_matrix((np.sum(mask), out.size)) matrix_known = sparse.lil_matrix((np.sum(mask), out.size)) @@ -74,19 +74,19 @@ return out -def inpaint_biharmonic(img, mask, multichannel=False): +def inpaint_biharmonic(image, mask, multichannel=False): """Inpaint masked points in image with biharmonic equations. Parameters ---------- - img : (M[, N[, ..., P]][, C]) ndarray + image : (M[, N[, ..., P]][, C]) ndarray Input image. mask : (M[, N[, ..., P]]) ndarray Array of pixels to be inpainted. Have to be the same shape as one - of the 'img' channels. Unknown pixels have to be represented with 1, + of the 'image' channels. Unknown pixels have to be represented with 1, known pixels - with 0. multichannel : boolean, optional - If True, the last `img` dimension is considered as a color channel, + If True, the last `image` dimension is considered as a color channel, otherwise as spatial. Returns @@ -97,7 +97,12 @@ References ---------- .. [1] N.S.Hoang, S.B.Damelin, "On surface completion and image inpainting - by biharmonic functions: numerical aspects" + by biharmonic functions: numerical aspects", + https://arxiv.org/abs/1707.06567 + .. [2] C. K. 
Chui and H. N. Mhaskar, MRA Contextual-Recovery Extension of + Smooth Functions on Manifolds, Appl. and Comp. Harmonic Anal., + 28 (2010), 104-113, + DOI: 10.1016/j.acha.2009.04.004 Examples -------- @@ -109,17 +114,17 @@ >>> out = inpaint_biharmonic(img, mask) """ - if img.ndim < 1: + if image.ndim < 1: raise ValueError('Input array has to be at least 1D') - img_baseshape = img.shape[:-1] if multichannel else img.shape + img_baseshape = image.shape[:-1] if multichannel else image.shape if img_baseshape != mask.shape: raise ValueError('Input arrays have to be the same shape') - if np.ma.isMaskedArray(img): + if np.ma.isMaskedArray(image): raise TypeError('Masked arrays are not supported') - img = skimage.img_as_float(img) + image = skimage.img_as_float(image) mask = mask.astype(np.bool) # Split inpainting mask into independent regions @@ -129,18 +134,17 @@ mask_labeled *= mask if not multichannel: - img = img[..., np.newaxis] + image = image[..., np.newaxis] - out = np.copy(img) + out = np.copy(image) - for idx_channel in range(img.shape[-1]): - known_points = img[..., idx_channel][~mask] + for idx_channel in range(image.shape[-1]): + known_points = image[..., idx_channel][~mask] limits = (np.min(known_points), np.max(known_points)) for idx_region in range(1, num_labels+1): mask_region = mask_labeled == idx_region - _inpaint_biharmonic_single_channel( - img[..., idx_channel], mask_region, + _inpaint_biharmonic_single_channel(mask_region, out[..., idx_channel], limits) if not multichannel: diff -Nru skimage-0.13.1/skimage/restoration/_nl_means_denoising.pyx skimage-0.14.0/skimage/restoration/_nl_means_denoising.pyx --- skimage-0.13.1/skimage/restoration/_nl_means_denoising.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/_nl_means_denoising.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -1,16 +1,23 @@ +#cython: initializedcheck=False +#cython: wraparound=False +#cython: boundscheck=False +#cython: cdivision=True + import numpy as np cimport 
numpy as np cimport cython -from libc.math cimport exp -ctypedef np.float32_t IMGDTYPE +ctypedef np.float64_t IMGDTYPE + +cdef double DISTANCE_CUTOFF = 5.0 -cdef float DISTANCE_CUTOFF = 5. +cdef extern from "fast_exp.h": + double fast_exp(double y) nogil -@cython.boundscheck(False) -cdef inline float patch_distance_2d(IMGDTYPE [:, :] p1, - IMGDTYPE [:, :] p2, - IMGDTYPE [:, ::] w, int s): + +cdef inline double patch_distance_2d(IMGDTYPE [:, :] p1, + IMGDTYPE [:, :] p2, + IMGDTYPE [:, ::] w, int s, double var): """ Compute a Gaussian distance between two image patches. @@ -21,44 +28,48 @@ p2 : 2-D array_like Second patch. w : 2-D array_like - Array of weigths for the different pixels of the patches. + Array of weights for the different pixels of the patches. s : int Linear size of the patches. + var : double + Expected noise variance. Returns ------- - distance : float + distance : double Gaussian distance between the two patches Notes ----- The returned distance is given by - .. math:: \exp( -w (p1 - p2)^2) + .. math:: \exp( -w ((p1 - p2)^2 - 2*var)) """ cdef int i, j cdef int center = s / 2 # Check if central pixel is too different in the 2 patches - cdef float tmp_diff = p1[center, center] - p2[center, center] - cdef float init = w[center, center] * tmp_diff * tmp_diff + cdef double tmp_diff = p1[center, center] - p2[center, center] + cdef double init = w[center, center] * tmp_diff * tmp_diff if init > 1: return 0. - cdef float distance = 0 + cdef double distance = 0 for i in range(s): # exp of large negative numbers will be 0, so we'd better stop if distance > DISTANCE_CUTOFF: return 0. 
for j in range(s): tmp_diff = p1[i, j] - p2[i, j] - distance += (w[i, j] * tmp_diff * tmp_diff) - distance = exp(-distance) + distance += w[i, j] * (tmp_diff * tmp_diff - 2 * var) + distance = max(distance, 0) + distance = fast_exp(-distance) return distance -@cython.boundscheck(False) -cdef inline float patch_distance_2drgb(IMGDTYPE [:, :, :] p1, - IMGDTYPE [:, :, :] p2, - IMGDTYPE [:, ::] w, int s): +cdef inline double patch_distance_2dmultichannel(IMGDTYPE [:, :, :] p1, + IMGDTYPE [:, :, :] p2, + IMGDTYPE [:, ::] w, + int s, double var, + int n_channels): """ Compute a Gaussian distance between two image patches. @@ -72,39 +83,41 @@ Array of weights for the different pixels of the patches. s : int Linear size of the patches. + var : double + Expected noise variance. + n_channels : int + The number of channels. Returns ------- - distance : float + distance : double Gaussian distance between the two patches Notes ----- The returned distance is given by - .. math:: \exp( -w (p1 - p2)^2) + .. math:: \exp( -w ((p1 - p2)^2 - 2*var)) """ - cdef int i, j - cdef int center = s / 2 - cdef int color - cdef float tmp_diff = 0 - cdef float distance = 0 + cdef int i, j, channel + cdef double tmp_diff = 0 + cdef double distance = 0 for i in range(s): # exp of large negative numbers will be 0, so we'd better stop if distance > DISTANCE_CUTOFF: return 0. 
for j in range(s): - for color in range(3): - tmp_diff = p1[i, j, color] - p2[i, j, color] - distance += w[i, j] * tmp_diff * tmp_diff - distance = exp(-distance) + for channel in range(n_channels): + tmp_diff = p1[i, j, channel] - p2[i, j, channel] + distance += w[i, j] * (tmp_diff * tmp_diff - 2 * var) + distance = max(distance, 0) + distance = fast_exp(-distance) return distance -@cython.boundscheck(False) -cdef inline float patch_distance_3d(IMGDTYPE [:, :, :] p1, - IMGDTYPE [:, :, :] p2, - IMGDTYPE [:, :, ::] w, int s): +cdef inline double patch_distance_3d(IMGDTYPE [:, :, :] p1, + IMGDTYPE [:, :, :] p2, + IMGDTYPE [:, :, ::] w, int s, double var): """ Compute a Gaussian distance between two image patches. @@ -118,21 +131,23 @@ Array of weights for the different pixels of the patches. s : int Linear size of the patches. + var : double + Expected noise variance. Returns ------- - distance : float + distance : double Gaussian distance between the two patches Notes ----- The returned distance is given by - .. math:: \exp( -w (p1 - p2)^2) + .. 
math:: \exp( -w ((p1 - p2)^2 - 2*var)) """ cdef int i, j, k - cdef float distance = 0 - cdef float tmp_diff + cdef double distance = 0 + cdef double tmp_diff for i in range(s): # exp of large negative numbers will be 0, so we'd better stop if distance > DISTANCE_CUTOFF: @@ -140,14 +155,14 @@ for j in range(s): for k in range(s): tmp_diff = p1[i, j, k] - p2[i, j, k] - distance += w[i, j, k] * tmp_diff * tmp_diff - distance = exp(-distance) + distance += w[i, j, k] * (tmp_diff * tmp_diff - 2 * var) + distance = max(distance, 0.0) + distance = fast_exp(-distance) return distance -@cython.cdivision(True) -@cython.boundscheck(False) -def _nl_means_denoising_2d(image, int s=7, int d=13, float h=0.1): +def _nl_means_denoising_2d(image, int s=7, int d=13, double h=0.1, + double var=0.): """ Perform non-local means denoising on 2-D RGB image @@ -159,9 +174,18 @@ Size of patches used for denoising d : int, optional Maximal distance in pixels where to search patches used for denoising - h : float, optional + h : double, optional Cut-off distance (in gray levels). The higher h, the more permissive one is in accepting patches. + var : double + Expected noise variance. If non-zero, this is used to reduce the + apparent patch distances by the expected distance due to the noise. + + Notes + ----- + This function operates on 2D grayscale and multichannel images. For + 2D grayscale images, the input should be 3D with size 1 along the last + axis. The code is compatible with an arbitrary number of channels. 
Returns ------- @@ -170,26 +194,26 @@ """ if s % 2 == 0: s += 1 # odd value for symmetric patch - cdef int n_row, n_col, n_ch - n_row, n_col, n_ch = image.shape + cdef int n_row, n_col, n_channels + n_row, n_col, n_channels = image.shape cdef int offset = s / 2 - cdef int row, col, i, j, color + cdef int row, col, i, j, channel cdef int row_start, row_end, col_start, col_end cdef int row_start_i, row_end_i, col_start_j, col_end_j - cdef IMGDTYPE [::1] new_values = np.zeros(n_ch).astype(np.float32) + cdef IMGDTYPE [::1] new_values = np.zeros(n_channels).astype(np.float64) cdef IMGDTYPE [:, :, ::1] padded = np.ascontiguousarray(np.pad(image, ((offset, offset), (offset, offset), (0, 0)), - mode='reflect').astype(np.float32)) + mode='reflect').astype(np.float64)) cdef IMGDTYPE [:, :, ::1] result = padded.copy() - cdef float A = ((s - 1.) / 4.) - cdef float new_value - cdef float weight_sum, weight + cdef double A = ((s - 1.) / 4.) + cdef double new_value + cdef double weight_sum, weight xg_row, xg_col = np.mgrid[-offset:offset + 1, -offset:offset + 1] cdef IMGDTYPE [:, ::1] w = np.ascontiguousarray(np.exp( - -(xg_row ** 2 + xg_col ** 2) / (2 * A ** 2)). - astype(np.float32)) - cdef float distance - w = 1. / (n_ch * np.sum(w) * h ** 2) * w + -(xg_row * xg_row + xg_col * xg_col) / (2 * A * A)). + astype(np.float64)) + cdef double distance + w = 1. 
/ (n_channels * np.sum(w) * h * h) * w # Coordinates of central pixel # Iterate over rows, taking padding into account @@ -199,8 +223,8 @@ # Iterate over columns, taking padding into account for col in range(offset, n_col + offset): # Initialize per-channel bins - for color in range(n_ch): - new_values[color] = 0 + for channel in range(n_channels): + new_values[channel] = 0 # Reset weights for each local region weight_sum = 0 col_start = col - offset @@ -218,40 +242,39 @@ col_start_j = col_start + j col_end_j = col_end + j # Shortcut for grayscale, else assume RGB - if n_ch == 1: + if n_channels == 1: weight = patch_distance_2d( padded[row_start:row_end, col_start:col_end, 0], padded[row_start_i:row_end_i, col_start_j:col_end_j, 0], - w, s) + w, s, var) else: - weight = patch_distance_2drgb( + weight = patch_distance_2dmultichannel( padded[row_start:row_end, col_start:col_end, :], padded[row_start_i:row_end_i, col_start_j:col_end_j, :], - w, s) + w, s, var, n_channels) # Collect results in weight sum weight_sum += weight # Apply to each channel multiplicatively - for color in range(n_ch): - new_values[color] += weight * padded[row + i, - col + j, color] + for channel in range(n_channels): + new_values[channel] += weight * padded[row + i, + col + j, + channel] # Normalize the result - for color in range(n_ch): - result[row, col, color] = new_values[color] / weight_sum + for channel in range(n_channels): + result[row, col, channel] = new_values[channel] / weight_sum # Return cropped result, undoing padding return result[offset:-offset, offset:-offset] -@cython.cdivision(True) -@cython.boundscheck(False) -def _nl_means_denoising_3d(image, int s=7, - int d=13, float h=0.1): +def _nl_means_denoising_3d(image, int s=7, int d=13, double h=0.1, + double var=0.0): """ Perform non-local means denoising on 3-D array @@ -263,8 +286,11 @@ Size of patches used for denoising. d : int, optional Maximal distance in pixels where to search patches used for denoising. 
- h : float, optional + h : double, optional Cut-off distance (in gray levels). + var : double + Expected noise variance. If non-zero, this is used to reduce the + apparent patch distances by the expected distance due to the noise. Returns ------- @@ -278,24 +304,25 @@ cdef int offset = s / 2 # padd the image so that boundaries are denoised as well cdef IMGDTYPE [:, :, ::1] padded = np.ascontiguousarray(np.pad( - image.astype(np.float32), + image.astype(np.float64), offset, mode='reflect')) cdef IMGDTYPE [:, :, ::1] result = padded.copy() - cdef float A = ((s - 1.) / 4.) - cdef float new_value - cdef float weight_sum, weight + cdef double A = ((s - 1.) / 4.) + cdef double new_value + cdef double weight_sum, weight xg_pln, xg_row, xg_col = np.mgrid[-offset: offset + 1, -offset: offset + 1, -offset: offset + 1] cdef IMGDTYPE [:, :, ::1] w = np.ascontiguousarray(np.exp( - -(xg_pln ** 2 + xg_row ** 2 + xg_col ** 2) / - (2 * A ** 2)).astype(np.float32)) - cdef float distance + -(xg_pln * xg_pln + xg_row * xg_row + + xg_col * xg_col) / + (2 * A * A)).astype(np.float64)) + cdef double distance cdef int pln, row, col, i, j, k cdef int pln_start, pln_end, row_start, row_end, col_start, col_end cdef int pln_start_i, pln_end_i, row_start_j, row_end_j, \ col_start_k, col_end_k - w = 1. / (np.sum(w) * h ** 2) * w + w = 1. 
/ (np.sum(w) * h * h) * w # Coordinates of central pixel # Iterate over planes, taking padding into account @@ -336,7 +363,7 @@ padded[pln_start_i:pln_end_i, row_start_j:row_end_j, col_start_k:col_end_k], - w, s) + w, s, var) # Collect results in weight sum weight_sum += weight new_value += weight * padded[pln + i, @@ -351,10 +378,8 @@ #-------------- Accelerated algorithm of Froment 2015 ------------------ -@cython.cdivision(True) -@cython.boundscheck(False) -cdef inline float _integral_to_distance_2d(IMGDTYPE [:, ::] integral, - int row, int col, int offset, float h2s2): +cdef inline double _integral_to_distance_2d(IMGDTYPE [:, ::] integral, int row, + int col, int offset, double h2s2): """ References ---------- @@ -368,20 +393,19 @@ Used in _fast_nl_means_denoising_2d """ - cdef float distance + cdef double distance distance = integral[row + offset, col + offset] + \ integral[row - offset, col - offset] - \ integral[row - offset, col + offset] - \ integral[row + offset, col - offset] - distance /= h2s2 + distance = max(distance, 0.0) / h2s2 return distance -@cython.cdivision(True) -@cython.boundscheck(False) -cdef inline float _integral_to_distance_3d(IMGDTYPE [:, :, ::] integral, - int pln, int row, int col, int offset, - float s_cube_h_square): +cdef inline double _integral_to_distance_3d(IMGDTYPE [:, :, ::] integral, + int pln, int row, int col, + int offset, + double s_cube_h_square): """ References ---------- @@ -395,7 +419,7 @@ Used in _fast_nl_means_denoising_3d """ - cdef float distance + cdef double distance distance = (integral[pln + offset, row + offset, col + offset] - integral[pln - offset, row - offset, col - offset] + integral[pln - offset, row - offset, col + offset] + @@ -404,22 +428,21 @@ integral[pln - offset, row + offset, col + offset] - integral[pln + offset, row - offset, col + offset] - integral[pln + offset, row + offset, col - offset]) - distance /= s_cube_h_square + distance = max(distance, 0.0) / (s_cube_h_square) return distance 
-@cython.cdivision(True) -@cython.boundscheck(False) cdef inline _integral_image_2d(IMGDTYPE [:, :, ::] padded, IMGDTYPE [:, ::] integral, int t_row, - int t_col, int n_row, int n_col, int n_ch): + int t_col, int n_row, int n_col, int n_channels, + double var): """ Computes the integral of the squared difference between an image ``padded`` and the same image shifted by ``(t_row, t_col)``. Parameters ---------- - padded : ndarray of shape (n_row, n_col, n_ch) + padded : ndarray of shape (n_row, n_col, n_channels) Image of interest. integral : ndarray Output of the function. The array is filled with integral values. @@ -430,7 +453,10 @@ Shift along the column axis. n_row : int n_col : int - n_ch : int + n_channels : int + var : double + Expected noise variance. If non-zero, this is used to reduce the + apparent patch distances by the expected distance due to the noise. Notes ----- @@ -439,31 +465,33 @@ ``transform.integral_image``, but this helper function saves memory by avoiding copies of ``padded``. 
""" - cdef int row, col - cdef float distance + cdef int row, col, channel + cdef double distance, t + var *= 2.0 + for row in range(max(1, -t_row), min(n_row, n_row - t_row)): for col in range(max(1, -t_col), min(n_col, n_col - t_col)): - if n_ch == 1: - distance = (padded[row, col, 0] - - padded[row + t_row, col + t_col, 0])**2 + if n_channels == 1: + t = padded[row, col, 0] - padded[row + t_row, col + t_col, 0] + distance = t * t - var else: - distance = ((padded[row, col, 0] - - padded[row + t_row, col + t_col, 0])**2 + - (padded[row, col, 1] - - padded[row + t_row, col + t_col, 1])**2 + - (padded[row, col, 2] - - padded[row + t_row, col + t_col, 2])**2) + distance = 0 + for channel in range(n_channels): + t = (padded[row, col, channel] - + padded[row + t_row, col + t_col, channel]) + distance += t * t + distance -= n_channels * var integral[row, col] = distance + \ integral[row - 1, col] + \ integral[row, col - 1] - \ integral[row - 1, col - 1] -@cython.cdivision(True) -@cython.boundscheck(False) + cdef inline _integral_image_3d(IMGDTYPE [:, :, ::] padded, IMGDTYPE [:, :, ::] integral, int t_pln, int t_row, int t_col, int n_pln, int n_row, - int n_col): + int n_col, + double var): """ Computes the integral of the squared difference between an image ``padded`` and the same image shifted by ``(t_pln, t_row, t_col)``. @@ -484,6 +512,9 @@ n_pln : int n_row : int n_col : int + var : double + Expected noise variance. If non-zero, this is used to reduce the + apparent patch distances by the expected distance due to the noise. Notes ----- @@ -493,25 +524,28 @@ by avoiding copies of ``padded``. 
""" cdef int pln, row, col - cdef float distance + cdef double distance + var *= 2.0 for pln in range(max(1, -t_pln), min(n_pln, n_pln - t_pln)): for row in range(max(1, -t_row), min(n_row, n_row - t_row)): for col in range(max(1, -t_col), min(n_col, n_col - t_col)): - integral[pln, row, col] = \ - ((padded[pln, row, col] - - padded[pln + t_pln, row + t_row, col + t_col])**2 + - integral[pln - 1, row, col] + - integral[pln, row - 1, col] + - integral[pln, row, col - 1] + - integral[pln - 1, row - 1, col - 1] - - integral[pln - 1, row - 1, col] - - integral[pln, row - 1, col - 1] - - integral[pln - 1, row, col - 1]) + distance = (padded[pln, row, col] - + padded[pln + t_pln, row + t_row, col + t_col]) + distance *= distance + distance -= var + integral[pln, row, col] = ( + distance + + integral[pln - 1, row, col] + + integral[pln, row - 1, col] + + integral[pln, row, col - 1] + + integral[pln - 1, row - 1, col - 1] - + integral[pln - 1, row - 1, col] - + integral[pln, row - 1, col - 1] - + integral[pln - 1, row, col - 1]) -@cython.cdivision(True) -@cython.boundscheck(False) -def _fast_nl_means_denoising_2d(image, int s=7, int d=13, float h=0.1): +def _fast_nl_means_denoising_2d(image, int s=7, int d=13, double h=0.1, + double var=0.): """ Perform fast non-local means denoising on 2-D array, with the outer loop on patch shifts in order to reduce the number of operations. @@ -524,9 +558,12 @@ Size of patches used for denoising. d : int, optional Maximal distance in pixels where to search patches used for denoising. - h : float, optional + h : double, optional Cut-off distance (in gray levels). The higher h, the more permissive one is in accepting patches. + var : double + Expected noise variance. If non-zero, this is used to reduce the + apparent patch distances by the expected distance due to the noise. 
Returns ------- @@ -551,17 +588,17 @@ cdef int pad_size = offset + d + 1 cdef IMGDTYPE [:, :, ::1] padded = np.ascontiguousarray(np.pad(image, ((pad_size, pad_size), (pad_size, pad_size), (0, 0)), - mode='reflect').astype(np.float32)) + mode='reflect').astype(np.float64)) cdef IMGDTYPE [:, :, ::1] result = np.zeros_like(padded) cdef IMGDTYPE [:, ::1] weights = np.zeros_like(padded[..., 0], order='C') cdef IMGDTYPE [:, ::1] integral = np.zeros_like(padded[..., 0], order='C') - cdef int n_row, n_col, n_ch, t_row, t_col, row, col - cdef float weight, distance - cdef float alpha - cdef float h2 = h ** 2. - cdef float s2 = s ** 2. - n_row, n_col, n_ch = image.shape - cdef float h2s2 = n_ch * h2 * s2 + cdef int n_row, n_col, t_row, t_col, row, col, n_channels, channel + cdef double weight, distance + cdef double alpha + cdef double h2 = h * h + cdef double s2 = s * s + n_row, n_col, n_channels = image.shape + cdef double h2s2 = n_channels * h2 * s2 n_row += 2 * pad_size n_col += 2 * pad_size @@ -581,7 +618,7 @@ # padded and the same image shifted by (t_row, t_col) integral = np.zeros_like(padded[..., 0], order='C') _integral_image_2d(padded, integral, t_row, t_col, - n_row, n_col, n_ch) + n_row, n_col, n_channels, var) # Inner loops on pixel coordinates # Iterate over rows, taking offset and shift into account @@ -596,21 +633,21 @@ # exp of large negative numbers is close to zero if distance > DISTANCE_CUTOFF: continue - weight = alpha * exp(-distance) + weight = alpha * fast_exp(-distance) # Accumulate weights corresponding to different shifts weights[row, col] += weight weights[row + t_row, col + t_col] += weight # Iterate over channels - for ch in range(n_ch): - result[row, col, ch] += weight * \ - padded[row + t_row, col + t_col, ch] - result[row + t_row, col + t_col, ch] += \ - weight * padded[row, col, ch] + for channel in range(n_channels): + result[row, col, channel] += weight * \ + padded[row + t_row, col + t_col, channel] + result[row + t_row, col + t_col, 
channel] += \ + weight * padded[row, col, channel] # Normalize pixel values using sum of weights of contributing patches for row in range(offset, n_row - offset): for col in range(offset, n_col - offset): - for channel in range(n_ch): + for channel in range(n_channels): # No risk of division by zero, since the contribution # of a null shift is strictly positive result[row, col, channel] /= weights[row, col] @@ -619,9 +656,8 @@ return result[pad_size:-pad_size, pad_size:-pad_size] -@cython.cdivision(True) -@cython.boundscheck(False) -def _fast_nl_means_denoising_3d(image, int s=5, int d=7, float h=0.1): +def _fast_nl_means_denoising_3d(image, int s=5, int d=7, double h=0.1, + double var=0.): """ Perform fast non-local means denoising on 3-D array, with the outer loop on patch shifts in order to reduce the number of operations. @@ -634,9 +670,12 @@ Size of patches used for denoising. d : int, optional Maximal distance in pixels where to search patches used for denoising. - h : float, optional + h : double, optional cut-off distance (in gray levels). The higher h, the more permissive one is in accepting patches. + var : double + Expected noise variance. If non-zero, this is used to reduce the + apparent patch distances by the expected distance due to the noise. Returns ------- @@ -660,7 +699,7 @@ # + 1 for the boundary effects in finite differences cdef int pad_size = offset + d + 1 cdef IMGDTYPE [:, :, ::1] padded = np.ascontiguousarray(np.pad(image, - pad_size, mode='reflect').astype(np.float32)) + pad_size, mode='reflect').astype(np.float64)) cdef IMGDTYPE [:, :, ::1] result = np.zeros_like(padded) cdef IMGDTYPE [:, :, ::1] weights = np.zeros_like(padded) cdef IMGDTYPE [:, :, ::1] integral = np.zeros_like(padded) @@ -668,11 +707,11 @@ pln, row, col cdef int pln_dist_min, pln_dist_max, row_dist_min, row_dist_max, \ col_dist_min, col_dist_max - cdef float weight, distance - cdef float alpha - cdef float h_square = h ** 2. - cdef float s_cube = s ** 3. 
- cdef float s_cube_h_square = h_square * s_cube + cdef double weight, distance + cdef double alpha + cdef double h_square = h * h + cdef double s_cube = s * s * s + cdef double s_cube_h_square = h_square * s_cube n_pln, n_row, n_col = image.shape n_pln += 2 * pad_size n_row += 2 * pad_size @@ -697,12 +736,13 @@ if t_col == 0 and (t_pln is not 0 or t_row is not 0): alpha = 0.5 else: - alpha = 1. + alpha = 1.0 + # Compute integral image of the squared difference between # padded and the same image shifted by (t_pln, t_row, t_col) integral = np.zeros_like(padded) _integral_image_3d(padded, integral, t_pln, t_row, t_col, - n_pln, n_row, n_col) + n_pln, n_row, n_col, var) # Inner loops on pixel coordinates # Iterate over planes, taking offset and shift into account @@ -717,7 +757,8 @@ # exp of large negative numbers is close to zero if distance > DISTANCE_CUTOFF: continue - weight = alpha * exp(-distance) + + weight = alpha * fast_exp(-distance) # Accumulate weights for the different shifts weights[pln, row, col] += weight weights[pln + t_pln, row + t_row, diff -Nru skimage-0.13.1/skimage/restoration/non_local_means.py skimage-0.14.0/skimage/restoration/non_local_means.py --- skimage-0.13.1/skimage/restoration/non_local_means.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/non_local_means.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,5 +1,5 @@ import numpy as np -from .._shared.utils import skimage_deprecation, warn +from .._shared.utils import warn from ._nl_means_denoising import ( _nl_means_denoising_2d, _nl_means_denoising_3d, @@ -8,7 +8,7 @@ def denoise_nl_means(image, patch_size=7, patch_distance=11, h=0.1, - multichannel=None, fast_mode=True): + multichannel=None, fast_mode=True, sigma=0.): """ Perform non-local means denoising on 2-D or 3-D grayscale images, and 2-D RGB images. @@ -35,6 +35,10 @@ If True (default value), a fast version of the non-local means algorithm is used. 
If False, the original version of non-local means is used. See the Notes section for more details about the algorithms. + sigma : float, optional + The standard deviation of the (Gaussian) noise. If provided, a more + robust computation of patch weights is computed that takes the expected + noise variance into account (see Notes below). Returns ------- @@ -52,18 +56,18 @@ to the patch centered on the pixel of interest. In the original version of the algorithm [1]_, corresponding to - ``fast=False``, the computational complexity is + ``fast=False``, the computational complexity is:: - image.size * patch_size ** image.ndim * patch_distance ** image.ndim + image.size * patch_size ** image.ndim * patch_distance ** image.ndim Hence, changing the size of patches or their maximal distance has a strong effect on computing times, especially for 3-D images. However, the default behavior corresponds to ``fast_mode=True``, for which another version of non-local means [2]_ is used, corresponding to a - complexity of + complexity of:: - image.size * patch_distance ** image.ndim + image.size * patch_distance ** image.ndim The computing time depends only weakly on the patch size, thanks to the computation of the integral of patches distances for a given @@ -84,18 +88,38 @@ The image is padded using the `reflect` mode of `skimage.util.pad` before denoising. + If the noise standard deviation, `sigma`, is provided a more robust + computation of patch weights is used. Subtracting the known noise variance + from the computed patch distances improves the estimates of patch + similarity, giving a moderate improvement to denoising performance [4]_. + It was also mentioned as an option for the fast variant of the algorithm in + [3]_. + + When `sigma` is provided, a smaller `h` should typically be used to + avoid oversmoothing. 
The optimal value for `h` depends on the image + content and noise level, but a reasonable starting point is + ``h = 0.8 * sigma`` when `fast_mode` is `True`, or ``h = 0.6 * sigma`` when + `fast_mode` is `False`. + References ---------- - .. [1] Buades, A., Coll, B., & Morel, J. M. (2005, June). A non-local - algorithm for image denoising. In CVPR 2005, Vol. 2, pp. 60-65, IEEE. + .. [1] A. Buades, B. Coll, & J-M. Morel. A non-local algorithm for image + denoising. In CVPR 2005, Vol. 2, pp. 60-65, IEEE. + DOI: 10.1109/CVPR.2005.38 .. [2] J. Darbon, A. Cunha, T.F. Chan, S. Osher, and G.J. Jensen, Fast nonlocal filtering applied to electron cryomicroscopy, in 5th IEEE International Symposium on Biomedical Imaging: From Nano to Macro, 2008, pp. 1331-1334. + DOI: 10.1109/ISBI.2008.4541250 .. [3] Jacques Froment. Parameter-Free Fast Pixelwise Non-Local Means - Denoising. Image Processing On Line, 2014, vol. 4, p. 300-326. + Denoising. Image Processing On Line, 2014, vol. 4, pp. 300-326. + DOI: 10.5201/ipol.2014.120 + + .. [4] A. Buades, B. Coll, & J-M. Morel. Non-Local Means Denoising. + Image Processing On Line, 2011, vol. 1, pp. 208-212. 
+ DOI: 10.5201/ipol.2011.bcm_nlm Examples -------- @@ -114,17 +138,16 @@ if image.ndim != 3: raise NotImplementedError("Non-local means denoising is only \ implemented for 2D grayscale and RGB images or 3-D grayscale images.") + nlm_kwargs = dict(s=patch_size, d=patch_distance, h=h, var=sigma * sigma) if multichannel: # 2-D images if fast_mode: - return np.squeeze(np.array(_fast_nl_means_denoising_2d(image, - patch_size, patch_distance, h))) + return np.squeeze( + np.asarray(_fast_nl_means_denoising_2d(image, **nlm_kwargs))) else: - return np.squeeze(np.array(_nl_means_denoising_2d(image, - patch_size, patch_distance, h))) + return np.squeeze( + np.asarray(_nl_means_denoising_2d(image, **nlm_kwargs))) else: # 3-D grayscale if fast_mode: - return np.array(_fast_nl_means_denoising_3d(image, s=patch_size, - d=patch_distance, h=h)) + return np.asarray(_fast_nl_means_denoising_3d(image, **nlm_kwargs)) else: - return np.array(_nl_means_denoising_3d(image, patch_size, - patch_distance, h)) + return np.asarray(_nl_means_denoising_3d(image, **nlm_kwargs)) diff -Nru skimage-0.13.1/skimage/restoration/setup.py skimage-0.14.0/skimage/restoration/setup.py --- skimage-0.13.1/skimage/restoration/setup.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/setup.py 2018-05-29 01:27:44.000000000 +0000 @@ -31,7 +31,8 @@ include_dirs=[get_numpy_include_dirs(), '../_shared']) config.add_extension('_nl_means_denoising', sources=['_nl_means_denoising.c'], - include_dirs=[get_numpy_include_dirs()]) + include_dirs=[get_numpy_include_dirs(), + '../_shared']) return config diff -Nru skimage-0.13.1/skimage/restoration/tests/test_denoise.py skimage-0.14.0/skimage/restoration/tests/test_denoise.py --- skimage-0.13.1/skimage/restoration/tests/test_denoise.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/tests/test_denoise.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,14 +1,16 @@ import numpy as np -from numpy.testing import (run_module_suite, 
assert_raises, assert_equal, - assert_almost_equal, assert_warns, assert_) from skimage import restoration, data, color, img_as_float, measure -from skimage._shared._warnings import expected_warnings from skimage.measure import compare_psnr from skimage.restoration._denoise import _wavelet_threshold - import pywt +from skimage._shared import testing +from skimage._shared.testing import (assert_equal, assert_almost_equal, + assert_warns, assert_) +from skimage._shared._warnings import expected_warnings, warnings + + np.random.seed(1234) @@ -200,8 +202,8 @@ def test_denoise_bilateral_3d_grayscale(): img = np.ones((50, 50, 3)) - assert_raises(ValueError, restoration.denoise_bilateral, img, - multichannel=False) + with testing.raises(ValueError): + restoration.denoise_bilateral(img, multichannel=False) def test_denoise_bilateral_3d_multichannel(): @@ -214,113 +216,103 @@ def test_denoise_bilateral_multidimensional(): img = np.ones((10, 10, 10, 10)) - assert_raises(ValueError, restoration.denoise_bilateral, img, - multichannel=False) - assert_raises(ValueError, restoration.denoise_bilateral, img, - multichannel=True) + with testing.raises(ValueError): + restoration.denoise_bilateral(img, multichannel=False) + with testing.raises(ValueError): + restoration.denoise_bilateral(img, multichannel=True) def test_denoise_bilateral_nan(): + import sys img = np.full((50, 50), np.NaN) - out = restoration.denoise_bilateral(img, multichannel=False) - assert_equal(img, out) - - -def test_denoise_sigma_range(): - img = checkerboard_gray.copy()[:50, :50] - # add some random noise - img += 0.5 * img.std() * np.random.rand(*img.shape) - img = np.clip(img, 0, 1) - out1 = restoration.denoise_bilateral(img, sigma_color=0.1, - sigma_spatial=10, multichannel=False) - with expected_warnings( - '`sigma_range` has been deprecated in favor of `sigma_color`. 
' - 'The `sigma_range` keyword argument will be removed in v0.14'): - out2 = restoration.denoise_bilateral(img, sigma_range=0.1, - sigma_spatial=10, - multichannel=False) - assert_equal(out1, out2) - -def test_denoise_sigma_range_and_sigma_color(): - img = checkerboard_gray.copy()[:50, :50] - # add some random noise - img += 0.5 * img.std() * np.random.rand(*img.shape) - img = np.clip(img, 0, 1) - out1 = restoration.denoise_bilateral(img, sigma_color=0.1, - sigma_spatial=10, multichannel=False) - with expected_warnings( - '`sigma_range` has been deprecated in favor of `sigma_color`. ' - 'The `sigma_range` keyword argument will be removed in v0.14'): - out2 = restoration.denoise_bilateral(img, sigma_color=0.2, - sigma_range=0.1, sigma_spatial=10, - multichannel=False) - assert_equal(out1, out2) + # TODO: This warning is not optional in python3. This should be + # made a strict warning when we get to 0.15 + with expected_warnings(['invalid|\A\Z']): + out = restoration.denoise_bilateral(img, multichannel=False) + assert_equal(img, out) -def test_nl_means_denoising_2d(): +def test_denoise_nl_means_2d(): img = np.zeros((40, 40)) img[10:-10, 10:-10] = 1. 
- img += 0.3*np.random.randn(*img.shape) - denoised = restoration.denoise_nl_means(img, 7, 5, 0.2, fast_mode=True, - multichannel=True) - # make sure noise is reduced - assert_(img.std() > denoised.std()) - denoised = restoration.denoise_nl_means(img, 7, 5, 0.2, fast_mode=False, - multichannel=True) - # make sure noise is reduced - assert_(img.std() > denoised.std()) + sigma = 0.3 + img += sigma * np.random.randn(*img.shape) + for s in [sigma, 0]: + denoised = restoration.denoise_nl_means(img, 7, 5, 0.2, fast_mode=True, + multichannel=True, sigma=s) + # make sure noise is reduced + assert_(img.std() > denoised.std()) + denoised = restoration.denoise_nl_means(img, 7, 5, 0.2, + fast_mode=False, + multichannel=True, sigma=s) + # make sure noise is reduced + assert_(img.std() > denoised.std()) -def test_denoise_nl_means_2drgb(): - # reduce image size because nl means is very slow +def test_denoise_nl_means_2d_multichannel(): + # reduce image size because nl means is slow img = np.copy(astro[:50, :50]) + img = np.concatenate((img, ) * 2, ) # 6 channels + # add some random noise - img += 0.5 * img.std() * np.random.random(img.shape) - img = np.clip(img, 0, 1) - denoised = restoration.denoise_nl_means(img, 7, 9, 0.3, fast_mode=True, - multichannel=True) - # make sure noise is reduced - assert_(img.std() > denoised.std()) - denoised = restoration.denoise_nl_means(img, 7, 9, 0.3, fast_mode=False, - multichannel=True) - # make sure noise is reduced - assert_(img.std() > denoised.std()) + sigma = 0.1 + imgn = img + sigma * np.random.standard_normal(img.shape) + imgn = np.clip(imgn, 0, 1) + for fast_mode in [True, False]: + for s in [sigma, 0]: + for n_channels in [2, 3, 6]: + psnr_noisy = compare_psnr(img[..., :n_channels], + imgn[..., :n_channels]) + denoised = restoration.denoise_nl_means(imgn[..., :n_channels], + 3, 5, h=0.75 * sigma, + fast_mode=fast_mode, + multichannel=True, + sigma=s) + psnr_denoised = compare_psnr(denoised[..., :n_channels], + img[..., :n_channels]) + 
# make sure noise is reduced + assert_(psnr_denoised > psnr_noisy) def test_denoise_nl_means_3d(): - img = np.zeros((20, 20, 10)) - img[5:-5, 5:-5, 3:-3] = 1. - img += 0.3*np.random.randn(*img.shape) - denoised = restoration.denoise_nl_means(img, 5, 4, 0.2, fast_mode=True, - multichannel=False) - # make sure noise is reduced - assert_(img.std() > denoised.std()) - denoised = restoration.denoise_nl_means(img, 5, 4, 0.2, fast_mode=False, - multichannel=False) - # make sure noise is reduced - assert_(img.std() > denoised.std()) + img = np.zeros((12, 12, 8)) + img[5:-5, 5:-5, 2:-2] = 1. + sigma = 0.3 + imgn = img + sigma * np.random.randn(*img.shape) + psnr_noisy = compare_psnr(img, imgn) + for s in [sigma, 0]: + denoised = restoration.denoise_nl_means(imgn, 3, 4, h=0.75 * sigma, + fast_mode=True, + multichannel=False, sigma=s) + # make sure noise is reduced + assert_(compare_psnr(img, denoised) > psnr_noisy) + denoised = restoration.denoise_nl_means(imgn, 3, 4, h=0.75 * sigma, + fast_mode=False, + multichannel=False, sigma=s) + # make sure noise is reduced + assert_(compare_psnr(img, denoised) > psnr_noisy) def test_denoise_nl_means_multichannel(): - img = np.zeros((21, 20, 10)) - img[10, 9:11, 2:-2] = 1. - img += 0.3*np.random.randn(*img.shape) + # for true 3D data, 3D denoising is better than denoising as 2D+channels + img = np.zeros((13, 10, 8)) + img[6, 4:6, 2:-2] = 1. + sigma = 0.3 + imgn = img + sigma * np.random.randn(*img.shape) denoised_wrong_multichannel = restoration.denoise_nl_means( - img, 5, 4, 0.1, fast_mode=True, multichannel=True) + imgn, 3, 4, 0.6 * sigma, fast_mode=True, multichannel=True) denoised_ok_multichannel = restoration.denoise_nl_means( - img, 5, 4, 0.1, fast_mode=True, multichannel=False) - snr_wrong = 10 * np.log10(1. / - ((denoised_wrong_multichannel - img)**2).mean()) - snr_ok = 10 * np.log10(1. 
/ - ((denoised_ok_multichannel - img)**2).mean()) - assert_(snr_ok > snr_wrong) + imgn, 3, 4, 0.6 * sigma, fast_mode=True, multichannel=False) + psnr_wrong = compare_psnr(img, denoised_wrong_multichannel) + psnr_ok = compare_psnr(img, denoised_ok_multichannel) + assert_(psnr_ok > psnr_wrong) def test_denoise_nl_means_wrong_dimension(): img = np.zeros((5, 5, 5, 5)) - assert_raises(NotImplementedError, restoration.denoise_nl_means, img, - multichannel=True) + with testing.raises(NotImplementedError): + restoration.denoise_nl_means(img, multichannel=True) def test_no_denoising_for_small_h(): @@ -392,29 +384,48 @@ noisy = img + sigma * rstate.randn(*(img.shape)) noisy = np.clip(noisy, 0, 1) - # employ a single, uniform threshold instead of BayesShrink sigmas - denoised = _wavelet_threshold(noisy, wavelet='db1', threshold=sigma) + # employ a single, user-specified threshold instead of BayesShrink sigmas + denoised = _wavelet_threshold(noisy, wavelet='db1', method=None, + threshold=sigma) psnr_noisy = compare_psnr(img, noisy) psnr_denoised = compare_psnr(img, denoised) assert_(psnr_denoised > psnr_noisy) + # either method or threshold must be defined + with testing.raises(ValueError): + _wavelet_threshold(noisy, wavelet='db1', method=None, threshold=None) + + # warns if a threshold is provided in a case where it would be ignored + with expected_warnings(["Thresholding method "]): + _wavelet_threshold(noisy, wavelet='db1', method='BayesShrink', + threshold=sigma) + def test_wavelet_denoising_nd(): rstate = np.random.RandomState(1234) - for ndim in range(1, 5): - # Generate a very simple test image - img = 0.2*np.ones((16, )*ndim) - img[[slice(5, 13), ] * ndim] = 0.8 - - sigma = 0.1 - noisy = img + sigma * rstate.randn(*(img.shape)) - noisy = np.clip(noisy, 0, 1) - - # Verify that SNR is improved with internally estimated sigma - denoised = restoration.denoise_wavelet(noisy) - psnr_noisy = compare_psnr(img, noisy) - psnr_denoised = compare_psnr(img, denoised) - 
assert_(psnr_denoised > psnr_noisy) + for method in ['VisuShrink', 'BayesShrink']: + for ndim in range(1, 5): + # Generate a very simple test image + if ndim < 3: + img = 0.2*np.ones((128, )*ndim) + else: + img = 0.2*np.ones((16, )*ndim) + img[[slice(5, 13), ] * ndim] = 0.8 + + sigma = 0.1 + noisy = img + sigma * rstate.randn(*(img.shape)) + noisy = np.clip(noisy, 0, 1) + + # Verify that SNR is improved with internally estimated sigma + denoised = restoration.denoise_wavelet(noisy, method=method) + psnr_noisy = compare_psnr(img, noisy) + psnr_denoised = compare_psnr(img, denoised) + assert_(psnr_denoised > psnr_noisy) + + +def test_wavelet_invalid_method(): + with testing.raises(ValueError): + restoration.denoise_wavelet(np.ones(16), method='Unimplemented') def test_wavelet_denoising_levels(): @@ -443,10 +454,14 @@ # invalid number of wavelet levels results in a ValueError max_level = pywt.dwt_max_level(np.min(img.shape), pywt.Wavelet(wavelet).dec_len) - assert_raises(ValueError, restoration.denoise_wavelet, noisy, - wavelet=wavelet, wavelet_levels=max_level+1) - assert_raises(ValueError, restoration.denoise_wavelet, noisy, - wavelet=wavelet, wavelet_levels=-1) + with testing.raises(ValueError): + restoration.denoise_wavelet( + noisy, + wavelet=wavelet, wavelet_levels=max_level+1) + with testing.raises(ValueError): + restoration.denoise_wavelet( + noisy, + wavelet=wavelet, wavelet_levels=-1) def test_estimate_sigma_gray(): @@ -525,5 +540,93 @@ assert_warns(UserWarning, restoration.denoise_nl_means, img) +def test_cycle_spinning_multichannel(): + sigma = 0.1 + rstate = np.random.RandomState(1234) + + for multichannel in True, False: + if multichannel: + img = astro + # can either omit or be 0 along the channels axis + valid_shifts = [1, (0, 1), (1, 0), (1, 1), (1, 1, 0)] + # can either omit or be 1 on channels axis. 
+ valid_steps = [1, 2, (1, 2), (1, 2, 1)] + # too few or too many shifts or non-zero shift on channels + invalid_shifts = [(1, 1, 2), (1, ), (1, 1, 0, 1)] + # too few or too many shifts or any shifts <= 0 + invalid_steps = [(1, ), (1, 1, 1, 1), (0, 1), (-1, -1)] + else: + img = astro_gray + valid_shifts = [1, (0, 1), (1, 0), (1, 1)] + valid_steps = [1, 2, (1, 2)] + invalid_shifts = [(1, 1, 2), (1, )] + invalid_steps = [(1, ), (1, 1, 1), (0, 1), (-1, -1)] + + noisy = img.copy() + 0.1 * rstate.randn(*(img.shape)) + + denoise_func = restoration.denoise_wavelet + func_kw = dict(sigma=sigma, multichannel=multichannel) + + # max_shifts=0 is equivalent to just calling denoise_func + dn_cc = restoration.cycle_spin(noisy, denoise_func, max_shifts=0, + func_kw=func_kw, + multichannel=multichannel) + dn = denoise_func(noisy, **func_kw) + assert_equal(dn, dn_cc) + + # denoising with cycle spinning will give better PSNR than without + for max_shifts in valid_shifts: + dn_cc = restoration.cycle_spin(noisy, denoise_func, + max_shifts=max_shifts, + func_kw=func_kw, + multichannel=multichannel) + assert_(compare_psnr(img, dn_cc) > compare_psnr(img, dn)) + + for shift_steps in valid_steps: + dn_cc = restoration.cycle_spin(noisy, denoise_func, + max_shifts=2, + shift_steps=shift_steps, + func_kw=func_kw, + multichannel=multichannel) + assert_(compare_psnr(img, dn_cc) > compare_psnr(img, dn)) + + for max_shifts in invalid_shifts: + with testing.raises(ValueError): + dn_cc = restoration.cycle_spin(noisy, denoise_func, + max_shifts=max_shifts, + func_kw=func_kw, + multichannel=multichannel) + for shift_steps in invalid_steps: + with testing.raises(ValueError): + dn_cc = restoration.cycle_spin(noisy, denoise_func, + max_shifts=2, + shift_steps=shift_steps, + func_kw=func_kw, + multichannel=multichannel) + + +def test_cycle_spinning_num_workers(): + img = astro_gray + sigma = 0.1 + rstate = np.random.RandomState(1234) + noisy = img.copy() + 0.1 * rstate.randn(*(img.shape)) + + 
denoise_func = restoration.denoise_wavelet + func_kw = dict(sigma=sigma, multichannel=True) + + # same result whether using 1 worker or multiple workers + dn_cc1 = restoration.cycle_spin(noisy, denoise_func, max_shifts=1, + func_kw=func_kw, multichannel=False, + num_workers=1) + dn_cc2 = restoration.cycle_spin(noisy, denoise_func, max_shifts=1, + func_kw=func_kw, multichannel=False, + num_workers=4) + dn_cc3 = restoration.cycle_spin(noisy, denoise_func, max_shifts=1, + func_kw=func_kw, multichannel=False, + num_workers=None) + assert_almost_equal(dn_cc1, dn_cc2) + assert_almost_equal(dn_cc1, dn_cc3) + + if __name__ == "__main__": - run_module_suite() + testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/restoration/tests/test_inpaint.py skimage-0.14.0/skimage/restoration/tests/test_inpaint.py --- skimage-0.13.1/skimage/restoration/tests/test_inpaint.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/tests/test_inpaint.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,11 @@ from __future__ import print_function, division import numpy as np -from numpy.testing import (run_module_suite, assert_allclose, - assert_raises) from skimage.restoration import inpaint +from skimage._shared import testing +from skimage._shared.testing import assert_allclose + def test_inpaint_biharmonic_2d(): img = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1)) @@ -52,15 +53,14 @@ def test_invalid_input(): img, mask = np.zeros([]), np.zeros([]) - assert_raises(ValueError, inpaint.inpaint_biharmonic, img, mask) + with testing.raises(ValueError): + inpaint.inpaint_biharmonic(img, mask) img, mask = np.zeros((2, 2)), np.zeros((4, 1)) - assert_raises(ValueError, inpaint.inpaint_biharmonic, img, mask) + with testing.raises(ValueError): + inpaint.inpaint_biharmonic(img, mask) img = np.ma.array(np.zeros((2, 2)), mask=[[0, 0], [0, 0]]) mask = np.zeros((2, 2)) - assert_raises(TypeError, inpaint.inpaint_biharmonic, img, mask) - - -if __name__ == '__main__': - 
run_module_suite() + with testing.raises(TypeError): + inpaint.inpaint_biharmonic(img, mask) diff -Nru skimage-0.13.1/skimage/restoration/tests/test_unwrap.py skimage-0.14.0/skimage/restoration/tests/test_unwrap.py --- skimage-0.13.1/skimage/restoration/tests/test_unwrap.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/restoration/tests/test_unwrap.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,12 +1,14 @@ from __future__ import print_function, division import numpy as np -from numpy.testing import (run_module_suite, assert_array_almost_equal_nulp, - assert_almost_equal, assert_array_equal, - assert_raises, assert_) -import warnings - from skimage.restoration import unwrap_phase +import sys + +import warnings +from skimage._shared import testing +from skimage._shared.testing import (assert_array_almost_equal_nulp, + assert_almost_equal, assert_array_equal, + assert_, skipif) from skimage._shared._warnings import expected_warnings @@ -46,27 +48,33 @@ image = np.linspace(0, 10 * np.pi, 100) check_unwrap(image) # Masked arrays are not allowed in 1D - assert_raises(ValueError, check_unwrap, image, True) + with testing.raises(ValueError): + check_unwrap(image, True) # wrap_around is not allowed in 1D - assert_raises(ValueError, unwrap_phase, image, True, seed=0) + with testing.raises(ValueError): + unwrap_phase(image, True, seed=0) -def test_unwrap_2d(): +@testing.parametrize("check_with_mask", (False, True)) +def test_unwrap_2d(check_with_mask): + mask = None x, y = np.ogrid[:8, :16] image = 2 * np.pi * (x * 0.2 + y * 0.1) - yield check_unwrap, image - mask = np.zeros(image.shape, dtype=np.bool) - mask[4:6, 4:8] = True - yield check_unwrap, image, mask + if check_with_mask: + mask = np.zeros(image.shape, dtype=np.bool) + mask[4:6, 4:8] = True + check_unwrap(image, mask) -def test_unwrap_3d(): +@testing.parametrize("check_with_mask", (False, True)) +def test_unwrap_3d(check_with_mask): + mask = None x, y, z = np.ogrid[:8, :12, :16] image = 2 * np.pi * (x 
* 0.2 + y * 0.1 + z * 0.05) - yield check_unwrap, image - mask = np.zeros(image.shape, dtype=np.bool) - mask[4:6, 4:6, 1:3] = True - yield check_unwrap, image, mask + if check_with_mask: + mask = np.zeros(image.shape, dtype=np.bool) + mask[4:6, 4:6, 1:3] = True + check_unwrap(image, mask) def check_wrap_around(ndim, axis): @@ -106,10 +114,14 @@ image_unwrap_wrap_around[index_last]) -def test_wrap_around(): - for ndim in (2, 3): - for axis in range(ndim): - yield check_wrap_around, ndim, axis +dim_axis = [(ndim, axis) for ndim in (2, 3) for axis in range(ndim)] + + +@skipif(sys.version_info[:2] == (3, 4), + reason="Doesn't work with python 3.4. See issue #3079") +@testing.parametrize("ndim, axis", dim_axis) +def test_wrap_around(ndim, axis): + check_wrap_around(ndim, axis) def test_mask(): @@ -140,14 +152,19 @@ image_unwrapped_3d = unwrap_phase(image_wrapped_3d) # remove phase shift image_unwrapped_3d -= image_unwrapped_3d[0, 0, 0] - assert_array_almost_equal_nulp(image_unwrapped_3d[:, :, -1], image[i, -1]) + assert_array_almost_equal_nulp(image_unwrapped_3d[:, :, -1], + image[i, -1]) def test_invalid_input(): - assert_raises(ValueError, unwrap_phase, np.zeros([])) - assert_raises(ValueError, unwrap_phase, np.zeros((1, 1, 1, 1))) - assert_raises(ValueError, unwrap_phase, np.zeros((1, 1)), 3 * [False]) - assert_raises(ValueError, unwrap_phase, np.zeros((1, 1)), 'False') + with testing.raises(ValueError): + unwrap_phase(np.zeros([])) + with testing.raises(ValueError): + unwrap_phase(np.zeros((1, 1, 1, 1))) + with testing.raises(ValueError): + unwrap_phase(np.zeros((1, 1)), 3 * [False]) + with testing.raises(ValueError): + unwrap_phase(np.zeros((1, 1)), 'False') def test_unwrap_3d_middle_wrap_around(): @@ -202,7 +219,3 @@ assert_(np.ma.isMaskedArray(unwrap)) assert_(np.sum(unwrap.mask) == 999) # all but one masked assert_(unwrap[0, 0, 0] == 0) - - -if __name__ == "__main__": - run_module_suite() diff -Nru skimage-0.13.1/skimage/segmentation/active_contour_model.py 
skimage-0.14.0/skimage/segmentation/active_contour_model.py --- skimage-0.13.1/skimage/segmentation/active_contour_model.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/active_contour_model.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,8 @@ import numpy as np -from skimage import img_as_float -import scipy import scipy.linalg -from scipy.interpolate import RectBivariateSpline, interp2d -from skimage.filters import sobel +from scipy.interpolate import RectBivariateSpline +from ..util import img_as_float +from ..filters import sobel def active_contour(image, snake, alpha=0.01, beta=0.1, @@ -88,17 +87,6 @@ 25 """ - split_version = scipy.__version__.split('.') - if not(split_version[-1].isdigit()): - split_version.pop() - scipy_version = list(map(int, split_version)) - new_scipy = scipy_version[0] > 0 or \ - (scipy_version[0] == 0 and scipy_version[1] >= 14) - if not new_scipy: - raise NotImplementedError('You are using an old version of scipy. ' - 'Active contours is implemented for scipy versions ' - '0.14.0 and above.') - max_iterations = int(max_iterations) if max_iterations <= 0: raise ValueError("max_iterations should be >0.") @@ -134,14 +122,9 @@ img = w_line*img + w_edge*edge[0] # Interpolate for smoothness: - if new_scipy: - intp = RectBivariateSpline(np.arange(img.shape[1]), - np.arange(img.shape[0]), - img.T, kx=2, ky=2, s=0) - else: - intp = np.vectorize(interp2d(np.arange(img.shape[1]), - np.arange(img.shape[0]), img, kind='cubic', - copy=False, bounds_error=False, fill_value=0)) + intp = RectBivariateSpline(np.arange(img.shape[1]), + np.arange(img.shape[0]), + img.T, kx=2, ky=2, s=0) x, y = snake[:, 0].astype(np.float), snake[:, 1].astype(np.float) xsave = np.empty((convergence_order, len(x))) @@ -192,12 +175,8 @@ # Explicit time stepping for image energy minimization: for i in range(max_iterations): - if new_scipy: - fx = intp(x, y, dx=1, grid=False) - fy = intp(x, y, dy=1, grid=False) - else: - fx = intp(x, y, dx=1) - 
fy = intp(x, y, dy=1) + fx = intp(x, y, dx=1, grid=False) + fy = intp(x, y, dy=1, grid=False) if sfixed: fx[0] = 0 fy[0] = 0 @@ -233,7 +212,7 @@ ysave[j, :] = y else: dist = np.min(np.max(np.abs(xsave-x[None, :]) + - np.abs(ysave-y[None, :]), 1)) + np.abs(ysave-y[None, :]), 1)) if dist < convergence: break diff -Nru skimage-0.13.1/skimage/segmentation/_chan_vese.py skimage-0.14.0/skimage/segmentation/_chan_vese.py --- skimage-0.13.1/skimage/segmentation/_chan_vese.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/_chan_vese.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,338 @@ +import numpy as np +from scipy.ndimage import distance_transform_edt as distance + + +def _cv_curvature(phi): + """Returns the 'curvature' of a level set 'phi'. + """ + P = np.pad(phi, 1, mode='edge') + fy = (P[2:, 1:-1] - P[:-2, 1:-1]) / 2.0 + fx = (P[1:-1, 2:] - P[1:-1, :-2]) / 2.0 + fyy = P[2:, 1:-1] + P[:-2, 1:-1] - 2*phi + fxx = P[1:-1, 2:] + P[1:-1, :-2] - 2*phi + fxy = .25 * (P[2:, 2:] + P[:-2, :-2] - P[:-2, 2:] - P[2:, :-2]) + grad2 = fx**2 + fy**2 + K = ((fxx*fy**2 - 2*fxy*fx*fy + fyy*fx**2) / + (grad2*np.sqrt(grad2) + 1e-8)) + return K + + +def _cv_calculate_variation(image, phi, mu, lambda1, lambda2, dt): + """Returns the variation of level set 'phi' based on algorithm parameters. + """ + eta = 1e-16 + P = np.pad(phi, 1, mode='edge') + + phixp = P[1:-1, 2:] - P[1:-1, 1:-1] + phixn = P[1:-1, 1:-1] - P[1:-1, :-2] + phix0 = (P[1:-1, 2:] - P[1:-1, :-2]) / 2.0 + + phiyp = P[2:, 1:-1] - P[1:-1, 1:-1] + phiyn = P[1:-1, 1:-1] - P[:-2, 1:-1] + phiy0 = (P[2:, 1:-1] - P[:-2, 1:-1]) / 2.0 + + C1 = 1. / np.sqrt(eta + phixp**2 + phiy0**2) + C2 = 1. / np.sqrt(eta + phixn**2 + phiy0**2) + C3 = 1. / np.sqrt(eta + phix0**2 + phiyp**2) + C4 = 1. 
/ np.sqrt(eta + phix0**2 + phiyn**2) + + K = (P[1:-1, 2:] * C1 + P[1:-1, :-2] * C2 + + P[2:, 1:-1] * C3 + P[:-2, 1:-1] * C4) + + Hphi = 1 * (phi > 0) + (c1, c2) = _cv_calculate_averages(image, Hphi) + + difference_from_average_term = (- lambda1 * (image-c1)**2 + + lambda2 * (image-c2)**2) + new_phi = (phi + (dt*_cv_delta(phi)) * + (mu*K + difference_from_average_term)) + return new_phi / (1 + mu * dt * _cv_delta(phi) * (C1+C2+C3+C4)) + + +def _cv_heavyside(x, eps=1.): + """Returns the result of a regularised heavyside function of the + input value(s). + """ + return 0.5 * (1. + (2./np.pi) * np.arctan(x/eps)) + + +def _cv_delta(x, eps=1.): + """Returns the result of a regularised dirac function of the + input value(s). + """ + return eps / (eps**2 + x**2) + + +def _cv_calculate_averages(image, Hphi): + """Returns the average values 'inside' and 'outside'. + """ + H = Hphi + Hinv = 1. - H + Hsum = np.sum(H) + Hinvsum = np.sum(Hinv) + avg_inside = np.sum(image * H) + avg_oustide = np.sum(image * Hinv) + if Hsum != 0: + avg_inside /= Hsum + if Hinvsum != 0: + avg_oustide /= Hinvsum + return (avg_inside, avg_oustide) + + +def _cv_difference_from_average_term(image, Hphi, lambda_pos, lambda_neg): + """Returns the 'energy' contribution due to the difference from + the average value within a region at each point. + """ + (c1, c2) = _cv_calculate_averages(image, Hphi) + Hinv = 1. - Hphi + return (lambda_pos * (image-c1)**2 * Hphi + + lambda_neg * (image-c2)**2 * Hinv) + + +def _cv_edge_length_term(phi, mu): + """Returns the 'energy' contribution due to the length of the + edge between regions at each point, multiplied by a factor 'mu'. + """ + toret = _cv_curvature(phi) + return mu * toret + + +def _cv_energy(image, phi, mu, lambda1, lambda2): + """Returns the total 'energy' of the current level set function. 
+ """ + H = _cv_heavyside(phi) + avgenergy = _cv_difference_from_average_term(image, H, lambda1, lambda2) + lenenergy = _cv_edge_length_term(phi, mu) + return np.sum(avgenergy) + np.sum(lenenergy) + + +def _cv_reset_level_set(phi): + """This is a placeholder function as resetting the level set is not + strictly necessary, and has not been done for this implementation. + """ + return phi + + +def _cv_checkerboard(image_size, square_size): + """Generates a checkerboard level set function. + + According to Pascal Getreuer, such a level set function has fast convergence. + """ + yv = np.arange(image_size[0]).reshape(image_size[0], 1) + xv = np.arange(image_size[1]) + return (np.sin(np.pi/square_size*yv) * + np.sin(np.pi/square_size*xv)) + + +def _cv_large_disk(image_size): + """Generates a disk level set function. + + The disk covers the whole image along its smallest dimension. + """ + res = np.ones(image_size) + centerY = int((image_size[0]-1) / 2) + centerX = int((image_size[1]-1) / 2) + res[centerY, centerX] = 0. + radius = float(min(centerX, centerY)) + return (radius-distance(res)) / radius + + +def _cv_small_disk(image_size): + """Generates a disk level set function. + + The disk covers half of the image along its smallest dimension. + """ + res = np.ones(image_size) + centerY = int((image_size[0]-1) / 2) + centerX = int((image_size[1]-1) / 2) + res[centerY, centerX] = 0. + radius = float(min(centerX, centerY)) / 2.0 + return (radius-distance(res)) / (radius*3) + + +def _cv_init_level_set(init_level_set, image_shape): + """Generates an initial level set function conditional on input arguments. 
+ """ + if type(init_level_set) == str: + if init_level_set == 'checkerboard': + res = _cv_checkerboard(image_shape, 5) + elif init_level_set == 'disk': + res = _cv_large_disk(image_shape) + elif init_level_set == 'small disk': + res = _cv_small_disk(image_shape) + else: + raise ValueError("Incorrect name for starting level set preset.") + else: + res = init_level_set + return res + + +def chan_vese(image, mu=0.25, lambda1=1.0, lambda2=1.0, tol=1e-3, max_iter=500, + dt=0.5, init_level_set='checkerboard', + extended_output=False): + """Chan-Vese segmentation algorithm. + + Active contour model by evolving a level set. Can be used to + segment objects without clearly defined boundaries. + + Parameters + ---------- + image : (M, N) ndarray + Grayscale image to be segmented. + mu : float, optional + 'edge length' weight parameter. Higher `mu` values will + produce a 'round' edge, while values closer to zero will + detect smaller objects. + lambda1 : float, optional + 'difference from average' weight parameter for the output + region with value 'True'. If it is lower than `lambda2`, this + region will have a larger range of values than the other. + lambda2 : float, optional + 'difference from average' weight parameter for the output + region with value 'False'. If it is lower than `lambda1`, this + region will have a larger range of values than the other. + tol : float, positive, optional + Level set variation tolerance between iterations. If the + L2 norm difference between the level sets of successive + iterations normalized by the area of the image is below this + value, the algorithm will assume that the solution was + reached. + max_iter : uint, optional + Maximum number of iterations allowed before the algorithm + interrupts itself. + dt : float, optional + A multiplication factor applied at calculations for each step, + serves to accelerate the algorithm. While higher values may + speed up the algorithm, they may also lead to convergence + problems. 
+ init_level_set : str or (M, N) ndarray, optional + Defines the starting level set used by the algorithm. + If a string is inputted, a level set that matches the image + size will automatically be generated. Alternatively, it is + possible to define a custom level set, which should be an + array of float values, with the same shape as 'image'. + Accepted string values are as follows. + + 'checkerboard' + the starting level set is defined as + sin(x/5*pi)*sin(y/5*pi), where x and y are pixel + coordinates. This level set has fast convergence, but may + fail to detect implicit edges. + 'disk' + the starting level set is defined as the opposite + of the distance from the center of the image minus half of + the minimum value between image width and image height. + This is somewhat slower, but is more likely to properly + detect implicit edges. + 'small disk' + the starting level set is defined as the + opposite of the distance from the center of the image + minus a quarter of the minimum value between image width + and image height. + extended_output : bool, optional + If set to True, the return value will be a tuple containing + the three return values (see below). If set to False which + is the default value, only the 'segmentation' array will be + returned. + + Returns + ------- + segmentation : (M, N) ndarray, bool + Segmentation produced by the algorithm. + phi : (M, N) ndarray of floats + Final level set computed by the algorithm. + energies : list of floats + Shows the evolution of the 'energy' for each step of the + algorithm. This should allow to check whether the algorithm + converged. + + Notes + ----- + The Chan-Vese Algorithm is designed to segment objects without + clearly defined boundaries. 
This algorithm is based on level sets + that are evolved iteratively to minimize an energy, which is + defined by weighted values corresponding to the sum of differences + intensity from the average value outside the segmented region, the + sum of differences from the average value inside the segmented + region, and a term which is dependent on the length of the + boundary of the segmented region. + + This algorithm was first proposed by Tony Chan and Luminita Vese, + in a publication entitled "An Active Countour Model Without Edges" + [1]_. + + This implementation of the algorithm is somewhat simplified in the + sense that the area factor 'nu' described in the original paper is + not implemented, and is only suitable for grayscale images. + + Typical values for `lambda1` and `lambda2` are 1. If the + 'background' is very different from the segmented object in terms + of distribution (for example, a uniform black image with figures + of varying intensity), then these values should be different from + each other. + + Typical values for mu are between 0 and 1, though higher values + can be used when dealing with shapes with very ill-defined + contours. + + The 'energy' which this algorithm tries to minimize is defined + as the sum of the differences from the average within the region + squared and weighed by the 'lambda' factors to which is added the + length of the contour multiplied by the 'mu' factor. + + Supports 2D grayscale images only, and does not implement the area + term described in the original article. + + References + ---------- + .. [1] An Active Contour Model without Edges, Tony Chan and + Luminita Vese, Scale-Space Theories in Computer Vision, + 1999, DOI:10.1007/3-540-48236-9_13 + .. [2] Chan-Vese Segmentation, Pascal Getreuer Image Processing On + Line, 2 (2012), pp. 214-224, + DOI:10.5201/ipol.2012.g-cv + .. 
[3] The Chan-Vese Algorithm - Project Report, Rami Cohen, + http://arxiv.org/abs/1107.2782, 2011 + """ + if len(image.shape) != 2: + raise ValueError("Input image should be a 2D array.") + + phi = _cv_init_level_set(init_level_set, image.shape) + + if type(phi) != np.ndarray or phi.shape != image.shape: + raise ValueError("The dimensions of initial level set do not " + "match the dimensions of image.") + + image = image - np.min(image) + if np.max(image) != 0: + image = image / np.max(image) + + i = 0 + old_energy = _cv_energy(image, phi, mu, lambda1, lambda2) + energies = [] + phivar = tol + 1 + segmentation = phi > 0 + + while(phivar > tol and i < max_iter): + # Save old level set values + oldphi = phi + + # Calculate new level set + phi = _cv_calculate_variation(image, phi, mu, lambda1, lambda2, dt) + phi = _cv_reset_level_set(phi) + phivar = np.sqrt(((phi-oldphi)**2).mean()) + + # Extract energy and compare to previous level set and + # segmentation to see if continuing is necessary + segmentation = phi > 0 + new_energy = _cv_energy(image, phi, mu, lambda1, lambda2) + + # Save old energy values + energies.append(old_energy) + old_energy = new_energy + i += 1 + + if extended_output: + return (segmentation, phi, energies) + else: + return segmentation diff -Nru skimage-0.13.1/skimage/segmentation/__init__.py skimage-0.14.0/skimage/segmentation/__init__.py --- skimage-0.13.1/skimage/segmentation/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -7,6 +7,10 @@ from ._clear_border import clear_border from ._join import join_segmentations, relabel_from_one, relabel_sequential from ..morphology import watershed +from ._chan_vese import chan_vese +from .morphsnakes import (morphological_geodesic_active_contour, + morphological_chan_vese, inverse_gaussian_gradient, + circle_level_set, checkerboard_level_set) __all__ = ['random_walker', @@ -20,4 +24,11 @@ 'join_segmentations', 
'relabel_from_one', 'relabel_sequential', - 'watershed'] + 'watershed', + 'chan_vese', + 'morphological_geodesic_active_contour', + 'morphological_chan_vese', + 'inverse_gaussian_gradient', + 'circle_level_set', + 'checkerboard_level_set' + ] diff -Nru skimage-0.13.1/skimage/segmentation/_join.py skimage-0.14.0/skimage/segmentation/_join.py --- skimage-0.13.1/skimage/segmentation/_join.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/_join.py 2018-05-29 01:27:44.000000000 +0000 @@ -115,7 +115,7 @@ array([5, 5, 6, 6, 7, 9, 8]) """ m = label_field.max() - if not np.issubdtype(label_field.dtype, np.int): + if not np.issubdtype(label_field.dtype, np.signedinteger): new_type = np.min_scalar_type(int(m)) label_field = label_field.astype(new_type) m = m.astype(new_type) # Ensures m is an integer diff -Nru skimage-0.13.1/skimage/segmentation/morphsnakes.py skimage-0.14.0/skimage/segmentation/morphsnakes.py --- skimage-0.13.1/skimage/segmentation/morphsnakes.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/morphsnakes.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,442 @@ +# -*- coding: utf-8 -*- + +from itertools import cycle + +import numpy as np +from scipy import ndimage as ndi + +from .._shared.utils import assert_nD + +__all__ = ['morphological_chan_vese', + 'morphological_geodesic_active_contour', + 'inverse_gaussian_gradient', + 'circle_level_set', + 'checkerboard_level_set' + ] + + +class _fcycle(object): + + def __init__(self, iterable): + """Call functions from the iterable each time it is called.""" + self.funcs = cycle(iterable) + + def __call__(self, *args, **kwargs): + f = next(self.funcs) + return f(*args, **kwargs) + + +# SI and IS operators for 2D and 3D. 
+_P2 = [np.eye(3), + np.array([[0, 1, 0]] * 3), + np.flipud(np.eye(3)), + np.rot90([[0, 1, 0]] * 3)] +_P3 = [np.zeros((3, 3, 3)) for i in range(9)] + +_P3[0][:, :, 1] = 1 +_P3[1][:, 1, :] = 1 +_P3[2][1, :, :] = 1 +_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1 +_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1 +_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1 +_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1 +_P3[7][[0, 1, 2], [0, 1, 2], :] = 1 +_P3[8][[0, 1, 2], [2, 1, 0], :] = 1 + + +def sup_inf(u): + """SI operator.""" + + if np.ndim(u) == 2: + P = _P2 + elif np.ndim(u) == 3: + P = _P3 + else: + raise ValueError("u has an invalid number of dimensions " + "(should be 2 or 3)") + + erosions = [] + for P_i in P: + erosions.append(ndi.binary_erosion(u, P_i)) + + return np.array(erosions, dtype=np.int8).max(0) + + +def inf_sup(u): + """IS operator.""" + + if np.ndim(u) == 2: + P = _P2 + elif np.ndim(u) == 3: + P = _P3 + else: + raise ValueError("u has an invalid number of dimensions " + "(should be 2 or 3)") + + dilations = [] + for P_i in P: + dilations.append(ndi.binary_dilation(u, P_i)) + + return np.array(dilations, dtype=np.int8).min(0) + + +_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)), # SIoIS + lambda u: inf_sup(sup_inf(u))]) # ISoSI + + +def _check_input(image, init_level_set): + """Check that shapes of `image` and `init_level_set` match.""" + assert_nD(image, [2, 3]) + + if len(image.shape) != len(init_level_set.shape): + raise ValueError("The dimensions of the initial level set do not " + "match the dimensions of the image.") + + +def _init_level_set(init_level_set, image_shape): + """Auxiliary function for initializing level sets with a string. + + If `init_level_set` is not a string, it is returned as is. 
+ """ + if isinstance(init_level_set, str): + if init_level_set == 'checkerboard': + res = checkerboard_level_set(image_shape) + elif init_level_set == 'circle': + res = circle_level_set(image_shape) + else: + raise ValueError("`init_level_set` not in " + "['checkerboard', 'circle']") + else: + res = init_level_set + return res + + +def circle_level_set(image_shape, center=None, radius=None): + """Create a circle level set with binary values. + + Parameters + ---------- + image_shape : tuple of positive integers + Shape of the image + center : tuple of positive integers, optional + Coordinates of the center of the circle given in (row, column). If not + given, it defaults to the center of the image. + radius : float, optional + Radius of the circle. If not given, it is set to the 75% of the + smallest image dimension. + + Returns + ------- + out : array with shape `image_shape` + Binary level set of the circle with the given `radius` and `center`. + + See also + -------- + checkerboard_level_set + """ + + if center is None: + center = tuple(i // 2 for i in image_shape) + + if radius is None: + radius = min(image_shape) * 3.0 / 8.0 + + grid = np.mgrid[[slice(i) for i in image_shape]] + grid = (grid.T - center).T + phi = radius - np.sqrt(np.sum((grid)**2, 0)) + res = np.int8(phi > 0) + return res + + +def checkerboard_level_set(image_shape, square_size=5): + """Create a checkerboard level set with binary values. + + Parameters + ---------- + image_shape : tuple of positive integers + Shape of the image. + square_size : int, optional + Size of the squares of the checkerboard. It defaults to 5. + + Returns + ------- + out : array with shape `image_shape` + Binary level set of the checkerboard. + + See also + -------- + circle_level_set + """ + + grid = np.mgrid[[slice(i) for i in image_shape]] + grid = (grid // square_size) + + # Alternate 0/1 for even/odd numbers. 
+ grid = grid & 1 + + checkerboard = np.bitwise_xor.reduce(grid, axis=0) + res = np.int8(checkerboard) + return res + + +def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0): + """Inverse of gradient magnitude. + + Compute the magnitude of the gradients in the image and then inverts the + result in the range [0, 1]. Flat areas are assigned values close to 1, + while areas close to borders are assigned values close to 0. + + This function or a similar one defined by the user should be applied over + the image as a preprocessing step before calling + `morphological_geodesic_active_contour`. + + Parameters + ---------- + image : (M, N) or (L, M, N) array + Grayscale image or volume. + alpha : float, optional + Controls the steepness of the inversion. A larger value will make the + transition between the flat areas and border areas steeper in the + resulting array. + sigma : float, optional + Standard deviation of the Gaussian filter applied over the image. + + Returns + ------- + gimage : (M, N) or (L, M, N) array + Preprocessed image (or volume) suitable for + `morphological_geodesic_active_contour`. + """ + gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest') + return 1.0 / np.sqrt(1.0 + alpha * gradnorm) + + +def morphological_chan_vese(image, iterations, init_level_set='checkerboard', + smoothing=1, lambda1=1, lambda2=1, + iter_callback=lambda x: None): + """Morphological Active Contours without Edges (MorphACWE) + + Active contours without edges implemented with morphological operators. It + can be used to segment objects in images and volumes without well defined + borders. It is required that the inside of the object looks different on + average than the outside (i.e., the inner area of the object should be + darker or lighter than the outer area on average). + + Parameters + ---------- + image : (M, N) or (L, M, N) array + Grayscale image or volume to be segmented. 
+ iterations : uint + Number of iterations to run + init_level_set : str, (M, N) array, or (L, M, N) array + Initial level set. If an array is given, it will be binarized and used + as the initial level set. If a string is given, it defines the method + to generate a reasonable initial level set with the shape of the + `image`. Accepted values are 'checkerboard' and 'circle'. See the + documentation of `checkerboard_level_set` and `circle_level_set` + respectively for details about how these level sets are created. + smoothing : uint, optional + Number of times the smoothing operator is applied per iteration. + Reasonable values are around 1-4. Larger values lead to smoother + segmentations. + lambda1 : float, optional + Weight parameter for the outer region. If `lambda1` is larger than + `lambda2`, the outer region will contain a larger range of values than + the inner region. + lambda2 : float, optional + Weight parameter for the inner region. If `lambda2` is larger than + `lambda1`, the inner region will contain a larger range of values than + the outer region. + iter_callback : function, optional + If given, this function is called once per iteration with the current + level set as the only argument. This is useful for debugging or for + plotting intermediate results during the evolution. + + Returns + ------- + out : (M, N) or (L, M, N) array + Final segmentation (i.e., the final level set) + + See also + -------- + circle_level_set, checkerboard_level_set + + Notes + ----- + + This is a version of the Chan-Vese algorithm that uses morphological + operators instead of solving a partial differential equation (PDE) for the + evolution of the contour. The set of morphological operators used in this + algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE + (see [1]_). 
However, morphological operators are do not suffer from the + numerical stability issues typically found in PDEs (it is not necessary to + find the right time step for the evolution), and are computationally + faster. + + The algorithm and its theoretical derivation are described in [1]_. + + References + ---------- + .. [1] A Morphological Approach to Curvature-based Evolution of Curves and + Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE + Transactions on Pattern Analysis and Machine Intelligence (PAMI), + 2014, DOI 10.1109/TPAMI.2013.106 + """ + + init_level_set = _init_level_set(init_level_set, image.shape) + + _check_input(image, init_level_set) + + u = np.int8(init_level_set > 0) + + iter_callback(u) + + for _ in range(iterations): + + # inside = u > 0 + # outside = u <= 0 + c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8) + c1 = (image * u).sum() / float(u.sum() + 1e-8) + + # Image attachment + du = np.gradient(u) + abs_du = np.abs(du).sum(0) + aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2) + + u[aux < 0] = 1 + u[aux > 0] = 0 + + # Smoothing + for _ in range(smoothing): + u = _curvop(u) + + iter_callback(u) + + return u + + +def morphological_geodesic_active_contour(gimage, iterations, + init_level_set='circle', smoothing=1, + threshold='auto', balloon=0, + iter_callback=lambda x: None): + """Morphological Geodesic Active Contours (MorphGAC). + + Geodesic active contours implemented with morphological operators. It can + be used to segment objects with visible but noisy, cluttered, broken + borders. + + Parameters + ---------- + gimage : (M, N) or (L, M, N) array + Preprocessed image or volume to be segmented. This is very rarely the + original image. Instead, this is usually a preprocessed version of the + original image that enhances and highlights the borders (or other + structures) of the object to segment. 
+ `morphological_geodesic_active_contour` will try to stop the contour + evolution in areas where `gimage` is small. See + `morphsnakes.inverse_gaussian_gradient` as an example function to + perform this preprocessing. Note that the quality of + `morphological_geodesic_active_contour` might greatly depend on this + preprocessing. + iterations : uint + Number of iterations to run. + init_level_set : str, (M, N) array, or (L, M, N) array + Initial level set. If an array is given, it will be binarized and used + as the initial level set. If a string is given, it defines the method + to generate a reasonable initial level set with the shape of the + `image`. Accepted values are 'checkerboard' and 'circle'. See the + documentation of `checkerboard_level_set` and `circle_level_set` + respectively for details about how these level sets are created. + smoothing : uint, optional + Number of times the smoothing operator is applied per iteration. + Reasonable values are around 1-4. Larger values lead to smoother + segmentations. + threshold : float, optional + Areas of the image with a value smaller than this threshold will be + considered borders. The evolution of the contour will stop in this + areas. + balloon : float, optional + Balloon force to guide the contour in non-informative areas of the + image, i.e., areas where the gradient of the image is too small to push + the contour towards a border. A negative value will shrink the contour, + while a positive value will expand the contour in these areas. Setting + this to zero will disable the balloon force. + iter_callback : function, optional + If given, this function is called once per iteration with the current + level set as the only argument. This is useful for debugging or for + plotting intermediate results during the evolution. 
+ + Returns + ------- + out : (M, N) or (L, M, N) array + Final segmentation (i.e., the final level set) + + See also + -------- + inverse_gaussian_gradient, circle_level_set, checkerboard_level_set + + Notes + ----- + + This is a version of the Geodesic Active Contours (GAC) algorithm that uses + morphological operators instead of solving partial differential equations + (PDEs) for the evolution of the contour. The set of morphological operators + used in this algorithm are proved to be infinitesimally equivalent to the + GAC PDEs (see [1]_). However, morphological operators are do not suffer + from the numerical stability issues typically found in PDEs (e.g., it is + not necessary to find the right time step for the evolution), and are + computationally faster. + + The algorithm and its theoretical derivation are described in [1]_. + + References + ---------- + .. [1] A Morphological Approach to Curvature-based Evolution of Curves and + Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. 
In IEEE + Transactions on Pattern Analysis and Machine Intelligence (PAMI), + 2014, DOI 10.1109/TPAMI.2013.106 + """ + + image = gimage + init_level_set = _init_level_set(init_level_set, image.shape) + + _check_input(image, init_level_set) + + if threshold == 'auto': + threshold = np.percentile(image, 40) + + structure = np.ones((3,) * len(image.shape), dtype=np.int8) + dimage = np.gradient(image) + # threshold_mask = image > threshold + if balloon != 0: + threshold_mask_balloon = image > threshold / np.abs(balloon) + + u = np.int8(init_level_set > 0) + + iter_callback(u) + + for _ in range(iterations): + + # Balloon + if balloon > 0: + aux = ndi.binary_dilation(u, structure) + elif balloon < 0: + aux = ndi.binary_erosion(u, structure) + if balloon != 0: + u[threshold_mask_balloon] = aux[threshold_mask_balloon] + + # Image attachment + aux = np.zeros_like(image) + du = np.gradient(u) + for el1, el2 in zip(dimage, du): + aux += el1 * el2 + u[aux > 0] = 1 + u[aux < 0] = 0 + + # Smoothing + for _ in range(smoothing): + u = _curvop(u) + + iter_callback(u) + + return u diff -Nru skimage-0.13.1/skimage/segmentation/_quickshift_cy.pyx skimage-0.14.0/skimage/segmentation/_quickshift_cy.pyx --- skimage-0.13.1/skimage/segmentation/_quickshift_cy.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/_quickshift_cy.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -47,7 +47,7 @@ # an effect for very high max_dist. 
# window size for neighboring pixels to consider - cdef double inv_kernel_size_sqr = -0.5 / kernel_size**2 + cdef double inv_kernel_size_sqr = -0.5 / (kernel_size * kernel_size) cdef int kernel_width = ceil(3 * kernel_size) cdef Py_ssize_t height = image.shape[0] @@ -56,7 +56,7 @@ cdef double[:, ::1] densities = np.zeros((height, width), dtype=np.double) - cdef double current_density, closest, dist + cdef double current_density, closest, dist, t cdef Py_ssize_t r, c, r_, c_, channel, r_min, r_max, c_min, c_max cdef double* current_pixel_ptr @@ -73,9 +73,13 @@ for c_ in range(c_min, c_max): dist = 0 for channel in range(channels): - dist += (current_pixel_ptr[channel] - - image[r_, c_, channel])**2 - dist += (r - r_)**2 + (c - c_)**2 + t = (current_pixel_ptr[channel] - + image[r_, c_, channel]) + dist += t * t + t = r - r_ + dist += t * t + t = c - c_ + dist += t * t densities[r, c] += exp(dist * inv_kernel_size_sqr) current_pixel_ptr += channels @@ -106,9 +110,13 @@ # we get crazy memory overhead # (width * height * windowsize**2) for channel in range(channels): - dist += (current_pixel_ptr[channel] - - image[r_, c_, channel])**2 - dist += (r - r_)**2 + (c - c_)**2 + t = (current_pixel_ptr[channel] - + image[r_, c_, channel]) + dist += t * t + t = r - r_ + dist += t * t + t = c - c_ + dist += t * t if dist < closest: closest = dist parent[r, c] = r_ * width + c_ diff -Nru skimage-0.13.1/skimage/segmentation/random_walker_segmentation.py skimage-0.14.0/skimage/segmentation/random_walker_segmentation.py --- skimage-0.13.1/skimage/segmentation/random_walker_segmentation.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/random_walker_segmentation.py 2018-05-29 01:27:44.000000000 +0000 @@ -13,7 +13,6 @@ from .._shared.utils import warn - # executive summary for next code block: try to import umfpack from # scipy, but make sure not to raise a fuss if it fails since it's only # needed to speed up a few cases. 
@@ -39,10 +38,18 @@ amg_loaded = True except ImportError: amg_loaded = False -from scipy.sparse.linalg import cg + from ..util import img_as_float from ..filters import rank_order +from scipy.sparse.linalg import cg +import scipy +from distutils.version import LooseVersion as Version +import functools + +if Version(scipy.__version__) >= Version('1.1'): + cg = functools.partial(cg, atol=0) + #-----------Laplacian-------------------- @@ -188,6 +195,20 @@ return lap +def _check_isolated_seeds(labels): + """ + Prune isolated seed pixels to prevent labeling errors, and + return coordinates and label values of isolated seeds, so + that it is possible to put labels back in random walker output. + """ + fill = ndi.binary_propagation(labels == 0, mask=(labels >= 0)) + isolated = np.logical_and(labels > 0, np.logical_not(fill)) + inds = np.nonzero(isolated) + values = labels[inds] + labels[inds] = -1 + return inds, values + + #----------- Random walker algorithm -------------------------------- @@ -413,6 +434,10 @@ labels = np.copy(labels) label_values = np.unique(labels) + # If some labeled pixels are isolated inside pruned zones, prune them + # as well and keep the labels for the final output + inds_isolated_seeds, isolated_values = _check_isolated_seeds(labels) + # Reorder label values to have consecutive integers (no gaps) if np.any(np.diff(label_values) != 1): mask = labels >= 0 @@ -426,6 +451,8 @@ labels[np.logical_and(np.logical_not(filled), labels == 0)] = -1 del filled labels = np.atleast_3d(labels) + + if np.any(labels < 0): lap_sparse = _build_laplacian(data, spacing, mask=labels >= 0, beta=beta, multichannel=multichannel) @@ -458,6 +485,9 @@ # Clean up results if return_full_prob: labels = labels.astype(np.float) + # Put back labels of isolated seeds + if len(isolated_values) > 0: + labels[inds_isolated_seeds] = isolated_values X = np.array([_clean_labels_ar(Xline, labels, copy=True).reshape(dims) for Xline in X]) for i in range(1, int(labels.max()) + 1): @@ 
-466,6 +496,8 @@ X[i - 1, mask_i] = 1 else: X = _clean_labels_ar(X + 1, labels).reshape(dims) + # Put back labels of isolated seeds + X[inds_isolated_seeds] = isolated_values return X diff -Nru skimage-0.13.1/skimage/segmentation/_slic.pyx skimage-0.14.0/skimage/segmentation/_slic.pyx --- skimage-0.13.1/skimage/segmentation/_slic.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/_slic.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -87,7 +87,7 @@ cdef Py_ssize_t i, c, k, x, y, z, x_min, x_max, y_min, y_max, z_min, z_max cdef char change - cdef double dist_center, cx, cy, cz, dy, dz + cdef double dist_center, cx, cy, cz, dx, dy, dz, t cdef double sz, sy, sx sz = spacing[0] @@ -100,7 +100,7 @@ cdef double dist_color # The reference implementation (Achanta et al.) calls this invxywt - cdef double spatial_weight = float(1) / (step ** 2) + cdef double spatial_weight = float(1) / (step * step) with nogil: for i in range(max_iter): @@ -124,15 +124,19 @@ x_max = min(cx + 2 * step_x + 1, width) for z in range(z_min, z_max): - dz = (sz * (cz - z)) ** 2 + dz = sz * (cz - z) + dz *= dz for y in range(y_min, y_max): - dy = (sy * (cy - y)) ** 2 + dy = sy * (cy - y) + dy *= dy for x in range(x_min, x_max): - dist_center = (dz + dy + (sx * (cx - x)) ** 2) * spatial_weight + dx = sx * (cx - x) + dx *= dx + dist_center = (dz + dy + dx) * spatial_weight dist_color = 0 for c in range(3, n_features): - dist_color += (image_zyx[z, y, x, c - 3] - - segments[k, c]) ** 2 + t = image_zyx[z, y, x, c - 3] - segments[k, c] + dist_color += t * t if slic_zero: dist_center += dist_color / max_dist_color[k] else: @@ -178,8 +182,8 @@ dist_color = 0 for c in range(3, n_features): - dist_color += (image_zyx[z, y, x, c - 3] - - segments[k, c]) ** 2 + t = image_zyx[z, y, x, c - 3] - segments[k, c] + dist_color += t * t # The reference implementation seems to only change # the color if it increases from previous iteration @@ -190,7 +194,6 @@ def 
_enforce_label_connectivity_cython(Py_ssize_t[:, :, ::1] segments, - Py_ssize_t n_segments, Py_ssize_t min_size, Py_ssize_t max_size): """ Helper function to remove small disconnected regions from the labels @@ -199,8 +202,6 @@ ---------- segments : 3D array of int, shape (Z, Y, X) The label field/superpixels found by SLIC. - n_segments: int - Number of specified segments min_size: int Minimum size of the segment max_size: int diff -Nru skimage-0.13.1/skimage/segmentation/slic_superpixels.py skimage-0.14.0/skimage/segmentation/slic_superpixels.py --- skimage-0.13.1/skimage/segmentation/slic_superpixels.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/slic_superpixels.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,4 +1,5 @@ # coding=utf-8 +from __future__ import division import collections as coll import numpy as np @@ -178,7 +179,6 @@ min_size = int(min_size_factor * segment_size) max_size = int(max_size_factor * segment_size) labels = _enforce_label_connectivity_cython(labels, - n_segments, min_size, max_size) diff -Nru skimage-0.13.1/skimage/segmentation/tests/test_active_contour_model.py skimage-0.14.0/skimage/segmentation/tests/test_active_contour_model.py --- skimage-0.13.1/skimage/segmentation/tests/test_active_contour_model.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/tests/test_active_contour_model.py 2018-05-29 01:27:44.000000000 +0000 @@ -3,7 +3,9 @@ from skimage.color import rgb2gray from skimage.filters import gaussian from skimage.segmentation import active_contour -from numpy.testing import assert_equal, assert_allclose, assert_raises + +from skimage._shared import testing +from skimage._shared.testing import assert_equal, assert_allclose def test_periodic_reference(): @@ -13,8 +15,8 @@ x = 220 + 100*np.cos(s) y = 100 + 100*np.sin(s) init = np.array([x, y]).T - snake = active_contour(gaussian(img, 3), init, - alpha=0.015, beta=10, w_line=0, w_edge=1, gamma=0.001) + snake = 
active_contour(gaussian(img, 3), init, alpha=0.015, beta=10, + w_line=0, w_edge=1, gamma=0.001) refx = [299, 298, 298, 298, 298, 297, 297, 296, 296, 295] refy = [98, 99, 100, 101, 102, 103, 104, 105, 106, 108] assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx) @@ -27,7 +29,7 @@ y = np.linspace(136, 50, 100) init = np.array([x, y]).T snake = active_contour(gaussian(img, 1), init, bc='fixed', - alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) + alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) refx = [5, 9, 13, 17, 21, 25, 30, 34, 38, 42] refy = [136, 135, 134, 133, 132, 131, 129, 128, 127, 125] assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx) @@ -40,7 +42,7 @@ y = np.linspace(70, 40, 100) init = np.array([x, y]).T snake = active_contour(gaussian(img, 3), init, bc='free', - alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) + alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) refx = [10, 13, 16, 19, 23, 26, 29, 32, 36, 39] refy = [76, 76, 75, 74, 73, 72, 71, 70, 69, 69] assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx) @@ -59,17 +61,17 @@ y = np.linspace(136, 50, 100) init = np.array([x, y]).T snake = active_contour(imgR, init, bc='fixed', - alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) + alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) refx = [5, 9, 13, 17, 21, 25, 30, 34, 38, 42] refy = [136, 135, 134, 133, 132, 131, 129, 128, 127, 125] assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx) assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy) snake = active_contour(imgG, init, bc='fixed', - alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) + alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx) assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy) - snake = active_contour(imgRGB, init, bc='fixed', - alpha=0.1, beta=1.0, w_line=-5/3., w_edge=0, gamma=0.1) + snake = active_contour(imgRGB, init, bc='fixed', alpha=0.1, beta=1.0, + 
w_line=-5/3., w_edge=0, gamma=0.1) assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx) assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy) @@ -82,16 +84,16 @@ y = 100 + 100*np.sin(s) init = np.array([x, y]).T snake = active_contour(gaussian(img, 3), init, - bc='periodic', alpha=0.015, beta=10, w_line=0, w_edge=1, - gamma=0.001, max_iterations=100) + bc='periodic', alpha=0.015, beta=10, + w_line=0, w_edge=1, gamma=0.001, max_iterations=100) assert np.sum(np.abs(snake[0, :]-snake[-1, :])) < 2 snake = active_contour(gaussian(img, 3), init, - bc='free', alpha=0.015, beta=10, w_line=0, w_edge=1, - gamma=0.001, max_iterations=100) + bc='free', alpha=0.015, beta=10, + w_line=0, w_edge=1, gamma=0.001, max_iterations=100) assert np.sum(np.abs(snake[0, :]-snake[-1, :])) > 2 snake = active_contour(gaussian(img, 3), init, - bc='fixed', alpha=0.015, beta=10, w_line=0, w_edge=1, - gamma=0.001, max_iterations=100) + bc='fixed', alpha=0.015, beta=10, + w_line=0, w_edge=1, gamma=0.001, max_iterations=100) assert_allclose(snake[0, :], [x[0], y[0]], atol=1e-5) @@ -100,11 +102,7 @@ x = np.linspace(5, 424, 100) y = np.linspace(136, 50, 100) init = np.array([x, y]).T - assert_raises(ValueError, active_contour, img, init, - bc='wrong') - assert_raises(ValueError, active_contour, img, init, - max_iterations=-15) - - -if __name__ == "__main__": - np.testing.run_module_suite() + with testing.raises(ValueError): + active_contour(img, init, bc='wrong') + with testing.raises(ValueError): + active_contour(img, init, max_iterations=-15) diff -Nru skimage-0.13.1/skimage/segmentation/tests/test_boundaries.py skimage-0.14.0/skimage/segmentation/tests/test_boundaries.py --- skimage-0.13.1/skimage/segmentation/tests/test_boundaries.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/tests/test_boundaries.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,8 @@ import numpy as np -from numpy.testing import assert_array_equal, assert_allclose from 
skimage.segmentation import find_boundaries, mark_boundaries +from skimage._shared.testing import assert_array_equal, assert_allclose + white = (1, 1, 1) @@ -107,17 +108,13 @@ marked_proj = np.round(np.mean(marked, axis=-1), 2) ref_result = np.array( - [[ 0.55, 0.63, 0.72, 0.69, 0.6 , 0.55, 0.54], - [ 0.45, 0.58, 0.72, 1. , 1. , 1. , 0.69], - [ 0.42, 0.54, 0.65, 1. , 0.44, 1. , 0.89], - [ 0.69, 1. , 1. , 1. , 0.69, 1. , 0.83], - [ 0.96, 1. , 0.38, 1. , 0.79, 1. , 0.53], - [ 0.89, 1. , 1. , 1. , 0.38, 1. , 0.16], - [ 0.57, 0.78, 0.93, 1. , 0.07, 1. , 0.09], - [ 0.2 , 0.52, 0.92, 1. , 1. , 1. , 0.54], - [ 0.02, 0.35, 0.83, 0.9 , 0.78, 0.81, 0.87]]) + [[ 0.55, 0.63, 0.72, 0.69, 0.6 , 0.55, 0.54], + [ 0.45, 0.58, 0.72, 1. , 1. , 1. , 0.69], + [ 0.42, 0.54, 0.65, 1. , 0.44, 1. , 0.89], + [ 0.69, 1. , 1. , 1. , 0.69, 1. , 0.83], + [ 0.96, 1. , 0.38, 1. , 0.79, 1. , 0.53], + [ 0.89, 1. , 1. , 1. , 0.38, 1. , 0.16], + [ 0.57, 0.78, 0.93, 1. , 0.07, 1. , 0.09], + [ 0.2 , 0.52, 0.92, 1. , 1. , 1. , 0.54], + [ 0.02, 0.35, 0.83, 0.9 , 0.78, 0.81, 0.87]]) assert_allclose(marked_proj, ref_result, atol=0.01) - - -if __name__ == "__main__": - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/segmentation/tests/test_chan_vese.py skimage-0.14.0/skimage/segmentation/tests/test_chan_vese.py --- skimage-0.13.1/skimage/segmentation/tests/test_chan_vese.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/tests/test_chan_vese.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,90 @@ +import numpy as np +from skimage.segmentation import chan_vese + +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal + + +def test_chan_vese_flat_level_set(): + # because the algorithm evolves the level set around the + # zero-level, it the level-set has no zero level, the algorithm + # will not produce results in theory. 
However, since a continuous + # approximation of the delta function is used, the algorithm + # still affects the entirety of the level-set. Therefore with + # infinite time, the segmentation will still converge. + img = np.zeros((10, 10)) + img[3:6, 3:6] = np.ones((3, 3)) + ls = np.ones((10, 10)) * 1000 + result = chan_vese(img, mu=0.0, tol=1e-3, init_level_set=ls) + assert_array_equal(result.astype(np.float), np.ones((10, 10))) + result = chan_vese(img, mu=0.0, tol=1e-3, init_level_set=-ls) + assert_array_equal(result.astype(np.float), np.zeros((10, 10))) + + +def test_chan_vese_small_disk_level_set(): + img = np.zeros((10, 10)) + img[3:6, 3:6] = np.ones((3, 3)) + result = chan_vese(img, mu=0.0, tol=1e-3, init_level_set="small disk") + assert_array_equal(result.astype(np.float), img) + + +def test_chan_vese_simple_shape(): + img = np.zeros((10, 10)) + img[3:6, 3:6] = np.ones((3, 3)) + result = chan_vese(img, mu=0.0, tol=1e-8).astype(np.float) + assert_array_equal(result, img) + + +def test_chan_vese_extended_output(): + img = np.zeros((10, 10)) + img[3:6, 3:6] = np.ones((3, 3)) + result = chan_vese(img, mu=0.0, tol=1e-8, extended_output=True) + assert_array_equal(len(result), 3) + + +def test_chan_vese_remove_noise(): + ref = np.zeros((10, 10)) + ref[1:6, 1:6] = np.array([[0, 1, 1, 1, 0], + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 1], + [0, 1, 1, 1, 0]]) + img = ref.copy() + img[8, 3] = 1 + result = chan_vese(img, mu=0.3, tol=1e-3, max_iter=100, dt=10, + init_level_set="disk").astype(np.float) + assert_array_equal(result, ref) + + +def test_chan_vese_incorrect_image_type(): + img = np.zeros((10, 10, 3)) + ls = np.zeros((10, 9)) + with testing.raises(ValueError): + chan_vese(img, mu=0.0, init_level_set=ls) + + +def test_chan_vese_gap_closing(): + ref = np.zeros((20, 20)) + ref[8:15, :] = np.ones((7, 20)) + img = ref.copy() + img[:, 6] = np.zeros((20)) + result = chan_vese(img, mu=0.7, tol=1e-3, max_iter=1000, dt=1000, + 
init_level_set="disk").astype(np.float) + assert_array_equal(result, ref) + + +def test_chan_vese_incorrect_level_set(): + img = np.zeros((10, 10)) + ls = np.zeros((10, 9)) + with testing.raises(ValueError): + chan_vese(img, mu=0.0, init_level_set=ls) + with testing.raises(ValueError): + chan_vese(img, mu=0.0, init_level_set="a") + + +def test_chan_vese_blank_image(): + img = np.zeros((10, 10)) + level_set = np.random.rand(10, 10) + ref = level_set > 0 + result = chan_vese(img, mu=0.0, tol=0.0, init_level_set=level_set) + assert_array_equal(result, ref) diff -Nru skimage-0.13.1/skimage/segmentation/tests/test_clear_border.py skimage-0.14.0/skimage/segmentation/tests/test_clear_border.py --- skimage-0.13.1/skimage/segmentation/tests/test_clear_border.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/tests/test_clear_border.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,8 @@ import numpy as np -from numpy.testing import assert_array_equal, assert_ from skimage.segmentation import clear_border +from skimage._shared.testing import assert_array_equal, assert_ + def test_clear_border(): image = np.array( @@ -77,34 +78,34 @@ def test_clear_border_non_binary_3d(): image3d = np.array( [[[1, 2, 3, 1, 2], - [3, 3, 3, 4, 2], - [3, 4, 3, 4, 2], - [3, 3, 2, 1, 2]], - [[1, 2, 3, 1, 2], - [3, 3, 5, 4, 2], - [3, 4, 5, 4, 2], - [3, 3, 2, 1, 2]], - [[1, 2, 3, 1, 2], - [3, 3, 3, 4, 2], - [3, 4, 3, 4, 2], - [3, 3, 2, 1, 2]], - ]) + [3, 3, 3, 4, 2], + [3, 4, 3, 4, 2], + [3, 3, 2, 1, 2]], + [[1, 2, 3, 1, 2], + [3, 3, 5, 4, 2], + [3, 4, 5, 4, 2], + [3, 3, 2, 1, 2]], + [[1, 2, 3, 1, 2], + [3, 3, 3, 4, 2], + [3, 4, 3, 4, 2], + [3, 3, 2, 1, 2]], + ]) result = clear_border(image3d) expected = np.array( [[[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]], - [[0, 0, 0, 0, 0], - [0, 0, 5, 0, 0], - [0, 0, 5, 0, 0], - [0, 0, 0, 0, 0]], - [[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]], - ]) + [0, 0, 0, 0, 0], + [0, 
0, 0, 0, 0], + [0, 0, 0, 0, 0]], + [[0, 0, 0, 0, 0], + [0, 0, 5, 0, 0], + [0, 0, 5, 0, 0], + [0, 0, 0, 0, 0]], + [[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], + ]) assert_array_equal(result, expected) assert_(not np.all(image3d == result)) @@ -129,37 +130,35 @@ def test_clear_border_non_binary_inplace_3d(): image3d = np.array( [[[1, 2, 3, 1, 2], - [3, 3, 3, 4, 2], - [3, 4, 3, 4, 2], - [3, 3, 2, 1, 2]], - [[1, 2, 3, 1, 2], - [3, 3, 5, 4, 2], - [3, 4, 5, 4, 2], - [3, 3, 2, 1, 2]], - [[1, 2, 3, 1, 2], - [3, 3, 3, 4, 2], - [3, 4, 3, 4, 2], - [3, 3, 2, 1, 2]], - ]) + [3, 3, 3, 4, 2], + [3, 4, 3, 4, 2], + [3, 3, 2, 1, 2]], + [[1, 2, 3, 1, 2], + [3, 3, 5, 4, 2], + [3, 4, 5, 4, 2], + [3, 3, 2, 1, 2]], + [[1, 2, 3, 1, 2], + [3, 3, 3, 4, 2], + [3, 4, 3, 4, 2], + [3, 3, 2, 1, 2]], + ]) result = clear_border(image3d, in_place=True) expected = np.array( [[[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]], - [[0, 0, 0, 0, 0], - [0, 0, 5, 0, 0], - [0, 0, 5, 0, 0], - [0, 0, 0, 0, 0]], - [[0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]], - ]) + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], + [[0, 0, 0, 0, 0], + [0, 0, 5, 0, 0], + [0, 0, 5, 0, 0], + [0, 0, 0, 0, 0]], + [[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]], + ]) assert_array_equal(result, expected) assert_array_equal(image3d, result) -if __name__ == "__main__": - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/segmentation/tests/test_felzenszwalb.py skimage-0.14.0/skimage/segmentation/tests/test_felzenszwalb.py --- skimage-0.13.1/skimage/segmentation/tests/test_felzenszwalb.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/tests/test_felzenszwalb.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,12 @@ import numpy as np -from numpy.testing import (assert_equal, assert_array_equal, assert_raises, - assert_warns, assert_no_warnings) - -from skimage._shared.testing 
import assert_greater, test_parallel -from skimage.segmentation import felzenszwalb from skimage import data +from skimage.segmentation import felzenszwalb + +from skimage._shared import testing +from skimage._shared.testing import (assert_greater, test_parallel, + assert_equal, assert_array_equal, + assert_warns, assert_no_warnings) + @test_parallel() def test_grey(): @@ -21,9 +23,10 @@ hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0] assert_greater(hist[i], 40) + def test_minsize(): # single-channel: - img = data.coins()[20:168,0:128] + img = data.coins()[20:168, 0:128] for min_size in np.arange(10, 100, 10): segments = felzenszwalb(img, min_size=min_size, sigma=3) counts = np.bincount(segments.ravel()) @@ -37,6 +40,7 @@ # actually want to test greater or equal. assert_greater(counts.min() + 1, min_size) + def test_3D(): grey_img = np.zeros((10, 10)) rgb_img = np.zeros((10, 10, 3)) @@ -47,10 +51,11 @@ felzenszwalb(rgb_img, multichannel=True) with assert_warns(RuntimeWarning): felzenszwalb(three_d_img, multichannel=True) - with assert_raises(ValueError): + with testing.raises(ValueError): felzenszwalb(rgb_img, multichannel=False) felzenszwalb(three_d_img, multichannel=False) + def test_color(): # very weak tests. 
img = np.zeros((20, 21, 3)) @@ -75,8 +80,3 @@ assert_equal(len(np.unique(seg)), 2) assert_array_equal(seg[0, :], 0) assert_array_equal(seg[1, :], 1) - - -if __name__ == '__main__': - from numpy import testing - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/segmentation/tests/test_join.py skimage-0.14.0/skimage/segmentation/tests/test_join.py --- skimage-0.13.1/skimage/segmentation/tests/test_join.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/tests/test_join.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,10 @@ import numpy as np -from numpy.testing import assert_array_equal, assert_raises from skimage.segmentation import join_segmentations, relabel_sequential +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal + + def test_join_segmentations(): s1 = np.array([[0, 0, 1, 1], [0, 2, 1, 1], @@ -22,7 +25,8 @@ # test correct exception when arrays are different shapes s3 = np.array([[0, 0, 1, 1], [0, 2, 2, 1]]) - assert_raises(ValueError, join_segmentations, s1, s3) + with testing.raises(ValueError): + join_segmentations(s1, s3) def test_relabel_sequential_offset1(): @@ -31,7 +35,11 @@ ar_relab_ref = np.array([1, 1, 2, 2, 3, 5, 4]) assert_array_equal(ar_relab, ar_relab_ref) fw_ref = np.zeros(100, int) - fw_ref[1] = 1; fw_ref[5] = 2; fw_ref[8] = 3; fw_ref[42] = 4; fw_ref[99] = 5 + fw_ref[1] = 1 + fw_ref[5] = 2 + fw_ref[8] = 3 + fw_ref[42] = 4 + fw_ref[99] = 5 assert_array_equal(fw, fw_ref) inv_ref = np.array([0, 1, 5, 8, 42, 99]) assert_array_equal(inv, inv_ref) @@ -43,7 +51,11 @@ ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8]) assert_array_equal(ar_relab, ar_relab_ref) fw_ref = np.zeros(100, int) - fw_ref[1] = 5; fw_ref[5] = 6; fw_ref[8] = 7; fw_ref[42] = 8; fw_ref[99] = 9 + fw_ref[1] = 5 + fw_ref[5] = 6 + fw_ref[8] = 7 + fw_ref[42] = 8 + fw_ref[99] = 9 assert_array_equal(fw, fw_ref) inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99]) assert_array_equal(inv, inv_ref) @@ -55,7 +67,11 
@@ ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0]) assert_array_equal(ar_relab, ar_relab_ref) fw_ref = np.zeros(100, int) - fw_ref[1] = 5; fw_ref[5] = 6; fw_ref[8] = 7; fw_ref[42] = 8; fw_ref[99] = 9 + fw_ref[1] = 5 + fw_ref[5] = 6 + fw_ref[8] = 7 + fw_ref[42] = 8 + fw_ref[99] = 9 assert_array_equal(fw, fw_ref) inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99]) assert_array_equal(inv, inv_ref) @@ -67,11 +83,11 @@ ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0]) assert_array_equal(ar_relab, ar_relab_ref) fw_ref = np.zeros(100, int) - fw_ref[1] = 5; fw_ref[5] = 6; fw_ref[8] = 7; fw_ref[42] = 8; fw_ref[99] = 9 + fw_ref[1] = 5 + fw_ref[5] = 6 + fw_ref[8] = 7 + fw_ref[42] = 8 + fw_ref[99] = 9 assert_array_equal(fw, fw_ref) inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99]) assert_array_equal(inv, inv_ref) - - -if __name__ == "__main__": - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/segmentation/tests/test_morphsnakes.py skimage-0.14.0/skimage/segmentation/tests/test_morphsnakes.py --- skimage-0.13.1/skimage/segmentation/tests/test_morphsnakes.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/tests/test_morphsnakes.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,141 @@ +import numpy as np +from skimage.segmentation import (morphological_chan_vese, + morphological_geodesic_active_contour, + inverse_gaussian_gradient, + circle_level_set) + +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal + + +def gaussian_blob(): + coords = np.mgrid[-5:6, -5:6] + sqrdistances = (coords ** 2).sum(0) + return np.exp(-sqrdistances / 10) + + +def test_morphsnakes_incorrect_image_shape(): + img = np.zeros((10, 10, 3)) + ls = np.zeros((10, 9)) + + with testing.raises(ValueError): + morphological_chan_vese(img, iterations=1, init_level_set=ls) + with testing.raises(ValueError): + morphological_geodesic_active_contour(img, iterations=1, + init_level_set=ls) + + +def 
test_morphsnakes_incorrect_ndim(): + img = np.zeros((4, 4, 4, 4)) + ls = np.zeros((4, 4, 4, 4)) + + with testing.raises(ValueError): + morphological_chan_vese(img, iterations=1, init_level_set=ls) + with testing.raises(ValueError): + morphological_geodesic_active_contour(img, iterations=1, + init_level_set=ls) + + +def test_morphsnakes_black(): + img = np.zeros((11, 11)) + ls = circle_level_set(img.shape, (5, 5), 3) + + ref_zeros = np.zeros(img.shape, dtype=np.int8) + ref_ones = np.ones(img.shape, dtype=np.int8) + + acwe_ls = morphological_chan_vese(img, iterations=6, init_level_set=ls) + assert_array_equal(acwe_ls, ref_zeros) + + gac_ls = morphological_geodesic_active_contour(img, iterations=6, + init_level_set=ls) + assert_array_equal(gac_ls, ref_zeros) + + gac_ls2 = morphological_geodesic_active_contour(img, iterations=6, + init_level_set=ls, + balloon=1, threshold=-1, + smoothing=0) + assert_array_equal(gac_ls2, ref_ones) + + assert acwe_ls.dtype == gac_ls.dtype == gac_ls2.dtype == np.int8 + + +def test_morphsnakes_simple_shape_chan_vese(): + img = gaussian_blob() + ls1 = circle_level_set(img.shape, (5, 5), 3) + ls2 = circle_level_set(img.shape, (5, 5), 6) + + acwe_ls1 = morphological_chan_vese(img, iterations=10, init_level_set=ls1) + acwe_ls2 = morphological_chan_vese(img, iterations=10, init_level_set=ls2) + + assert_array_equal(acwe_ls1, acwe_ls2) + + assert acwe_ls1.dtype == acwe_ls2.dtype == np.int8 + + +def test_morphsnakes_simple_shape_geodesic_active_contour(): + img = np.float_(circle_level_set((11, 11), (5, 5), 3.5)) + gimg = inverse_gaussian_gradient(img, alpha=10.0, sigma=1.0) + ls = circle_level_set(img.shape, (5, 5), 6) + + ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + dtype=np.int8) + + gac_ls = morphological_geodesic_active_contour(gimg, iterations=10, + init_level_set=ls, + balloon=-1) + assert_array_equal(gac_ls, ref) + assert gac_ls.dtype == np.int8 + + +def test_init_level_sets(): + image = np.zeros((6, 6)) + checkerboard_ls = morphological_chan_vese(image, 0, 'checkerboard') + checkerboard_ref = np.array([[0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [1, 1, 1, 1, 1, 0]], dtype=np.int8) + + circle_ls = morphological_geodesic_active_contour(image, 0, 'circle') + circle_ref = np.array([[0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1], + [0, 1, 1, 1, 1, 1], + [0, 0, 1, 1, 1, 0]], dtype=np.int8) + + assert_array_equal(checkerboard_ls, checkerboard_ref) + assert_array_equal(circle_ls, circle_ref) + + +def test_morphsnakes_3d(): + image = np.zeros((7, 7, 7)) + + evolution = [] + + def callback(x): + evolution.append(x.sum()) + + ls = morphological_chan_vese(image, 5, 'circle', + iter_callback=callback) + + # Check that the initial circle level set is correct + assert evolution[0] == 81 + + # Check that the final level set is correct + assert ls.sum() == 0 + + # Check that the contour is shrinking at every iteration + for v1, v2 in zip(evolution[:-1], evolution[1:]): + assert v1 >= v2 diff -Nru skimage-0.13.1/skimage/segmentation/tests/test_quickshift.py skimage-0.14.0/skimage/segmentation/tests/test_quickshift.py --- skimage-0.13.1/skimage/segmentation/tests/test_quickshift.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/tests/test_quickshift.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,9 @@ import numpy as np -from numpy.testing import assert_equal, assert_array_equal -from nose.tools import assert_true -from skimage._shared.testing import assert_greater, test_parallel from skimage.segmentation import 
quickshift +from skimage._shared.testing import (assert_greater, test_parallel, + assert_equal, assert_array_equal) + @test_parallel() def test_grey(): @@ -45,10 +45,5 @@ # very oversegmented: assert_equal(len(np.unique(seg2)), 7) # still don't cross lines - assert_true((seg2[9, :] != seg2[10, :]).all()) - assert_true((seg2[:, 9] != seg2[:, 10]).all()) - - -if __name__ == '__main__': - from numpy import testing - testing.run_module_suite() + assert (seg2[9, :] != seg2[10, :]).all() + assert (seg2[:, 9] != seg2[:, 10]).all() diff -Nru skimage-0.13.1/skimage/segmentation/tests/test_random_walker.py skimage-0.14.0/skimage/segmentation/tests/test_random_walker.py --- skimage-0.13.1/skimage/segmentation/tests/test_random_walker.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/tests/test_random_walker.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,6 +2,8 @@ from skimage.segmentation import random_walker from skimage.transform import resize from skimage._shared._warnings import expected_warnings +from skimage._shared import testing + # older versions of scipy raise a warning with new NumPy because they use # numpy.rank() instead of arr.ndim or numpy.linalg.matrix_rank. @@ -213,7 +215,9 @@ # Rescale `data` along Z axis data_aniso = np.zeros((n, n, n // 2)) for i, yz in enumerate(data): - data_aniso[i, :, :] = resize(yz, (n, n // 2)) + data_aniso[i, :, :] = resize(yz, (n, n // 2), + mode='constant', + anti_aliasing=False) # Generate new labels small_l = int(lx // 5) @@ -226,7 +230,7 @@ # Test with `spacing` kwarg with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]): labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg', - spacing=(1., 1., 0.5)) + spacing=(1., 1., 0.5)) assert (labels_aniso[13:17, 13:17, 7:9] == 2).all() @@ -240,7 +244,9 @@ # `resize` is not yet 3D capable, so this must be done by looping in 2D. 
data_aniso = np.zeros((n, n * 2, n)) for i, yz in enumerate(data): - data_aniso[i, :, :] = resize(yz, (n * 2, n)) + data_aniso[i, :, :] = resize(yz, (n * 2, n), + mode='constant', + anti_aliasing=False) # Generate new labels small_l = int(lx // 5) @@ -261,7 +267,9 @@ # `resize` is not yet 3D capable, so this must be done by looping in 2D. data_aniso = np.zeros((n, n * 2, n)) for i in range(data.shape[1]): - data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n)) + data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n), + mode='constant', + anti_aliasing=False) # Generate new labels small_l = int(lx // 5) @@ -312,32 +320,50 @@ # Too few dimensions img = np.ones(10) labels = np.arange(10) - np.testing.assert_raises(ValueError, random_walker, img, labels) - np.testing.assert_raises(ValueError, - random_walker, img, labels, multichannel=True) + with testing.raises(ValueError): + random_walker(img, labels) + with testing.raises(ValueError): + random_walker(img, labels, multichannel=True) # Too many dimensions np.random.seed(42) img = np.random.normal(size=(3, 3, 3, 3, 3)) labels = np.arange(3 ** 5).reshape(img.shape) - np.testing.assert_raises(ValueError, random_walker, img, labels) - np.testing.assert_raises(ValueError, - random_walker, img, labels, multichannel=True) + with testing.raises(ValueError): + random_walker(img, labels) + with testing.raises(ValueError): + random_walker(img, labels, multichannel=True) # Spacing incorrect length img = np.random.normal(size=(10, 10)) labels = np.zeros((10, 10)) labels[2, 4] = 2 labels[6, 8] = 5 - np.testing.assert_raises(ValueError, - random_walker, img, labels, spacing=(1,)) + with testing.raises(ValueError): + random_walker(img, labels, spacing=(1,)) # Invalid mode img = np.random.normal(size=(10, 10)) labels = np.zeros((10, 10)) - np.testing.assert_raises(ValueError, - random_walker, img, labels, mode='bad') + with testing.raises(ValueError): + random_walker(img, labels, mode='bad') -if __name__ == '__main__': - 
np.testing.run_module_suite() +def test_isolated_seeds(): + np.random.seed(0) + a = np.random.random((7, 7)) + mask = - np.ones(a.shape) + # This pixel is an isolated seed + mask[1, 1] = 1 + # Unlabeled pixels + mask[3:, 3:] = 0 + # Seeds connected to unlabeled pixels + mask[4, 4] = 2 + mask[6, 6] = 1 + + # Test that no error is raised, and that labels of isolated seeds are OK + res = random_walker(a, mask) + assert res[1, 1] == 1 + res = random_walker(a, mask, return_full_prob=True) + assert res[0, 1, 1] == 1 + assert res[1, 1, 1] == 0 diff -Nru skimage-0.13.1/skimage/segmentation/tests/test_slic.py skimage-0.14.0/skimage/segmentation/tests/test_slic.py --- skimage-0.13.1/skimage/segmentation/tests/test_slic.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/segmentation/tests/test_slic.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,8 +1,10 @@ -import itertools as it +from itertools import product + import numpy as np -from numpy.testing import assert_equal, assert_raises from skimage.segmentation import slic -from skimage._shared.testing import test_parallel + +from skimage._shared import testing +from skimage._shared.testing import test_parallel, assert_equal @test_parallel() @@ -73,8 +75,8 @@ for dim_size in img.shape[:-1]: midpoint = dim_size // 2 slices.append((slice(None, midpoint), slice(midpoint, None))) - slices = list(it.product(*slices)) - colors = list(it.product(*(([0, 1],) * 3))) + slices = list(product(*slices)) + colors = list(product(*(([0, 1],) * 3))) for s, c in zip(slices, colors): img[s] = c img += 0.01 * rnd.normal(size=img.shape) @@ -94,7 +96,7 @@ for dim_size in img.shape: midpoint = dim_size // 2 slices.append((slice(None, midpoint), slice(midpoint, None))) - slices = list(it.product(*slices)) + slices = list(product(*slices)) shades = np.arange(0, 1.000001, 1.0 / 7) for s, sh in zip(slices, shades): img[s] = sh @@ -140,7 +142,8 @@ def test_invalid_lab_conversion(): img = np.array([[1, 1, 1, 0, 0], [1, 1, 0, 0, 0]], np.float) 
+ 1 - assert_raises(ValueError, slic, img, multichannel=True, convert2lab=True) + with testing.raises(ValueError): + slic(img, multichannel=True, convert2lab=True) def test_enforce_connectivity(): @@ -207,9 +210,3 @@ seg = slic(img, sigma=0, n_segments=500, compactness=1, multichannel=False, convert2lab=False) assert np.all(seg.ravel() == np.arange(seg.size)) - - -if __name__ == '__main__': - from numpy import testing - - testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/_shared/fast_exp.h skimage-0.14.0/skimage/_shared/fast_exp.h --- skimage-0.13.1/skimage/_shared/fast_exp.h 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/fast_exp.h 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,40 @@ +/* A fast approximation of the exponential function. + * Reference [1]: https://schraudolph.org/pubs/Schraudolph99.pdf + * Reference [2]: http://dx.doi.org/10.1162/089976600300015033 + * Additional improvements by Leonid Bloch. */ + +/* use just EXP_A = 1512775 for integer version, to avoid FP calculations */ +#define EXP_A (1512775.3951951856938) /* 2^20*ln2 */ +/* For min. RMS error */ +#define EXP_BC 1072632447 /* 1023*2^20 - 60801 */ +/* For min. max. relative error */ +/* #define EXP_BC 1072647449 */ /* 1023*2^20 - 45799 */ +/* For min. 
mean relative error */ +/* #define EXP_BC 1072625005 */ /* 1023*2^20 - 68243 */ + +__inline double fast_exp (double y) +{ + union + { + double d; + struct { int i, j; } n; + char t[8]; + } _eco; + + _eco.n.i = 1; + + switch(_eco.t[0]) { + case 1: + /* Little endian */ + _eco.n.j = (int)(EXP_A*(y)) + EXP_BC; + _eco.n.i = 0; + break; + case 0: + /* Big endian */ + _eco.n.i = (int)(EXP_A*(y)) + EXP_BC; + _eco.n.j = 0; + break; + } + + return _eco.d; +} diff -Nru skimage-0.13.1/skimage/_shared/_geometry.py skimage-0.14.0/skimage/_shared/_geometry.py --- skimage-0.13.1/skimage/_shared/_geometry.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/_geometry.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,6 @@ __all__ = ['polygon_clip', 'polygon_area'] import numpy as np -from matplotlib import path, transforms def polygon_clip(rp, cp, r0, c0, r1, c1): @@ -25,6 +24,8 @@ AGG 2.4 and exposed in Matplotlib. """ + from matplotlib import path, transforms + poly = path.Path(np.vstack((rp, cp)).T, closed=True) clip_rect = transforms.Bbox([[r0, c0], [r1, c1]]) poly_clipped = poly.clip_to_bbox(clip_rect).to_polygons()[0] diff -Nru skimage-0.13.1/skimage/_shared/interpolation.pyx skimage-0.14.0/skimage/_shared/interpolation.pyx --- skimage-0.13.1/skimage/_shared/interpolation.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/interpolation.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,7 @@ -from interpolation cimport coord_map, get_pixel2d -import numpy as np -cimport numpy as cnp +from interpolation cimport coord_map -def coord_map_py(Py_ssize_t dim, long coord, mode): +def coord_map_py(dim, coord, mode): """Python wrapper for `interpolation.coord_map`.""" - cdef char mode_c = ord(mode[0].upper()) + mode_c = ord(mode[0].upper()) return coord_map(dim, coord, mode_c) diff -Nru skimage-0.13.1/skimage/_shared/testing.py skimage-0.14.0/skimage/_shared/testing.py --- skimage-0.13.1/skimage/_shared/testing.py 2017-09-26 
23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/testing.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,5 +1,6 @@ -"""Testing utilities.""" - +""" +Testing utilities. +""" import os import re @@ -7,50 +8,53 @@ import functools from tempfile import NamedTemporaryFile -from numpy import testing import numpy as np +from numpy import testing +from numpy.testing import (assert_array_equal, assert_array_almost_equal, + assert_array_less, assert_array_almost_equal_nulp, + assert_equal, TestCase, assert_allclose, + assert_almost_equal, assert_, assert_warns, + assert_no_warnings) + from ._warnings import expected_warnings import warnings from .. import data, io, img_as_uint, img_as_float, img_as_int, img_as_ubyte +import pytest SKIP_RE = re.compile("(\s*>>>.*?)(\s*)#\s*skip\s+if\s+(.*)$") +skipif = pytest.mark.skipif +parametrize = pytest.mark.parametrize +raises = pytest.raises +fixture = pytest.fixture -def _assert_less(a, b, msg=None): + +def assert_less(a, b, msg=None): message = "%r is not lower than %r" % (a, b) if msg is not None: message += ": " + msg assert a < b, message -def _assert_greater(a, b, msg=None): +def assert_greater(a, b, msg=None): message = "%r is not greater than %r" % (a, b) if msg is not None: message += ": " + msg assert a > b, message -try: - from nose.tools import assert_less -except ImportError: - assert_less = _assert_less - -try: - from nose.tools import assert_greater -except ImportError: - assert_greater = _assert_greater - - def doctest_skip_parser(func): """ Decorator replaces custom skip test markup in doctests Say a function has a docstring:: + >>> something, HAVE_AMODULE, HAVE_BMODULE = 0, False, False >>> something # skip if not HAVE_AMODULE - >>> something + else + 0 >>> something # skip if HAVE_BMODULE + 0 This decorator will evaluate the expression after ``skip if``. If this evaluates to True, then the comment is replaced by ``# doctest: +SKIP``. 
If @@ -61,8 +65,8 @@ global ``HAVE_BMODULE`` is False, the returned function will have docstring:: >>> something # doctest: +SKIP - >>> something + else - >>> something + >>> something + else # doctest: +SKIP + >>> something # doctest: +SKIP """ lines = func.__doc__.split('\n') @@ -88,14 +92,14 @@ return func -def roundtrip(img, plugin, suffix): +def roundtrip(image, plugin, suffix): """Save and read an image using a specified plugin""" if '.' not in suffix: suffix = '.' + suffix temp_file = NamedTemporaryFile(suffix=suffix, delete=False) fname = temp_file.name temp_file.close() - io.imsave(fname, img, plugin=plugin) + io.imsave(fname, image, plugin=plugin) new = io.imread(fname, plugin=plugin) try: os.remove(fname) diff -Nru skimage-0.13.1/skimage/_shared/tests/test_interpolation.py skimage-0.14.0/skimage/_shared/tests/test_interpolation.py --- skimage-0.13.1/skimage/_shared/tests/test_interpolation.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/tests/test_interpolation.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,5 @@ from skimage._shared.interpolation import coord_map_py -from numpy.testing import assert_array_equal - +from skimage._shared.testing import assert_array_equal def test_coord_map(): symmetric = [coord_map_py(4, n, 'S') for n in range(-6, 6)] @@ -19,9 +18,6 @@ expected_reflect = [0, 1, 2, 3, 2, 1, 0, 1, 2, 3, 2, 1] assert_array_equal(reflect, expected_reflect) - constant = [coord_map_py(4, n, 'C') for n in range(-6, 6)] - expected_constant = [0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0] - assert_array_equal(constant, expected_constant) - other = [coord_map_py(4, n, 'undefined') for n in range(-6, 6)] - assert_array_equal(other, list(range(-6, 6))) + expected_other = list(range(-6, 6)) + assert_array_equal(other, expected_other) diff -Nru skimage-0.13.1/skimage/_shared/tests/test_safe_as_int.py skimage-0.14.0/skimage/_shared/tests/test_safe_as_int.py --- skimage-0.13.1/skimage/_shared/tests/test_safe_as_int.py 2017-09-26 
23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/tests/test_safe_as_int.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,36 +1,42 @@ import numpy as np from skimage._shared.utils import safe_as_int +from skimage._shared import testing def test_int_cast_not_possible(): - np.testing.assert_raises(ValueError, safe_as_int, 7.1) - np.testing.assert_raises(ValueError, safe_as_int, [7.1, 0.9]) - np.testing.assert_raises(ValueError, safe_as_int, np.r_[7.1, 0.9]) - np.testing.assert_raises(ValueError, safe_as_int, (7.1, 0.9)) - np.testing.assert_raises(ValueError, safe_as_int, ((3, 4, 1), - (2, 7.6, 289))) - - np.testing.assert_raises(ValueError, safe_as_int, 7.1, 0.09) - np.testing.assert_raises(ValueError, safe_as_int, [7.1, 0.9], 0.09) - np.testing.assert_raises(ValueError, safe_as_int, np.r_[7.1, 0.9], 0.09) - np.testing.assert_raises(ValueError, safe_as_int, (7.1, 0.9), 0.09) - np.testing.assert_raises(ValueError, safe_as_int, ((3, 4, 1), - (2, 7.6, 289)), 0.25) + with testing.raises(ValueError): + safe_as_int(7.1) + with testing.raises(ValueError): + safe_as_int([7.1, 0.9]) + with testing.raises(ValueError): + safe_as_int(np.r_[7.1, 0.9]) + with testing.raises(ValueError): + safe_as_int((7.1, 0.9)) + with testing.raises(ValueError): + safe_as_int(((3, 4, 1), + (2, 7.6, 289))) + with testing.raises(ValueError): + safe_as_int(7.1, 0.09) + with testing.raises(ValueError): + safe_as_int([7.1, 0.9], 0.09) + with testing.raises(ValueError): + safe_as_int(np.r_[7.1, 0.9], 0.09) + with testing.raises(ValueError): + safe_as_int((7.1, 0.9), 0.09) + with testing.raises(ValueError): + safe_as_int(((3, 4, 1), + (2, 7.6, 289)), 0.25) def test_int_cast_possible(): - np.testing.assert_equal(safe_as_int(7.1, atol=0.11), 7) - np.testing.assert_equal(safe_as_int(-7.1, atol=0.11), -7) - np.testing.assert_equal(safe_as_int(41.9, atol=0.11), 42) - np.testing.assert_array_equal(safe_as_int([2, 42, 5789234.0, 87, 4]), - np.r_[2, 42, 5789234, 87, 4]) - 
np.testing.assert_array_equal(safe_as_int(np.r_[[[3, 4, 1.000000001], - [7, 2, -8.999999999], - [6, 9, -4234918347.]]]), - np.r_[[[3, 4, 1], - [7, 2, -9], - [6, 9, -4234918347]]]) - - -if __name__ == '__main__': - np.testing.run_module_suite() + testing.assert_equal(safe_as_int(7.1, atol=0.11), 7) + testing.assert_equal(safe_as_int(-7.1, atol=0.11), -7) + testing.assert_equal(safe_as_int(41.9, atol=0.11), 42) + testing.assert_array_equal(safe_as_int([2, 42, 5789234.0, 87, 4]), + np.r_[2, 42, 5789234, 87, 4]) + testing.assert_array_equal(safe_as_int(np.r_[[[3, 4, 1.000000001], + [7, 2, -8.999999999], + [6, 9, -4234918347.]]]), + np.r_[[[3, 4, 1], + [7, 2, -9], + [6, 9, -4234918347]]]) diff -Nru skimage-0.13.1/skimage/_shared/tests/test_testing.py skimage-0.14.0/skimage/_shared/tests/test_testing.py --- skimage-0.13.1/skimage/_shared/tests/test_testing.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/tests/test_testing.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,8 +2,9 @@ """ import numpy as np -from nose.tools import (assert_true, assert_raises, assert_equal) +from numpy.testing import assert_equal from skimage._shared.testing import doctest_skip_parser, test_parallel +from skimage._shared import testing def test_skipper(): @@ -32,8 +33,8 @@ f2 = doctest_skip_parser(f) c2 = doctest_skip_parser(c) - assert_true(f is f2) - assert_true(c is c2) + assert f is f2 + assert c is c2 expected = \ """ Header @@ -53,7 +54,7 @@ f2 = doctest_skip_parser(f) c2 = doctest_skip_parser(c) - assert_true(f is f2) + assert f is f2 expected = \ """ Header @@ -68,8 +69,10 @@ del HAVE_AMODULE f.__doc__ = docstring c.__doc__ = docstring - assert_raises(NameError, doctest_skip_parser, f) - assert_raises(NameError, doctest_skip_parser, c) + with testing.raises(NameError): + doctest_skip_parser(f) + with testing.raises(NameError): + doctest_skip_parser(c) def test_test_parallel(): diff -Nru skimage-0.13.1/skimage/_shared/tests/test_utils.py 
skimage-0.14.0/skimage/_shared/tests/test_utils.py --- skimage-0.13.1/skimage/_shared/tests/test_utils.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/tests/test_utils.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,14 +1,15 @@ from skimage._shared.utils import (copy_func, assert_nD) import numpy.testing as npt -from numpy.testing import assert_raises - - +import numpy as np +from skimage._shared import testing def test_assert_nD(): z = np.random.random(200**2).reshape((200, 200)) x = z[10:30, 30:10] - assert_raises(ValueError, assert_nD, x, 2) + with testing.raises(ValueError): + assert_nD(x, 2) + def test_copyfunc(): def foo(a): diff -Nru skimage-0.13.1/skimage/_shared/tests/test_version_requirements.py skimage-0.14.0/skimage/_shared/tests/test_version_requirements.py --- skimage-0.13.1/skimage/_shared/tests/test_version_requirements.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/tests/test_version_requirements.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,16 +2,16 @@ """ import numpy as np -from numpy.testing import assert_raises, assert_equal -import nose +from numpy.testing import assert_equal from skimage._shared import version_requirements as version_req +from skimage._shared import testing def test_get_module_version(): assert version_req.get_module_version('numpy') assert version_req.get_module_version('scipy') - assert_raises(ImportError, - lambda: version_req.get_module_version('fakenumpy')) + with testing.raises(ImportError): + version_req.get_module_version('fakenumpy') def test_is_installed(): @@ -33,9 +33,9 @@ def bar(): return 0 - assert_raises(ImportError, lambda: bar()) + with testing.raises(ImportError): + bar() def test_get_module(): - assert_equal(version_req.get_module('numpy'), np) - assert_equal(version_req.get_module('nose'), nose) + assert version_req.get_module("numpy") is np diff -Nru skimage-0.13.1/skimage/_shared/transform.pxd skimage-0.14.0/skimage/_shared/transform.pxd --- 
skimage-0.13.1/skimage/_shared/transform.pxd 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/transform.pxd 2018-05-29 01:27:44.000000000 +0000 @@ -1,2 +1,19 @@ -cdef float integrate(float[:, ::1] sat, Py_ssize_t r0, Py_ssize_t c0, - Py_ssize_t r1, Py_ssize_t c1) nogil +import cython +cimport numpy as cnp + + +ctypedef fused integral_floating: + cnp.uint8_t + cnp.uint16_t + cnp.uint32_t + cnp.uint64_t + cnp.int8_t + cnp.int16_t + cnp.int32_t + cnp.int64_t + cython.floating + + +cdef integral_floating integrate(integral_floating[:, ::1] sat, + Py_ssize_t r0, Py_ssize_t c0, + Py_ssize_t r1, Py_ssize_t c1) nogil diff -Nru skimage-0.13.1/skimage/_shared/transform.pyx skimage-0.14.0/skimage/_shared/transform.pyx --- skimage-0.13.1/skimage/_shared/transform.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/transform.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -4,8 +4,9 @@ #cython: wraparound=False -cdef float integrate(float[:, ::1] sat, Py_ssize_t r0, Py_ssize_t c0, - Py_ssize_t r1, Py_ssize_t c1) nogil: +cdef integral_floating integrate(integral_floating[:, ::1] sat, + Py_ssize_t r0, Py_ssize_t c0, + Py_ssize_t r1, Py_ssize_t c1) nogil: """ Using a summed area table / integral image, calculate the sum over a given window. @@ -28,7 +29,7 @@ S : int Sum over the given window. """ - cdef float S = 0 + cdef integral_floating S = 0 S += sat[r1, c1] diff -Nru skimage-0.13.1/skimage/_shared/utils.py skimage-0.14.0/skimage/_shared/utils.py --- skimage-0.13.1/skimage/_shared/utils.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/_shared/utils.py 2018-05-29 01:27:44.000000000 +0000 @@ -120,18 +120,18 @@ Examples -------- - >>> _safe_as_int(7.0) + >>> safe_as_int(7.0) 7 - >>> _safe_as_int([9, 4, 2.9999999999]) - array([9, 4, 3], dtype=int32) + >>> safe_as_int([9, 4, 2.9999999999]) + array([9, 4, 3]) - >>> _safe_as_int(53.01) + >>> safe_as_int(53.1) Traceback (most recent call last): ... 
ValueError: Integer argument required but received 53.1, check inputs. - >>> _safe_as_int(53.01, atol=0.01) + >>> safe_as_int(53.01, atol=0.01) 53 """ diff -Nru skimage-0.13.1/skimage/transform/_geometric.py skimage-0.14.0/skimage/transform/_geometric.py --- skimage-0.13.1/skimage/transform/_geometric.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/_geometric.py 2018-05-29 01:27:44.000000000 +0000 @@ -706,14 +706,16 @@ elif (hasattr(other, '__name__') and other.__name__ == 'inverse' and hasattr(get_bound_method_class(other), '_inv_matrix')): - return ProjectiveTransform(self._inv_matrix.dot(self.params)) + return ProjectiveTransform(other.__self__._inv_matrix.dot(self.params)) else: raise TypeError("Cannot combine transformations of differing " "types.") class AffineTransform(ProjectiveTransform): - """2D affine transformation of the form: + """2D affine transformation. + + Has the following form:: X = a0*x + a1*y + a2 = = sx*x*cos(rotation) - sy*y*sin(rotation + shear) + a2 @@ -938,7 +940,9 @@ class EuclideanTransform(ProjectiveTransform): - """2D Euclidean transformation of the form: + """2D Euclidean transformation. + + Has the following form:: X = a0 * x - b0 * y + a1 = = x * cos(rotation) - y * sin(rotation) + a1 @@ -1035,7 +1039,9 @@ class SimilarityTransform(EuclideanTransform): - """2D similarity transformation of the form: + """2D similarity transformation. + + Has the following form:: X = a0 * x - b0 * y + a1 = = s * x * cos(rotation) - s * y * sin(rotation) + a1 @@ -1139,7 +1145,9 @@ class PolynomialTransform(GeometricTransform): - """2D polynomial transformation of the form: + """2D polynomial transformation. 
+ + Has the following form:: X = sum[j=0:order]( sum[i=0:j]( a_ji * x**(j - i) * y**i )) Y = sum[j=0:order]( sum[i=0:j]( b_ji * x**(j - i) * y**i )) diff -Nru skimage-0.13.1/skimage/transform/hough_transform.py skimage-0.14.0/skimage/transform/hough_transform.py --- skimage-0.13.1/skimage/transform/hough_transform.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/hough_transform.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,4 @@ import numpy as np -from scipy import ndimage -from .. import measure from ._hough_transform import (_hough_circle, _hough_ellipse, _hough_line, @@ -57,13 +55,16 @@ 2 """ - from skimage.feature.peak import _prominent_peaks + from ..feature.peak import _prominent_peaks h, a, d = _prominent_peaks(hspace, min_xdistance=min_angle, min_ydistance=min_distance, threshold=threshold, num_peaks=num_peaks) - return (h, angles[a], dists[d]) + if a.any(): + return (h, angles[a], dists[d]) + else: + return (h, np.array([]), np.array([])) def hough_circle(image, radius, normalize=True, full_output=False): @@ -105,18 +106,17 @@ (25, 35, 23) """ - radius = np.atleast_1d(np.asarray(radius)) return _hough_circle(image, radius.astype(np.intp), normalize=normalize, full_output=full_output) -def hough_ellipse(img, threshold=4, accuracy=1, min_size=4, max_size=None): +def hough_ellipse(image, threshold=4, accuracy=1, min_size=4, max_size=None): """Perform an elliptical Hough transform. Parameters ---------- - img : (M, N) ndarray + image : (M, N) ndarray Input image with nonzero values representing edges. threshold: int, optional Accumulator threshold value. @@ -131,7 +131,7 @@ Returns ------- - result : ndarray with fields [(accumulator, y0, x0, a, b, orientation)] + result : ndarray with fields [(accumulator, yc, xc, a, b, orientation)]. Where ``(yc, xc)`` is the center, ``(a, b)`` the major and minor axes, respectively. The `orientation` value follows `skimage.draw.ellipse_perimeter` convention. @@ -159,16 +159,16 @@ method." 
Pattern Recognition, 2002. Proceedings. 16th International Conference on. Vol. 2. IEEE, 2002 """ - return _hough_ellipse(img, threshold=threshold, accuracy=accuracy, + return _hough_ellipse(image, threshold=threshold, accuracy=accuracy, min_size=min_size, max_size=max_size) -def hough_line(img, theta=None): +def hough_line(image, theta=None): """Perform a straight line Hough transform. Parameters ---------- - img : (M, N) ndarray + image : (M, N) ndarray Input image with nonzero values representing edges. theta : 1D ndarray of double, optional Angles at which to compute the transform, in radians. @@ -211,23 +211,23 @@ .. plot:: hough_tf.py """ - if img.ndim != 2: - raise ValueError('The input image `img` must be 2D.') + if image.ndim != 2: + raise ValueError('The input image `image` must be 2D.') if theta is None: # These values are approximations of pi/2 theta = np.linspace(-np.pi / 2, np.pi / 2, 180) - return _hough_line(img, theta=theta) + return _hough_line(image, theta=theta) -def probabilistic_hough_line(img, threshold=10, line_length=50, line_gap=10, - theta=None): +def probabilistic_hough_line(image, threshold=10, line_length=50, line_gap=10, + theta=None, seed=None): """Return lines from a progressive probabilistic line Hough transform. Parameters ---------- - img : (M, N) ndarray + image : (M, N) ndarray Input image with nonzero values representing edges. threshold : int, optional Threshold @@ -240,6 +240,8 @@ theta : 1D ndarray, dtype=double, optional Angles at which to compute the transform, in radians. If None, use a range from -pi/2 to pi/2. + seed : int, optional + Seed to initialize the random number generator. Returns ------- @@ -254,15 +256,14 @@ Conference on Computer Vision and Pattern Recognition, 1999. 
""" - if img.ndim != 2: - raise ValueError('The input image `img` must be 2D.') + if image.ndim != 2: + raise ValueError('The input image `image` must be 2D.') if theta is None: theta = np.pi / 2 - np.arange(180) / 180.0 * np.pi - return _prob_hough_line(img, threshold=threshold, line_length=line_length, - line_gap=line_gap, theta=theta) - + return _prob_hough_line(image, threshold=threshold, line_length=line_length, + line_gap=line_gap, theta=theta, seed=seed) def hough_circle_peaks(hspaces, radii, min_xdistance=1, min_ydistance=1, @@ -307,16 +308,15 @@ Examples -------- - >>> from skimage import transform as tf - >>> from skimage import draw + >>> from skimage import transform, draw >>> img = np.zeros((120, 100), dtype=int) >>> radius, x_0, y_0 = (20, 99, 50) >>> y, x = draw.circle_perimeter(y_0, x_0, radius) >>> img[x, y] = 1 - >>> hspaces = tf.hough_circle(img, radius) + >>> hspaces = transform.hough_circle(img, radius) >>> accum, cx, cy, rad = hough_circle_peaks(hspaces, [radius,]) """ - from skimage.feature.peak import _prominent_peaks + from ..feature.peak import _prominent_peaks r = [] cx = [] @@ -329,7 +329,6 @@ min_ydistance=min_ydistance, threshold=threshold, num_peaks=num_peaks) - r.extend((rad,)*len(h_p)) cx.extend(x_p) cy.extend(y_p) @@ -350,4 +349,3 @@ r[s][::-1][:tnp]) return (accum[s][::-1], cx[s][::-1], cy[s][::-1], r[s][::-1]) - diff -Nru skimage-0.13.1/skimage/transform/_hough_transform.pyx skimage-0.14.0/skimage/transform/_hough_transform.pyx --- skimage-0.13.1/skimage/transform/_hough_transform.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/_hough_transform.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -7,8 +7,9 @@ cimport numpy as cnp cimport cython -from libc.math cimport abs, fabs, sqrt, ceil, atan2, M_PI -from libc.stdlib cimport rand +from cpython.mem cimport PyMem_Malloc, PyMem_Free +from libc.stdlib cimport abs +from libc.math cimport fabs, sqrt, ceil, atan2, M_PI from ..draw import circle_perimeter @@ -98,8 
+99,8 @@ return acc -def _hough_ellipse(cnp.ndarray img, int threshold=4, double accuracy=1, - int min_size=4, max_size=None): +def _hough_ellipse(cnp.ndarray img, Py_ssize_t threshold=4, double accuracy=1, + Py_ssize_t min_size=4, max_size=None): """Perform an elliptical Hough transform. Parameters @@ -119,7 +120,7 @@ Returns ------- - result : ndarray with fields [(accumulator, y0, x0, a, b, orientation)] + result : ndarray with fields [(accumulator, yc, xc, a, b, orientation)] Where ``(yc, xc)`` is the center, ``(a, b)`` the major and minor axes, respectively. The `orientation` value follows `skimage.draw.ellipse_perimeter` convention. @@ -147,24 +148,32 @@ if img.ndim != 2: raise ValueError('The input image must be 2D.') + # The creation of the array `pixels` results in a rather nasty error + # when the image is empty. + # As discussed in GitHub #2820 and #2996, we opt to return an empty array. + if not np.any(img): + return np.zeros((0, 6)) + cdef Py_ssize_t[:, ::1] pixels = np.row_stack(np.nonzero(img)) + cdef Py_ssize_t num_pixels = pixels.shape[1] cdef list acc = list() cdef list results = list() - cdef double bin_size = accuracy ** 2 + cdef double bin_size = accuracy * accuracy - cdef int max_b_squared + cdef double max_b_squared if max_size is None: if img.shape[0] < img.shape[1]: - max_b_squared = np.round(0.5 * img.shape[0]) ** 2 + max_b_squared = np.round(0.5 * img.shape[0]) else: - max_b_squared = np.round(0.5 * img.shape[1]) ** 2 + max_b_squared = np.round(0.5 * img.shape[1]) + max_b_squared *= max_b_squared else: - max_b_squared = max_size**2 + max_b_squared = max_size * max_size cdef Py_ssize_t p1, p2, p3, p1x, p1y, p2x, p2y, p3x, p3y - cdef double xc, yc, a, b, d, k - cdef double cos_tau_squared, b_squared, f_squared, orientation + cdef double xc, yc, a, b, d, k, dx, dy + cdef double cos_tau_squared, b_squared, orientation for p1 in range(num_pixels): p1x = pixels[1, p1] @@ -175,7 +184,9 @@ p2y = pixels[0, p2] # Candidate: center (xc, yc) and main 
axis a - a = 0.5 * sqrt((p1x - p2x)**2 + (p1y - p2y)**2) + dx = p1x - p2x + dy = p1y - p2y + a = 0.5 * sqrt(dx * dx + dy * dy) if a > 0.5 * min_size: xc = 0.5 * (p1x + p2x) yc = 0.5 * (p1y + p2y) @@ -183,16 +194,19 @@ for p3 in range(num_pixels): p3x = pixels[1, p3] p3y = pixels[0, p3] - - d = sqrt((p3x - xc)**2 + (p3y - yc)**2) + dx = p3x - xc + dy = p3y - yc + d = sqrt(dx * dx + dy * dy) if d > min_size: - f_squared = (p3x - p1x)**2 + (p3y - p1y)**2 - cos_tau_squared = ((a**2 + d**2 - f_squared) - / (2 * a * d))**2 + dx = p3x - p1x + dy = p3y - p1y + cos_tau_squared = ((a*a + d*d - dx*dx - dy*dy) + / (2 * a * d)) + cos_tau_squared *= cos_tau_squared # Consider b2 > 0 and avoid division by zero - k = a**2 - d**2 * cos_tau_squared + k = a*a - d*d * cos_tau_squared if k > 0 and cos_tau_squared < 1: - b_squared = a**2 * d**2 * (1 - cos_tau_squared) / k + b_squared = a*a * d*d * (1 - cos_tau_squared) / k # b2 range is limited to avoid histogram memory # overflow if b_squared <= max_b_squared: @@ -306,15 +320,16 @@ x = x_idxs[i] y = y_idxs[i] for j in range(nthetas): - accum_idx = round((ctheta[j] * x + stheta[j] * y)) + offset + accum_idx = round((ctheta[j] * x + stheta[j] * y)) + offset accum[accum_idx, j] += 1 return accum, theta, bins -def _probabilistic_hough_line(cnp.ndarray img, int threshold, - int line_length, int line_gap, - cnp.ndarray[ndim=1, dtype=cnp.double_t] theta): +def _probabilistic_hough_line(cnp.ndarray img, Py_ssize_t threshold, + Py_ssize_t line_length, Py_ssize_t line_gap, + cnp.ndarray[ndim=1, dtype=cnp.double_t] theta, + seed=None): """Return lines from a progressive probabilistic line Hough transform. Parameters @@ -331,6 +346,8 @@ Increase the parameter to merge broken lines more aggresively. theta : 1D ndarray, dtype=double Angles at which to compute the transform, in radians. + seed : int, optional + Seed to initialize the random number generator. 
Returns ------- @@ -349,160 +366,172 @@ # compute the bins and allocate the accumulator array cdef cnp.ndarray[ndim=2, dtype=cnp.uint8_t] mask = \ - np.zeros((height, width), dtype=np.uint8) - cdef cnp.int32_t[:, ::1] line_end = np.zeros((2, 2), dtype=np.int32) + np.zeros((height, width), dtype=np.uint8) + cdef Py_ssize_t *line_end = \ + PyMem_Malloc(4 * sizeof(Py_ssize_t)) + if not line_end: + raise MemoryError('could not allocate line_end') cdef Py_ssize_t max_distance, offset, num_indexes, index cdef double a, b - cdef Py_ssize_t nidxs, i, j, x, y, px, py, accum_idx - cdef int value, max_value, max_theta + cdef Py_ssize_t nidxs, i, j, k, x, y, px, py, accum_idx, max_theta + cdef Py_ssize_t xflag, x0, y0, dx0, dy0, dx, dy, gap, x1, y1, count + cdef cnp.int64_t value, max_value, cdef int shift = 16 - # maximum line number cutoff - cdef Py_ssize_t lines_max = 2 ** 15 - cdef Py_ssize_t xflag, x0, y0, dx0, dy0, dx, dy, gap, x1, y1, \ - good_line, count - cdef list lines = list() - - max_distance = 2 * ceil((sqrt(img.shape[0] * img.shape[0] + - img.shape[1] * img.shape[1]))) - cdef cnp.int64_t[:, ::1] accum = \ - np.zeros((max_distance, theta.shape[0]), dtype=np.int64) + cdef int good_line + cdef Py_ssize_t nlines = 0 + cdef Py_ssize_t lines_max = 2 ** 15 # maximum line number cutoff + cdef cnp.intp_t[:, :, ::1] lines = np.zeros((lines_max, 2, 2), + dtype=np.intp) + max_distance = 2 * ceil((sqrt(img.shape[0] * img.shape[0] + + img.shape[1] * img.shape[1]))) + cdef cnp.int64_t[:, ::1] accum = np.zeros((max_distance, theta.shape[0]), + dtype=np.int64) offset = max_distance / 2 - nthetas = theta.shape[0] + cdef Py_ssize_t nthetas = theta.shape[0] # compute sine and cosine of angles cdef cnp.double_t[::1] ctheta = np.cos(theta) cdef cnp.double_t[::1] stheta = np.sin(theta) # find the nonzero indexes + cdef cnp.intp_t[:] y_idxs, x_idxs y_idxs, x_idxs = np.nonzero(img) - cdef list points = list(zip(x_idxs, y_idxs)) + # mask all non-zero indexes mask[y_idxs, x_idxs] = 1 - 
while 1: + count = len(x_idxs) + random_state = np.random.RandomState(seed) + random_ = np.arange(count, dtype=np.intp) + random_state.shuffle(random_) + cdef cnp.intp_t[::1] random = random_ - # quit if no remaining points - count = len(points) - if count == 0: - break - - # select random non-zero point - index = rand() % count - x = points[index][0] - y = points[index][1] - del points[index] - - # if previously eliminated, skip - if not mask[y, x]: - continue - - value = 0 - max_value = threshold - 1 - max_theta = -1 - - # apply hough transform on point - for j in range(nthetas): - accum_idx = round((ctheta[j] * x + stheta[j] * y)) + offset - accum[accum_idx, j] += 1 - value = accum[accum_idx, j] - if value > max_value: - max_value = value - max_theta = j - if max_value < threshold: - continue - - # from the random point walk in opposite directions and find line - # beginning and end - a = -stheta[max_theta] - b = ctheta[max_theta] - x0 = x - y0 = y - # calculate gradient of walks using fixed point math - xflag = fabs(a) > fabs(b) - if xflag: - if a > 0: - dx0 = 1 - else: - dx0 = -1 - dy0 = round(b * (1 << shift) / fabs(a)) - y0 = (y0 << shift) + (1 << (shift - 1)) - else: - if b > 0: - dy0 = 1 - else: - dy0 = -1 - dx0 = round(a * (1 << shift) / fabs(b)) - x0 = (x0 << shift) + (1 << (shift - 1)) - - # pass 1: walk the line, merging lines less than specified gap length - for k in range(2): - gap = 0 - px = x0 - py = y0 - dx = dx0 - dy = dy0 - if k > 0: - dx = -dx - dy = -dy - while 1: - if xflag: - x1 = px - y1 = py >> shift + with nogil: + while count > 0: + count -= 1 + # select random non-zero point + index = random[count] + x = x_idxs[index] + y = y_idxs[index] + + # if previously eliminated, skip + if not mask[y, x]: + continue + + value = 0 + max_value = threshold - 1 + max_theta = -1 + + # apply hough transform on point + for j in range(nthetas): + accum_idx = round((ctheta[j] * x + stheta[j] * y)) + offset + accum[accum_idx, j] += 1 + value = 
accum[accum_idx, j] + if value > max_value: + max_value = value + max_theta = j + if max_value < threshold: + continue + + # from the random point walk in opposite directions and find line + # beginning and end + a = -stheta[max_theta] + b = ctheta[max_theta] + x0 = x + y0 = y + # calculate gradient of walks using fixed point math + xflag = fabs(a) > fabs(b) + if xflag: + if a > 0: + dx0 = 1 else: - x1 = px >> shift - y1 = py - # check when line exits image boundary - if x1 < 0 or x1 >= width or y1 < 0 or y1 >= height: - break - gap += 1 - # if non-zero point found, continue the line - if mask[y1, x1]: - gap = 0 - line_end[k, 1] = y1 - line_end[k, 0] = x1 - # if gap to this point was too large, end the line - elif gap > line_gap: - break - px += dx - py += dy - # confirm line length is sufficient - good_line = abs(line_end[1, 1] - line_end[0, 1]) >= line_length or \ - abs(line_end[1, 0] - line_end[0, 0]) >= line_length - - # pass 2: walk the line again and reset accumulator and mask - for k in range(2): - px = x0 - py = y0 - dx = dx0 - dy = dy0 - if k > 0: - dx = -dx - dy = -dy - while 1: - if xflag: - x1 = px - y1 = py >> shift + dx0 = -1 + dy0 = round(b * (1 << shift) / fabs(a)) + y0 = (y0 << shift) + (1 << (shift - 1)) + else: + if b > 0: + dy0 = 1 else: - x1 = px >> shift - y1 = py - # if non-zero point found, continue the line - if mask[y1, x1]: - if good_line: - accum_idx = round((ctheta[j] * x1 \ - + stheta[j] * y1)) + offset - accum[accum_idx, max_theta] -= 1 - mask[y1, x1] = 0 - # exit when the point is the line end - if x1 == line_end[k, 0] and y1 == line_end[k, 1]: + dy0 = -1 + dx0 = round(a * (1 << shift) / fabs(b)) + x0 = (x0 << shift) + (1 << (shift - 1)) + + # pass 1: walk the line, merging lines less than specified gap + # length + for k in range(2): + gap = 0 + px = x0 + py = y0 + dx = dx0 + dy = dy0 + if k > 0: + dx = -dx + dy = -dy + while 1: + if xflag: + x1 = px + y1 = py >> shift + else: + x1 = px >> shift + y1 = py + # check when line exits 
image boundary + if x1 < 0 or x1 >= width or y1 < 0 or y1 >= height: + break + gap += 1 + # if non-zero point found, continue the line + if mask[y1, x1]: + gap = 0 + line_end[2*k] = x1 + line_end[2*k + 1] = y1 + # if gap to this point was too large, end the line + elif gap > line_gap: + break + px += dx + py += dy + + # confirm line length is sufficient + good_line = (abs(line_end[3] - line_end[1]) >= line_length or + abs(line_end[2] - line_end[0]) >= line_length) + + # pass 2: walk the line again and reset accumulator and mask + for k in range(2): + px = x0 + py = y0 + dx = dx0 + dy = dy0 + if k > 0: + dx = -dx + dy = -dy + while 1: + if xflag: + x1 = px + y1 = py >> shift + else: + x1 = px >> shift + y1 = py + # if non-zero point found, continue the line + if mask[y1, x1]: + if good_line: + accum_idx = round( + (ctheta[j] * x1 + stheta[j] * y1)) + offset + accum[accum_idx, max_theta] -= 1 + mask[y1, x1] = 0 + # exit when the point is the line end + if x1 == line_end[2*k] and y1 == line_end[2*k + 1]: + break + px += dx + py += dy + + # add line to the result + if good_line: + lines[nlines, 0, 0] = line_end[0] + lines[nlines, 0, 1] = line_end[1] + lines[nlines, 1, 0] = line_end[2] + lines[nlines, 1, 1] = line_end[3] + nlines += 1 + if nlines > lines_max: break - px += dx - py += dy - - # add line to the result - if good_line: - lines.append(((line_end[0, 0], line_end[0, 1]), - (line_end[1, 0], line_end[1, 1]))) - if len(lines) > lines_max: - return lines - return lines + PyMem_Free(line_end) + return [((line[0, 0], line[0, 1]), (line[1, 0], line[1, 1])) + for line in lines[:nlines]] diff -Nru skimage-0.13.1/skimage/transform/__init__.py skimage-0.14.0/skimage/transform/__init__.py --- skimage-0.13.1/skimage/transform/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -11,8 +11,8 @@ ProjectiveTransform, FundamentalMatrixTransform, EssentialMatrixTransform, PolynomialTransform, 
PiecewiseAffineTransform) -from ._warps import (swirl, resize, rotate, rescale, downscale_local_mean, - warp, warp_coords) +from ._warps import (swirl, resize, rotate, rescale, + downscale_local_mean, warp, warp_coords) from .pyramids import (pyramid_reduce, pyramid_expand, pyramid_gaussian, pyramid_laplacian) from .seam_carving import seam_carve diff -Nru skimage-0.13.1/skimage/transform/integral.py skimage-0.14.0/skimage/transform/integral.py --- skimage-0.13.1/skimage/transform/integral.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/integral.py 2018-05-29 01:27:44.000000000 +0000 @@ -4,7 +4,7 @@ from .._shared.utils import warn -def integral_image(img): +def integral_image(image): """Integral image / summed area table. The integral image contains the sum of all elements above and to the @@ -16,7 +16,7 @@ Parameters ---------- - img : ndarray + image : ndarray Input image. Returns @@ -30,13 +30,13 @@ ACM SIGGRAPH Computer Graphics, vol. 18, 1984, pp. 207-212. """ - S = img - for i in range(img.ndim): + S = image + for i in range(image.ndim): S = S.cumsum(axis=i) return S -def integrate(ii, start, end, *args): +def integrate(ii, start, end): """Use an integral image to integrate over a given window. Parameters @@ -51,11 +51,6 @@ Coordinates of bottom right corner of window(s). Each tuple in the list containing the end row, col, ... index i.e `[(row_win1, col_win1, ...), (row_win2, col_win2, ...), ...]`. - args: optional - For backward compatibility with versions prior to 0.12. - The earlier function signature was `integrate(ii, r0, c0, r1, c1)`, - where `r0`, `c0` are int(lists) specifying start coordinates - of window(s) to be integrated and `r1`, `c1` the end coordinates. 
Returns ------- @@ -75,23 +70,9 @@ >>> integrate(ii, [(1, 0), (3, 3)], [(1, 2), (4, 5)]) array([ 3., 6.]) """ - rows = 1 - # handle input from new input format - if len(args) == 0: - start = np.atleast_2d(np.array(start)) - end = np.atleast_2d(np.array(end)) - rows = start.shape[0] - # handle deprecated input format - else: - warn("The syntax 'integrate(ii, r0, c0, r1, c1)' is " - "deprecated, and will be phased out in release 0.14. " - "The new syntax is " - "'integrate(ii, (r0, c0), (r1, c1))'.") - if isinstance(start, collections.Iterable): - rows = len(start) - args = (start, end) + args - start = np.array(args[:int(len(args)/2)]).T - end = np.array(args[int(len(args)/2):]).T + start = np.atleast_2d(np.array(start)) + end = np.atleast_2d(np.array(end)) + rows = start.shape[0] total_shape = ii.shape total_shape = np.tile(total_shape, [rows, 1]) diff -Nru skimage-0.13.1/skimage/transform/pyramids.py skimage-0.14.0/skimage/transform/pyramids.py --- skimage-0.13.1/skimage/transform/pyramids.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/pyramids.py 2018-05-29 01:27:44.000000000 +0000 @@ -3,23 +3,19 @@ from scipy import ndimage as ndi from ..transform import resize from ..util import img_as_float +from ._warps import _multichannel_default -def _smooth(image, sigma, mode, cval): +def _smooth(image, sigma, mode, cval, multichannel=None): """Return image with each channel smoothed by the Gaussian filter.""" - + multichannel = _multichannel_default(multichannel, image.ndim) smoothed = np.empty(image.shape, dtype=np.double) - # apply Gaussian filter to all dimensions independently - if image.ndim == 3: - for dim in range(image.shape[2]): - ndi.gaussian_filter(image[..., dim], sigma, - output=smoothed[..., dim], - mode=mode, cval=cval) - else: - ndi.gaussian_filter(image, sigma, output=smoothed, - mode=mode, cval=cval) - + # apply Gaussian filter to all channels independently + if multichannel: + sigma = (sigma, )*(image.ndim - 1) + (0, ) + 
ndi.gaussian_filter(image, sigma, output=smoothed, + mode=mode, cval=cval) return smoothed @@ -29,12 +25,12 @@ def pyramid_reduce(image, downscale=2, sigma=None, order=1, - mode='reflect', cval=0): + mode='reflect', cval=0, multichannel=None): """Smooth and then downsample image. Parameters ---------- - image : array + image : ndarray Input image. downscale : float, optional Downscale factor. @@ -50,6 +46,11 @@ cval is the value when mode is equal to 'constant'. cval : float, optional Value to fill past edges of input if mode is 'constant'. + multichannel : bool, optional + Whether the last axis of the image is to be interpreted as multiple + channels or another spatial dimension. By default, is set to True for + 3D (2D+color) inputs, and False for others. Starting in release 0.16, + this will always default to False. Returns ------- @@ -61,34 +62,33 @@ .. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf """ - + multichannel = _multichannel_default(multichannel, image.ndim) _check_factor(downscale) image = img_as_float(image) - rows = image.shape[0] - cols = image.shape[1] - out_rows = math.ceil(rows / float(downscale)) - out_cols = math.ceil(cols / float(downscale)) + out_shape = tuple([math.ceil(d / float(downscale)) for d in image.shape]) + if multichannel: + out_shape = out_shape[:-1] if sigma is None: # automatically determine sigma which covers > 99% of distribution sigma = 2 * downscale / 6.0 - smoothed = _smooth(image, sigma, mode, cval) - out = resize(smoothed, (out_rows, out_cols), order=order, - mode=mode, cval=cval) + smoothed = _smooth(image, sigma, mode, cval, multichannel) + out = resize(smoothed, out_shape, order=order, mode=mode, cval=cval, + anti_aliasing=False) return out def pyramid_expand(image, upscale=2, sigma=None, order=1, - mode='reflect', cval=0): + mode='reflect', cval=0, multichannel=None): """Upsample and then smooth image. Parameters ---------- - image : array + image : ndarray Input image. 
upscale : float, optional Upscale factor. @@ -104,6 +104,12 @@ cval is the value when mode is equal to 'constant'. cval : float, optional Value to fill past edges of input if mode is 'constant'. + multichannel : bool, optional + Whether the last axis of the image is to be interpreted as multiple + channels or another spatial dimension. By default, is set to True for + 3D (2D+color) inputs, and False for others. Starting in release 0.16, + this will always default to False. + Returns ------- @@ -115,29 +121,28 @@ .. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf """ - + multichannel = _multichannel_default(multichannel, image.ndim) _check_factor(upscale) image = img_as_float(image) - rows = image.shape[0] - cols = image.shape[1] - out_rows = math.ceil(upscale * rows) - out_cols = math.ceil(upscale * cols) + out_shape = tuple([math.ceil(upscale * d) for d in image.shape]) + if multichannel: + out_shape = out_shape[:-1] if sigma is None: # automatically determine sigma which covers > 99% of distribution sigma = 2 * upscale / 6.0 - resized = resize(image, (out_rows, out_cols), order=order, - mode=mode, cval=cval) - out = _smooth(resized, sigma, mode, cval) + resized = resize(image, out_shape, order=order, + mode=mode, cval=cval, anti_aliasing=False) + out = _smooth(resized, sigma, mode, cval, multichannel) return out def pyramid_gaussian(image, max_layer=-1, downscale=2, sigma=None, order=1, - mode='reflect', cval=0): + mode='reflect', cval=0, multichannel=None): """Yield images of the Gaussian pyramid formed by the input image. Recursively applies the `pyramid_reduce` function to the image, and yields @@ -150,7 +155,7 @@ Parameters ---------- - image : array + image : ndarray Input image. max_layer : int Number of layers for the pyramid. 0th layer is the original image. @@ -169,6 +174,12 @@ cval is the value when mode is equal to 'constant'. cval : float, optional Value to fill past edges of input if mode is 'constant'. 
+ multichannel : bool, optional + Whether the last axis of the image is to be interpreted as multiple + channels or another spatial dimension. By default, is set to True for + 3D (2D+color) inputs, and False for others. Starting in release 0.16, + this will always default to False. + Returns ------- @@ -180,15 +191,13 @@ .. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf """ - _check_factor(downscale) # cast to float for consistent data type in pyramid image = img_as_float(image) layer = 0 - rows = image.shape[0] - cols = image.shape[1] + current_shape = image.shape prev_layer_image = image yield image @@ -199,23 +208,21 @@ layer += 1 layer_image = pyramid_reduce(prev_layer_image, downscale, sigma, order, - mode, cval) + mode, cval, multichannel=multichannel) - prev_rows = rows - prev_cols = cols + prev_shape = np.asarray(current_shape) prev_layer_image = layer_image - rows = layer_image.shape[0] - cols = layer_image.shape[1] + current_shape = np.asarray(layer_image.shape) # no change to previous pyramid layer - if prev_rows == rows and prev_cols == cols: + if np.all(current_shape == prev_shape): break yield layer_image def pyramid_laplacian(image, max_layer=-1, downscale=2, sigma=None, order=1, - mode='reflect', cval=0): + mode='reflect', cval=0, multichannel=None): """Yield images of the laplacian pyramid formed by the input image. Each layer contains the difference between the downsampled and the @@ -231,7 +238,7 @@ Parameters ---------- - image : array + image : ndarray Input image. max_layer : int Number of layers for the pyramid. 0th layer is the original image. @@ -250,6 +257,12 @@ cval is the value when mode is equal to 'constant'. cval : float, optional Value to fill past edges of input if mode is 'constant'. + multichannel : bool, optional + Whether the last axis of the image is to be interpreted as multiple + channels or another spatial dimension. By default, is set to True for + 3D (2D+color) inputs, and False for others. 
Starting in release 0.16, + this will always default to False. + Returns ------- @@ -262,7 +275,7 @@ .. [2] http://sepwww.stanford.edu/data/media/public/sep/morgan/texturematch/paper_html/node3.html """ - + multichannel = _multichannel_default(multichannel, image.ndim) _check_factor(downscale) # cast to float for consistent data type in pyramid @@ -272,32 +285,28 @@ # automatically determine sigma which covers > 99% of distribution sigma = 2 * downscale / 6.0 - layer = 0 - rows = image.shape[0] - cols = image.shape[1] + current_shape = image.shape - smoothed_image = _smooth(image, sigma, mode, cval) + smoothed_image = _smooth(image, sigma, mode, cval, multichannel) yield image - smoothed_image # build downsampled images until max_layer is reached or downscale process # does not change image size - while layer != max_layer: - layer += 1 + if max_layer == -1: + max_layer = int(np.ceil(math.log(np.max(current_shape), downscale))) - out_rows = math.ceil(rows / float(downscale)) - out_cols = math.ceil(cols / float(downscale)) + for layer in range(max_layer): - resized_image = resize(smoothed_image, (out_rows, out_cols), - order=order, mode=mode, cval=cval) - smoothed_image = _smooth(resized_image, sigma, mode, cval) - - prev_rows = rows - prev_cols = cols - rows = resized_image.shape[0] - cols = resized_image.shape[1] + out_shape = tuple( + [math.ceil(d / float(downscale)) for d in current_shape]) - # no change to previous pyramid layer - if prev_rows == rows and prev_cols == cols: - break + if multichannel: + out_shape = out_shape[:-1] + + resized_image = resize(smoothed_image, out_shape, order=order, + mode=mode, cval=cval, anti_aliasing=False) + smoothed_image = _smooth(resized_image, sigma, mode, cval, + multichannel) + current_shape = np.asarray(resized_image.shape) yield resized_image - smoothed_image diff -Nru skimage-0.13.1/skimage/transform/_radon_transform.pyx skimage-0.14.0/skimage/transform/_radon_transform.pyx --- 
skimage-0.13.1/skimage/transform/_radon_transform.pyx 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/_radon_transform.pyx 2018-05-29 01:27:44.000000000 +0000 @@ -39,7 +39,7 @@ cdef cnp.double_t t = ray_position - projection_center # s0 is the half-length of the ray's path in the reconstruction circle cdef cnp.double_t s0 - s0 = sqrt(radius**2 - t**2) if radius**2 >= t**2 else 0. + s0 = sqrt(radius * radius - t * t) if radius*radius >= t*t else 0. cdef Py_ssize_t Ns = 2 * (ceil(2 * s0)) # number of steps # along the ray cdef cnp.double_t ray_sum = 0. @@ -71,19 +71,19 @@ if i > 0 and j > 0: weight = (1. - di) * (1. - dj) * ds ray_sum += weight * image[i, j] - weight_norm += weight**2 + weight_norm += weight * weight if i > 0 and j < image.shape[1] - 1: weight = (1. - di) * dj * ds ray_sum += weight * image[i, j+1] - weight_norm += weight**2 + weight_norm += weight * weight if i < image.shape[0] - 1 and j > 0: weight = di * (1 - dj) * ds ray_sum += weight * image[i+1, j] - weight_norm += weight**2 + weight_norm += weight * weight if i < image.shape[0] - 1 and j < image.shape[1] - 1: weight = di * dj * ds ray_sum += weight * image[i+1, j+1] - weight_norm += weight**2 + weight_norm += weight * weight return ray_sum, weight_norm @@ -126,7 +126,7 @@ cdef cnp.double_t t = ray_position - projection_center # s0 is the half-length of the ray's path in the reconstruction circle cdef cnp.double_t s0 - s0 = sqrt(radius*radius - t*t) if radius**2 >= t**2 else 0. + s0 = sqrt(radius*radius - t*t) if radius*radius >= t*t else 0. 
cdef Py_ssize_t Ns = 2 * (ceil(2 * s0)) # beta for equiripple Hamming window cdef cnp.double_t hamming_beta = 0.46164 diff -Nru skimage-0.13.1/skimage/transform/seam_carving.py skimage-0.14.0/skimage/transform/seam_carving.py --- skimage-0.13.1/skimage/transform/seam_carving.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/seam_carving.py 2018-05-29 01:27:44.000000000 +0000 @@ -4,7 +4,7 @@ import numpy as np -def seam_carve(img, energy_map, mode, num, border=1, force_copy=True): +def seam_carve(image, energy_map, mode, num, border=1, force_copy=True): """ Carve vertical or horizontal seams off an image. Carves out vertical/horizontal seams from an image while using the given @@ -33,7 +33,7 @@ If set, the `image` and `energy_map` are copied before being used by the method which modifies it in place. Set this to `False` if the original image and the energy map are no longer needed after - this opetration. + this operation. Returns ------- @@ -47,20 +47,21 @@ http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Avidan07.pdf """ - utils.assert_nD(img, (2, 3)) - image = util.img_as_float(img, force_copy) + utils.assert_nD(image, (2, 3)) + image = util.img_as_float(image, force_copy) energy_map = util.img_as_float(energy_map, force_copy) if image.ndim == 2: image = image[..., np.newaxis] if mode == 'horizontal': - image = np.transpose(image, (1, 0, 2)) + image = np.swapaxes(image, 0, 1) + energy_map = np.swapaxes(energy_map, 0, 1) image = np.ascontiguousarray(image) out = _seam_carve_v(image, energy_map, num, border) if mode == 'horizontal': - out = np.transpose(out, (1, 0, 2)) + out = np.swapaxes(out, 0, 1) return np.squeeze(out) diff -Nru skimage-0.13.1/skimage/transform/tests/test_finite_radon_transform.py skimage-0.14.0/skimage/transform/tests/test_finite_radon_transform.py --- skimage-0.13.1/skimage/transform/tests/test_finite_radon_transform.py 2017-09-26 23:38:27.000000000 +0000 +++ 
skimage-0.14.0/skimage/transform/tests/test_finite_radon_transform.py 2018-05-29 01:27:44.000000000 +0000 @@ -16,8 +16,3 @@ f = frt2(L) fi = ifrt2(f) assert len(np.nonzero(L - fi)[0]) == 0 - -if __name__ == '__main__': - from numpy.testing import run_module_suite - run_module_suite() - diff -Nru skimage-0.13.1/skimage/transform/tests/test_geometric.py skimage-0.14.0/skimage/transform/tests/test_geometric.py --- skimage-0.13.1/skimage/transform/tests/test_geometric.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/tests/test_geometric.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,13 +1,13 @@ import numpy as np -from numpy.testing import (assert_equal, assert_almost_equal, - assert_raises) from skimage.transform._geometric import GeometricTransform from skimage.transform import (estimate_transform, matrix_transform, EuclideanTransform, SimilarityTransform, AffineTransform, FundamentalMatrixTransform, EssentialMatrixTransform, ProjectiveTransform, PolynomialTransform, PiecewiseAffineTransform) -from skimage._shared._warnings import expected_warnings + +from skimage._shared import testing +from skimage._shared.testing import assert_equal, assert_almost_equal SRC = np.array([ @@ -36,8 +36,8 @@ for tform in ('euclidean', 'similarity', 'affine', 'projective', 'polynomial'): estimate_transform(tform, SRC[:2, :], DST[:2, :]) - assert_raises(ValueError, estimate_transform, 'foobar', - SRC[:2, :], DST[:2, :]) + with testing.raises(ValueError): + estimate_transform('foobar', SRC[:2, :], DST[:2, :]) def test_matrix_transform(): @@ -138,7 +138,6 @@ assert_almost_equal(tform.rotation, rotation) assert_almost_equal(tform.translation, translation) - # test special case for scale if rotation=90deg scale = 0.1 rotation = np.pi / 2 @@ -238,7 +237,8 @@ tform = FundamentalMatrixTransform() tform.params = essential_matrix_tform.params src = np.array([[0, 0], [0, 1], [1, 1]]) - assert_almost_equal(tform.inverse(src), [[0, 1, 0], [0, 1, -1], [0, 1, -1]]) + 
assert_almost_equal(tform.inverse(src), + [[0, 1, 0], [0, 1, -1], [0, 1, -1]]) def test_essential_matrix_init(): @@ -278,7 +278,8 @@ tform = EssentialMatrixTransform(rotation=np.eye(3), translation=np.array([1, 0, 0])) src = np.array([[0, 0], [0, 1], [1, 1]]) - assert_almost_equal(tform.inverse(src), [[0, 1, 0], [0, 1, -1], [0, 1, -1]]) + assert_almost_equal(tform.inverse(src), + [[0, 1, 0], [0, 1, -1], [0, 1, -1]]) def test_essential_matrix_residuals(): @@ -336,7 +337,8 @@ def test_polynomial_inverse(): - assert_raises(Exception, PolynomialTransform().inverse, 0) + with testing.raises(Exception): + PolynomialTransform().inverse(0) def test_union(): @@ -356,51 +358,70 @@ tform = AffineTransform(scale=(0.1, 0.1), rotation=0.3) assert_almost_equal((tform + tform.inverse).params, np.eye(3)) + tform1 = SimilarityTransform(scale=0.1, rotation=0.3) + tform2 = SimilarityTransform(scale=0.1, rotation=0.9) + tform3 = SimilarityTransform(scale=0.1 * 1/0.1, rotation=0.3 - 0.9) + tform = tform1 + tform2.inverse + assert_almost_equal(tform.params, tform3.params) + def test_union_differing_types(): tform1 = SimilarityTransform() tform2 = PolynomialTransform() - assert_raises(TypeError, tform1.__add__, tform2) + with testing.raises(TypeError): + tform1.__add__(tform2) def test_geometric_tform(): tform = GeometricTransform() - assert_raises(NotImplementedError, tform, 0) - assert_raises(NotImplementedError, tform.inverse, 0) - assert_raises(NotImplementedError, tform.__add__, 0) + with testing.raises(NotImplementedError): + tform(0) + with testing.raises(NotImplementedError): + tform.inverse(0) + with testing.raises(NotImplementedError): + tform.__add__(0) def test_invalid_input(): - assert_raises(ValueError, ProjectiveTransform, np.zeros((2, 3))) - assert_raises(ValueError, AffineTransform, np.zeros((2, 3))) - assert_raises(ValueError, SimilarityTransform, np.zeros((2, 3))) - assert_raises(ValueError, EuclideanTransform, np.zeros((2, 3))) - - assert_raises(ValueError, 
AffineTransform, - matrix=np.zeros((2, 3)), scale=1) - assert_raises(ValueError, SimilarityTransform, - matrix=np.zeros((2, 3)), scale=1) - assert_raises(ValueError, EuclideanTransform, - matrix=np.zeros((2, 3)), translation=(0, 0)) - - assert_raises(ValueError, PolynomialTransform, np.zeros((3, 3))) - - assert_raises(ValueError, FundamentalMatrixTransform, - matrix=np.zeros((3, 2))) - assert_raises(ValueError, EssentialMatrixTransform, - matrix=np.zeros((3, 2))) - assert_raises(ValueError, EssentialMatrixTransform, - rotation=np.zeros((3, 2))) - assert_raises(ValueError, EssentialMatrixTransform, - rotation=np.zeros((3, 3))) - assert_raises(ValueError, EssentialMatrixTransform, - rotation=np.eye(3)) - assert_raises(ValueError, EssentialMatrixTransform, - rotation=np.eye(3), translation=np.zeros((2,))) - assert_raises(ValueError, EssentialMatrixTransform, - rotation=np.eye(3), translation=np.zeros((2,))) - assert_raises(ValueError, EssentialMatrixTransform, - rotation=np.eye(3), translation=np.zeros((3,))) + with testing.raises(ValueError): + ProjectiveTransform(np.zeros((2, 3))) + with testing.raises(ValueError): + AffineTransform(np.zeros((2, 3))) + with testing.raises(ValueError): + SimilarityTransform(np.zeros((2, 3))) + with testing.raises(ValueError): + EuclideanTransform(np.zeros((2, 3))) + with testing.raises(ValueError): + AffineTransform(matrix=np.zeros((2, 3)), scale=1) + with testing.raises(ValueError): + SimilarityTransform(matrix=np.zeros((2, 3)), scale=1) + with testing.raises(ValueError): + EuclideanTransform( + matrix=np.zeros((2, 3)), translation=(0, 0)) + with testing.raises(ValueError): + PolynomialTransform(np.zeros((3, 3))) + with testing.raises(ValueError): + FundamentalMatrixTransform(matrix=np.zeros((3, 2))) + with testing.raises(ValueError): + EssentialMatrixTransform(matrix=np.zeros((3, 2))) + + with testing.raises(ValueError): + EssentialMatrixTransform(rotation=np.zeros((3, 2))) + with testing.raises(ValueError): + 
EssentialMatrixTransform( + rotation=np.zeros((3, 3))) + with testing.raises(ValueError): + EssentialMatrixTransform( + rotation=np.eye(3)) + with testing.raises(ValueError): + EssentialMatrixTransform(rotation=np.eye(3), + translation=np.zeros((2,))) + with testing.raises(ValueError): + EssentialMatrixTransform(rotation=np.eye(3), + translation=np.zeros((2,))) + with testing.raises(ValueError): + EssentialMatrixTransform( + rotation=np.eye(3), translation=np.zeros((3,))) def test_degenerate(): @@ -417,8 +438,3 @@ tform = ProjectiveTransform() tform.estimate(src, dst) assert np.all(np.isnan(tform.params)) - - -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() diff -Nru skimage-0.13.1/skimage/transform/tests/test_hough_transform.py skimage-0.14.0/skimage/transform/tests/test_hough_transform.py --- skimage-0.13.1/skimage/transform/tests/test_hough_transform.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/tests/test_hough_transform.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,10 +1,13 @@ import numpy as np -from numpy.testing import assert_almost_equal, assert_equal, assert_raises -import skimage.transform as tf +from skimage import transform +from skimage import data +from skimage.feature import canny from skimage.draw import line, circle_perimeter, ellipse_perimeter -from skimage._shared._warnings import expected_warnings -from skimage._shared.testing import test_parallel + +from skimage._shared import testing +from skimage._shared.testing import (assert_almost_equal, assert_equal, + test_parallel) @test_parallel() @@ -14,7 +17,7 @@ rr, cc = line(60, 130, 80, 10) img[rr, cc] = 1 - out, angles, d = tf.hough_line(img) + out, angles, d = transform.hough_line(img) y, x = np.where(out == out.max()) dist = d[y[0]] @@ -28,7 +31,7 @@ img = np.zeros((10, 10)) img[0, 0] = 1 - out, angles, d = tf.hough_line(img, np.linspace(0, 360, 10)) + out, angles, d = transform.hough_line(img, np.linspace(0, 360, 10)) 
assert_equal(len(angles), 10) @@ -38,7 +41,8 @@ img[10] = 1 # Expected error, img must be 2D - assert_raises(ValueError, tf.hough_line, img, np.linspace(0, 360, 10)) + with testing.raises(ValueError): + transform.hough_line(img, np.linspace(0, 360, 10)) def test_probabilistic_hough(): @@ -51,8 +55,8 @@ # decrease default theta sampling because similar orientations may confuse # as mentioned in article of Galambos et al theta = np.linspace(0, np.pi, 45) - lines = tf.probabilistic_hough_line(img, threshold=10, line_length=10, - line_gap=1, theta=theta) + lines = transform.probabilistic_hough_line( + img, threshold=10, line_length=10, line_gap=1, theta=theta) # sort the lines according to the x-axis sorted_lines = [] for line in lines: @@ -64,7 +68,18 @@ assert([(25, 25), (74, 74)] in sorted_lines) # Execute with default theta - tf.probabilistic_hough_line(img, line_length=10, line_gap=3) + transform.probabilistic_hough_line(img, line_length=10, line_gap=3) + + +def test_probabilistic_hough_seed(): + # Load image that is likely to give a randomly varying number of lines + image = data.checkerboard() + + # Use constant seed to ensure a deterministic output + lines = transform.probabilistic_hough_line(image, threshold=50, + line_length=50, line_gap=1, + seed=1234) + assert len(lines) == 65 def test_probabilistic_hough_bad_input(): @@ -72,7 +87,8 @@ img[10] = 1 # Expected error, img must be 2D - assert_raises(ValueError, tf.probabilistic_hough_line, img) + with testing.raises(ValueError): + transform.probabilistic_hough_line(img) def test_hough_line_peaks(): @@ -80,9 +96,9 @@ rr, cc = line(60, 130, 80, 10) img[rr, cc] = 1 - out, angles, d = tf.hough_line(img) + out, angles, d = transform.hough_line(img) - out, theta, dist = tf.hough_line_peaks(out, angles, d) + out, theta, dist = transform.hough_line_peaks(out, angles, d) assert_equal(len(dist), 1) assert_almost_equal(dist[0], 80.723, 1) @@ -98,9 +114,9 @@ testim[15:35, 50] = True testim[1:-1, 58] = True - hough_space, 
angles, dists = tf.hough_line(testim) + hough_space, angles, dists = transform.hough_line(testim) - hspace, _, _ = tf.hough_line_peaks(hough_space, angles, dists) + hspace, _, _ = transform.hough_line_peaks(hough_space, angles, dists) assert hspace[0] > hspace[1] @@ -108,11 +124,11 @@ img = np.zeros((100, 100), dtype=np.bool_) img[:, 30] = True img[:, 40] = True - hspace, angles, dists = tf.hough_line(img) - assert len(tf.hough_line_peaks(hspace, angles, dists, - min_distance=5)[0]) == 2 - assert len(tf.hough_line_peaks(hspace, angles, dists, - min_distance=15)[0]) == 1 + hspace, angles, dists = transform.hough_line(img) + assert len(transform.hough_line_peaks(hspace, angles, dists, + min_distance=5)[0]) == 2 + assert len(transform.hough_line_peaks(hspace, angles, dists, + min_distance=15)[0]) == 1 def test_hough_line_peaks_angle(): @@ -124,36 +140,46 @@ img[:, 0] = True img[0, :] = True - hspace, angles, dists = tf.hough_line(img) - assert len(tf.hough_line_peaks(hspace, angles, dists, - min_angle=45)[0]) == 2 - assert len(tf.hough_line_peaks(hspace, angles, dists, - min_angle=90)[0]) == 1 + hspace, angles, dists = transform.hough_line(img) + assert len(transform.hough_line_peaks(hspace, angles, dists, + min_angle=45)[0]) == 2 + assert len(transform.hough_line_peaks(hspace, angles, dists, + min_angle=90)[0]) == 1 theta = np.linspace(0, np.pi, 100) - hspace, angles, dists = tf.hough_line(img, theta) - assert len(tf.hough_line_peaks(hspace, angles, dists, - min_angle=45)[0]) == 2 - assert len(tf.hough_line_peaks(hspace, angles, dists, - min_angle=90)[0]) == 1 + hspace, angles, dists = transform.hough_line(img, theta) + assert len(transform.hough_line_peaks(hspace, angles, dists, + min_angle=45)[0]) == 2 + assert len(transform.hough_line_peaks(hspace, angles, dists, + min_angle=90)[0]) == 1 theta = np.linspace(np.pi / 3, 4. 
/ 3 * np.pi, 100) - hspace, angles, dists = tf.hough_line(img, theta) - assert len(tf.hough_line_peaks(hspace, angles, dists, - min_angle=45)[0]) == 2 - assert len(tf.hough_line_peaks(hspace, angles, dists, - min_angle=90)[0]) == 1 + hspace, angles, dists = transform.hough_line(img, theta) + assert len(transform.hough_line_peaks(hspace, angles, dists, + min_angle=45)[0]) == 2 + assert len(transform.hough_line_peaks(hspace, angles, dists, + min_angle=90)[0]) == 1 def test_hough_line_peaks_num(): img = np.zeros((100, 100), dtype=np.bool_) img[:, 30] = True img[:, 40] = True - hspace, angles, dists = tf.hough_line(img) - assert len(tf.hough_line_peaks(hspace, angles, dists, min_distance=0, - min_angle=0, num_peaks=1)[0]) == 1 + hspace, angles, dists = transform.hough_line(img) + assert len(transform.hough_line_peaks(hspace, angles, dists, + min_distance=0, min_angle=0, + num_peaks=1)[0]) == 1 +def test_hough_line_peaks_zero_input(): + # Test to make sure empty input doesn't cause a failure + img = np.zeros((100, 100), dtype='uint8') + theta = np.linspace(0, np.pi, 100) + hspace, angles, dists = transform.hough_line(img, theta) + h, a, d = transform.hough_line_peaks(hspace, angles, dists) + assert_equal(a, np.array([])) + + @test_parallel() def test_hough_circle(): # Prepare picture @@ -163,10 +189,10 @@ y, x = circle_perimeter(y_0, x_0, radius) img[x, y] = 1 - out1 = tf.hough_circle(img, radius) - out2 = tf.hough_circle(img, [radius]) + out1 = transform.hough_circle(img, radius) + out2 = transform.hough_circle(img, [radius]) assert_equal(out1, out2) - out = tf.hough_circle(img, np.array([radius], dtype=np.intp)) + out = transform.hough_circle(img, np.array([radius], dtype=np.intp)) assert_equal(out, out1) x, y = np.where(out[0] == out[0].max()) assert_equal(x[0], x_0) @@ -182,8 +208,8 @@ y, x = circle_perimeter(y_0, x_0, radius) img[x[np.where(x > 0)], y[np.where(x > 0)]] = 1 - out = tf.hough_circle(img, np.array([radius], dtype=np.intp), - full_output=True) + out = 
transform.hough_circle(img, np.array([radius], dtype=np.intp), + full_output=True) x, y = np.where(out[0] == out[0].max()) # Offset for x_0, y_0 @@ -202,10 +228,12 @@ img[x, y] = 1 radii = [rad_0, rad_1] - hspaces = tf.hough_circle(img, radii) - out = tf.hough_circle_peaks(hspaces, radii, min_xdistance=1, min_ydistance=1, - threshold=None, num_peaks=np.inf, total_num_peaks=np.inf) - s = np.argsort(out[3]) # sort by radii + hspaces = transform.hough_circle(img, radii) + out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=1, + min_ydistance=1, threshold=None, + num_peaks=np.inf, + total_num_peaks=np.inf) + s = np.argsort(out[3]) # sort by radii assert_equal(out[1][s], np.array([y_0, y_1])) assert_equal(out[2][s], np.array([x_0, x_1])) assert_equal(out[3][s], np.array([rad_0, rad_1])) @@ -223,12 +251,13 @@ img[x, y] = 1 radii = [rad_0, rad_1] - hspaces = tf.hough_circle(img, radii) - out = tf.hough_circle_peaks(hspaces, radii, min_xdistance=1, min_ydistance=1, - threshold=None, num_peaks=np.inf, total_num_peaks=1) - assert_equal(out[1][0], np.array([y_1,])) - assert_equal(out[2][0], np.array([x_1,])) - assert_equal(out[3][0], np.array([rad_1,])) + hspaces = transform.hough_circle(img, radii) + out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=1, + min_ydistance=1, threshold=None, + num_peaks=np.inf, total_num_peaks=1) + assert_equal(out[1][0], np.array([y_1, ])) + assert_equal(out[2][0], np.array([x_1, ])) + assert_equal(out[3][0], np.array([rad_1, ])) def test_hough_ellipse_zero_angle(): @@ -240,7 +269,7 @@ angle = 0 rr, cc = ellipse_perimeter(y0, x0, ry, rx) img[rr, cc] = 1 - result = tf.hough_ellipse(img, threshold=9) + result = transform.hough_ellipse(img, threshold=9) best = result[-1] assert_equal(best[1], y0) assert_equal(best[2], x0) @@ -265,7 +294,7 @@ angle = np.pi / 1.35 rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle) img[rr, cc] = 1 - result = tf.hough_ellipse(img, threshold=15, accuracy=3) + result = 
transform.hough_ellipse(img, threshold=15, accuracy=3) result.sort(order='accumulator') best = result[-1] assert_almost_equal(best[1] / 100., y0 / 100., decimal=1) @@ -291,7 +320,7 @@ angle = np.pi / 1.35 rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle) img[rr, cc] = 1 - result = tf.hough_ellipse(img, threshold=15, accuracy=3) + result = transform.hough_ellipse(img, threshold=15, accuracy=3) result.sort(order='accumulator') best = result[-1] assert_almost_equal(best[1] / 100., y0 / 100., decimal=1) @@ -317,7 +346,7 @@ angle = np.pi / 1.35 + np.pi / 2. rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle) img[rr, cc] = 1 - result = tf.hough_ellipse(img, threshold=15, accuracy=3) + result = transform.hough_ellipse(img, threshold=15, accuracy=3) result.sort(order='accumulator') best = result[-1] # Check if I re-draw the ellipse, points are the same! @@ -338,7 +367,7 @@ angle = np.pi / 1.35 + np.pi rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle) img[rr, cc] = 1 - result = tf.hough_ellipse(img, threshold=15, accuracy=3) + result = transform.hough_ellipse(img, threshold=15, accuracy=3) result.sort(order='accumulator') best = result[-1] # Check if I re-draw the ellipse, points are the same! @@ -359,7 +388,7 @@ angle = - np.pi / 1.35 rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle) img[rr, cc] = 1 - result = tf.hough_ellipse(img, threshold=15, accuracy=3) + result = transform.hough_ellipse(img, threshold=15, accuracy=3) result.sort(order='accumulator') best = result[-1] # Check if I re-draw the ellipse, points are the same! @@ -380,7 +409,7 @@ angle = - np.pi / 1.35 rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle) img[rr, cc] = 1 - result = tf.hough_ellipse(img, threshold=15, accuracy=3) + result = transform.hough_ellipse(img, threshold=15, accuracy=3) result.sort(order='accumulator') best = result[-1] # Check if I re-draw the ellipse, points are the same! @@ -401,7 +430,7 @@ angle = - np.pi / 1.35 - np.pi / 2. 
rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle) img[rr, cc] = 1 - result = tf.hough_ellipse(img, threshold=15, accuracy=3) + result = transform.hough_ellipse(img, threshold=15, accuracy=3) result.sort(order='accumulator') best = result[-1] # Check if I re-draw the ellipse, points are the same! @@ -422,7 +451,7 @@ angle = - np.pi / 1.35 - np.pi rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle) img[rr, cc] = 1 - result = tf.hough_ellipse(img, threshold=15, accuracy=3) + result = transform.hough_ellipse(img, threshold=15, accuracy=3) result.sort(order='accumulator') best = result[-1] # Check if I re-draw the ellipse, points are the same! @@ -433,5 +462,5 @@ assert_equal(cc, cc2) -if __name__ == "__main__": - np.testing.run_module_suite() +def test_hough_ellipse_all_black_img(): + assert(transform.hough_ellipse(np.zeros((100, 100))).shape == (0, 6)) diff -Nru skimage-0.13.1/skimage/transform/tests/test_integral.py skimage-0.14.0/skimage/transform/tests/test_integral.py --- skimage-0.13.1/skimage/transform/tests/test_integral.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/tests/test_integral.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,8 +1,9 @@ import numpy as np -from numpy.testing import assert_equal - from skimage.transform import integral_image, integrate +from skimage._shared.testing import assert_equal + + np.random.seed(0) x = (np.random.rand(50, 50) * 255).astype(np.uint8) s = integral_image(x) @@ -38,15 +39,9 @@ x[:20, :20].sum(), x[:20, 10:20].sum(), x[10:20, :20].sum(), - x[0,0], + x[0, 0], x[10, 10], x[30:, 31:].sum()]) start_pts = [(r0[i], c0[i]) for i in range(len(r0))] end_pts = [(r1[i], c1[i]) for i in range(len(r0))] - assert_equal(expected, integrate(s, r0, c0, r1, c1)) # test deprecated assert_equal(expected, integrate(s, start_pts, end_pts)) - - -if __name__ == '__main__': - from numpy.testing import run_module_suite - run_module_suite() diff -Nru 
skimage-0.13.1/skimage/transform/tests/test_pyramids.py skimage-0.14.0/skimage/transform/tests/test_pyramids.py --- skimage-0.13.1/skimage/transform/tests/test_pyramids.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/tests/test_pyramids.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,7 +1,12 @@ -from numpy.testing import assert_array_equal, assert_raises, run_module_suite +import math +import numpy as np from skimage import data from skimage.transform import pyramids +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal, assert_, assert_equal +from skimage._shared._warnings import expected_warnings + image = data.astronaut() image_gray = image[..., 0] @@ -9,64 +14,120 @@ def test_pyramid_reduce_rgb(): rows, cols, dim = image.shape - out = pyramids.pyramid_reduce(image, downscale=2) + with expected_warnings(['The default multichannel']): + out = pyramids.pyramid_reduce(image, downscale=2) assert_array_equal(out.shape, (rows / 2, cols / 2, dim)) def test_pyramid_reduce_gray(): rows, cols = image_gray.shape - out = pyramids.pyramid_reduce(image_gray, downscale=2) + with expected_warnings(['The default multichannel']): + out = pyramids.pyramid_reduce(image_gray, downscale=2) assert_array_equal(out.shape, (rows / 2, cols / 2)) +def test_pyramid_reduce_nd(): + for ndim in [1, 2, 3, 4]: + img = np.random.randn(*((8, ) * ndim)) + out = pyramids.pyramid_reduce(img, downscale=2, + multichannel=False) + expected_shape = np.asarray(img.shape) / 2 + assert_array_equal(out.shape, expected_shape) + + def test_pyramid_expand_rgb(): rows, cols, dim = image.shape - out = pyramids.pyramid_expand(image, upscale=2) + with expected_warnings(['The default multichannel']): + out = pyramids.pyramid_expand(image, upscale=2) assert_array_equal(out.shape, (rows * 2, cols * 2, dim)) def test_pyramid_expand_gray(): rows, cols = image_gray.shape - out = pyramids.pyramid_expand(image_gray, upscale=2) + with expected_warnings(['The 
default multichannel']): + out = pyramids.pyramid_expand(image_gray, upscale=2) assert_array_equal(out.shape, (rows * 2, cols * 2)) +def test_pyramid_expand_nd(): + for ndim in [1, 2, 3, 4]: + img = np.random.randn(*((4, ) * ndim)) + out = pyramids.pyramid_expand(img, upscale=2, + multichannel=False) + expected_shape = np.asarray(img.shape) * 2 + assert_array_equal(out.shape, expected_shape) + + def test_build_gaussian_pyramid_rgb(): rows, cols, dim = image.shape - pyramid = pyramids.pyramid_gaussian(image, downscale=2) - for layer, out in enumerate(pyramid): - layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim) - assert_array_equal(out.shape, layer_shape) + with expected_warnings(['The default multichannel']): + pyramid = pyramids.pyramid_gaussian(image, downscale=2) + for layer, out in enumerate(pyramid): + layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim) + assert_array_equal(out.shape, layer_shape) def test_build_gaussian_pyramid_gray(): rows, cols = image_gray.shape - pyramid = pyramids.pyramid_gaussian(image_gray, downscale=2) - for layer, out in enumerate(pyramid): - layer_shape = (rows / 2 ** layer, cols / 2 ** layer) - assert_array_equal(out.shape, layer_shape) + with expected_warnings(['The default multichannel']): + pyramid = pyramids.pyramid_gaussian(image_gray, downscale=2) + for layer, out in enumerate(pyramid): + layer_shape = (rows / 2 ** layer, cols / 2 ** layer) + assert_array_equal(out.shape, layer_shape) + + +def test_build_gaussian_pyramid_nd(): + for ndim in [1, 2, 3, 4]: + img = np.random.randn(*((8, ) * ndim)) + original_shape = np.asarray(img.shape) + pyramid = pyramids.pyramid_gaussian(img, downscale=2, + multichannel=False) + for layer, out in enumerate(pyramid): + layer_shape = original_shape / 2 ** layer + assert_array_equal(out.shape, layer_shape) def test_build_laplacian_pyramid_rgb(): rows, cols, dim = image.shape - pyramid = pyramids.pyramid_laplacian(image, downscale=2) - for layer, out in enumerate(pyramid): - 
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim) - assert_array_equal(out.shape, layer_shape) + with expected_warnings(['The default multichannel']): + pyramid = pyramids.pyramid_laplacian(image, downscale=2) + for layer, out in enumerate(pyramid): + layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim) + assert_array_equal(out.shape, layer_shape) + + +def test_build_laplacian_pyramid_nd(): + for ndim in [1, 2, 3, 4]: + img = np.random.randn(*(16, )*ndim) + original_shape = np.asarray(img.shape) + pyramid = pyramids.pyramid_laplacian(img, downscale=2, + multichannel=False) + for layer, out in enumerate(pyramid): + print(out.shape) + layer_shape = original_shape / 2 ** layer + assert_array_equal(out.shape, layer_shape) + + +def test_laplacian_pyramid_max_layers(): + for downscale in [2, 3, 5, 7]: + img = np.random.randn(32, 8) + pyramid = pyramids.pyramid_laplacian(img, downscale=downscale, + multichannel=False) + max_layer = int(np.ceil(math.log(np.max(img.shape), downscale))) + for layer, out in enumerate(pyramid): + if layer < max_layer: + # should not reach all axes as size 1 prior to final level + assert_(np.max(out.shape) > 1) + # total number of images is max_layer + 1 + assert_equal(max_layer, layer) -def test_build_laplacian_pyramid_gray(): - rows, cols = image_gray.shape - pyramid = pyramids.pyramid_laplacian(image_gray, downscale=2) - for layer, out in enumerate(pyramid): - layer_shape = (rows / 2 ** layer, cols / 2 ** layer) - assert_array_equal(out.shape, layer_shape) + # final layer should be size 1 on all axes + assert_array_equal((out.shape), (1, 1)) def test_check_factor(): - assert_raises(ValueError, pyramids._check_factor, 0.99) - assert_raises(ValueError, pyramids._check_factor, - 2) - - -if __name__ == "__main__": - run_module_suite() + with testing.raises(ValueError): + pyramids._check_factor(0.99) + with testing.raises(ValueError): + pyramids._check_factor(- 2) diff -Nru 
skimage-0.13.1/skimage/transform/tests/test_radon_transform.py skimage-0.14.0/skimage/transform/tests/test_radon_transform.py --- skimage-0.13.1/skimage/transform/tests/test_radon_transform.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/tests/test_radon_transform.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,19 +1,22 @@ from __future__ import print_function, division -import numpy as np -from numpy.testing import assert_raises +import os import itertools -import os.path -from skimage.transform import radon, iradon, iradon_sart, rescale -from skimage.io import imread +import numpy as np from skimage import data_dir +from skimage.io import imread +from skimage.transform import radon, iradon, iradon_sart, rescale + +from skimage._shared import testing from skimage._shared.testing import test_parallel from skimage._shared._warnings import expected_warnings + PHANTOM = imread(os.path.join(data_dir, "phantom.png"), - as_grey=True)[::2, ::2] -PHANTOM = rescale(PHANTOM, 0.5, order=1, mode='reflect') + as_gray=True)[::2, ::2] +PHANTOM = rescale(PHANTOM, 0.5, order=1, + mode='constant', anti_aliasing=False, multichannel=False) def _debug_plot(original, result, sinogram=None): @@ -57,14 +60,23 @@ assert np.std(sinogram_max) < 1e-6 -def test_radon_center(): - shapes = [(16, 16), (17, 17)] - circles = [False, True] - for shape, circle in itertools.product(shapes, circles): - yield check_radon_center, shape, circle - rectangular_shapes = [(32, 16), (33, 17)] - for shape in rectangular_shapes: - yield check_radon_center, shape, False +shapes_for_test_radon_center = [(16, 16), (17, 17)] +circles_for_test_radon_center = [False, True] + + +@testing.parametrize("shape, circle", + itertools.product(shapes_for_test_radon_center, + circles_for_test_radon_center)) +def test_radon_center(shape, circle): + check_radon_center(shape, circle) + + +rectangular_shapes = [(32, 16), (33, 17)] + + +@testing.parametrize("shape", rectangular_shapes) +def 
test_radon_center_rectangular(shape): + check_radon_center(shape, False) def check_iradon_center(size, theta, circle): @@ -105,19 +117,24 @@ assert np.allclose(reconstruction, reconstruction_opposite) -def test_iradon_center(): - sizes = [16, 17] - thetas = [0, 90] - circles = [False, True] - for size, theta, circle in itertools.product(sizes, thetas, circles): - yield check_iradon_center, size, theta, circle +sizes_for_test_iradon_center = [16, 17] +thetas_for_test_iradon_center = [0, 90] +circles_for_test_iradon_center = [False, True] + + +@testing.parametrize("size, theta, circle", + itertools.product(sizes_for_test_iradon_center, + thetas_for_test_iradon_center, + circles_for_test_radon_center)) +def test_iradon_center(size, theta, circle): + check_iradon_center(size, theta, circle) def check_radon_iradon(interpolation_type, filter_type): debug = False image = PHANTOM reconstructed = iradon(radon(image, circle=False), filter=filter_type, - interpolation=interpolation_type) + interpolation=interpolation_type, circle=False) delta = np.mean(np.abs(image - reconstructed)) print('\n\tmean error:', delta) if debug: @@ -132,14 +149,18 @@ assert delta < allowed_delta -def test_radon_iradon(): - filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"] - interpolation_types = ['linear', 'nearest'] - for interpolation_type, filter_type in \ - itertools.product(interpolation_types, filter_types): - yield check_radon_iradon, interpolation_type, filter_type - # cubic interpolation is slow; only run one test for it - yield check_radon_iradon, 'cubic', 'shepp-logan' +filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"] +interpolation_types = ['linear', 'nearest'] +radon_iradon_inputs = list(itertools.product(interpolation_types, + filter_types)) +# cubic interpolation is slow; only run one test for it +radon_iradon_inputs.append(('cubic', 'shepp-logan')) + + +@testing.parametrize("interpolation_type, filter_type", + radon_iradon_inputs) +def 
test_radon_iradon(interpolation_type, filter_type): + check_radon_iradon(interpolation_type, filter_type) def test_iradon_angles(): @@ -154,7 +175,8 @@ theta = np.linspace(0, 180, nb_angles, endpoint=False) radon_image_200 = radon(image, theta=theta, circle=False) reconstructed = iradon(radon_image_200, circle=False) - delta_200 = np.mean(abs(_rescale_intensity(image) - _rescale_intensity(reconstructed))) + delta_200 = np.mean(abs(_rescale_intensity(image) - + _rescale_intensity(reconstructed))) assert delta_200 < 0.03 # Lower number of projections nb_angles = 80 @@ -184,21 +206,35 @@ == np.unravel_index(np.argmax(image), image.shape)) -def test_radon_iradon_minimal(): - shapes = [(3, 3), (4, 4), (5, 5)] - for shape in shapes: +shapes = [(3, 3), (4, 4), (5, 5)] + + +def generate_test_data_for_radon_iradon_minimal(shapes): + def shape2coordinates(shape): c0, c1 = shape[0] // 2, shape[1] // 2 coordinates = itertools.product((c0 - 1, c0, c0 + 1), (c1 - 1, c1, c1 + 1)) - for coordinate in coordinates: - yield check_radon_iradon_minimal, shape, coordinate + return coordinates + + def shape2shapeandcoordinates(shape): + return itertools.product([shape], shape2coordinates(shape)) + + return itertools.chain.from_iterable([shape2shapeandcoordinates(shape) + for shape in shapes]) + + +@testing.parametrize("shape, coordinate", + generate_test_data_for_radon_iradon_minimal(shapes)) +def test_radon_iradon_minimal(shape, coordinate): + check_radon_iradon_minimal(shape, coordinate) def test_reconstruct_with_wrong_angles(): a = np.zeros((3, 3)) p = radon(a, theta=[0, 1, 2], circle=False) iradon(p, theta=[0, 1, 2], circle=False) - assert_raises(ValueError, iradon, p, theta=[0, 1, 2, 3]) + with testing.raises(ValueError): + iradon(p, theta=[0, 1, 2, 3]) def _random_circle(shape): @@ -243,7 +279,10 @@ image = _random_circle((size, size)) theta = np.linspace(0., 180., size, False) sinogram_circle = radon(image, theta, circle=True) - argmax_shape = lambda a: 
np.unravel_index(np.argmax(a), a.shape) + + def argmax_shape(a): + return np.unravel_index(np.argmax(a), a.shape) + print('\n\targmax of circle:', argmax_shape(sinogram_circle)) sinogram_square = radon(image, theta, circle=False) print('\targmax of square:', argmax_shape(sinogram_square)) @@ -252,13 +291,13 @@ argmax_shape(sinogram_circle_to_square)) error = abs(sinogram_square - sinogram_circle_to_square) print(np.mean(error), np.max(error)) - assert (argmax_shape(sinogram_square) - == argmax_shape(sinogram_circle_to_square)) + assert (argmax_shape(sinogram_square) == + argmax_shape(sinogram_circle_to_square)) -def test_sinogram_circle_to_square(): - for size in (50, 51): - yield check_sinogram_circle_to_square, size +@testing.parametrize("size", (50, 51)) +def test_sinogram_circle_to_square(size): + check_sinogram_circle_to_square(size) def check_radon_iradon_circle(interpolation, shape, output_size): @@ -289,13 +328,20 @@ np.allclose(reconstruction_rectangle, reconstruction_circle) -def test_radon_iradon_circle(): - shape = (61, 79) - interpolations = ('nearest', 'linear') - output_sizes = (None, min(shape), max(shape), 97) - for interpolation, output_size in itertools.product(interpolations, - output_sizes): - yield check_radon_iradon_circle, interpolation, shape, output_size +# if adding more shapes to test data, you might want to look at commit d0f2bac3f +shapes_radon_iradon_circle = ((61, 79), ) +interpolations = ('nearest', 'linear') +output_sizes = (None, + min(shapes_radon_iradon_circle[0]), + max(shapes_radon_iradon_circle[0]), + 97) + + +@testing.parametrize("shape, interpolation, output_size", + itertools.product(shapes_radon_iradon_circle, + interpolations, output_sizes)) +def test_radon_iradon_circle(shape, interpolation, output_size): + check_radon_iradon_circle(interpolation, shape, output_size) def test_order_angles_golden_ratio(): @@ -315,7 +361,8 @@ def test_iradon_sart(): debug = False - image = rescale(PHANTOM, 0.8, mode='reflect') + image = 
rescale(PHANTOM, 0.8, mode='reflect', + multichannel=False, anti_aliasing=False) theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False) theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True) for theta, error_factor in ((theta_ordered, 1.), @@ -372,8 +419,3 @@ delta = np.mean(np.abs(reconstructed - image)) print('delta (1 iteration, shifted sinogram) =', delta) assert delta < 0.022 * error_factor - - -if __name__ == "__main__": - from numpy.testing import run_module_suite - run_module_suite() diff -Nru skimage-0.13.1/skimage/transform/tests/test_seam_carving.py skimage-0.14.0/skimage/transform/tests/test_seam_carving.py --- skimage-0.13.1/skimage/transform/tests/test_seam_carving.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/tests/test_seam_carving.py 2018-05-29 01:27:44.000000000 +0000 @@ -7,16 +7,25 @@ img = np.array([[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0], - [0, 1, 0, 0, 0], - [1, 0, 0, 0, 0]], dtype=np.float) + [0, 1, 0, 0, 1], + [1, 0, 0, 1, 0]], dtype=np.float) + + expected = np.array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1], + [0, 0, 1, 0]], dtype=np.float) + energy = 1 - img out = transform.seam_carve(img, energy, 'vertical', 1, border=0) - testing.assert_allclose(out, 0) + testing.assert_equal(out, expected) img = img.T + energy = energy.T + out = transform.seam_carve(img, energy, 'horizontal', 1, border=0) - testing.assert_allclose(out, 0) + testing.assert_equal(out, expected.T) if __name__ == '__main__': diff -Nru skimage-0.13.1/skimage/transform/tests/test_warps.py skimage-0.14.0/skimage/transform/tests/test_warps.py --- skimage-0.13.1/skimage/transform/tests/test_warps.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/tests/test_warps.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,5 +1,3 @@ -from numpy.testing import (assert_almost_equal, run_module_suite, - assert_equal, assert_raises) import numpy as np from scipy.ndimage import 
map_coordinates @@ -11,8 +9,11 @@ downscale_local_mean) from skimage import transform as tf, data, img_as_float from skimage.color import rgb2gray + +from skimage._shared import testing +from skimage._shared.testing import (assert_almost_equal, assert_equal, + test_parallel) from skimage._shared._warnings import expected_warnings -from skimage._shared.testing import test_parallel np.random.seed(0) @@ -46,7 +47,8 @@ refx = np.zeros((5, 5), dtype=np.double) refx[1, 1] = 1 - shift = lambda xy: xy + 1 + def shift(xy): + return xy + 1 outx = warp(x, shift, order=1) assert_almost_equal(outx, refx) @@ -91,12 +93,12 @@ x = np.zeros((5, 5), dtype=np.double) x[2, 2] = 1 - with expected_warnings(['The default mode']): - outx = rescale(x, 3, order=3, clip=False) + outx = rescale(x, 3, order=3, clip=False, + multichannel=False, anti_aliasing=False, mode='constant') assert outx.min() < 0 - with expected_warnings(['The default mode']): - outx = rescale(x, 3, order=3, clip=True) + outx = rescale(x, 3, order=3, clip=True, + multichannel=False, anti_aliasing=False, mode='constant') assert_almost_equal(outx.min(), 0) assert_almost_equal(outx.max(), 1) @@ -162,8 +164,8 @@ # same scale factor x = np.zeros((5, 5), dtype=np.double) x[1, 1] = 1 - with expected_warnings(['The default mode']): - scaled = rescale(x, 2, order=0) + scaled = rescale(x, 2, order=0, + multichannel=False, anti_aliasing=False, mode='constant') ref = np.zeros((10, 10)) ref[2:4, 2:4] = 1 assert_almost_equal(scaled, ref) @@ -171,18 +173,84 @@ # different scale factors x = np.zeros((5, 5), dtype=np.double) x[1, 1] = 1 - with expected_warnings(['The default mode']): - scaled = rescale(x, (2, 1), order=0) + + scaled = rescale(x, (2, 1), order=0, + multichannel=False, anti_aliasing=False, mode='constant') ref = np.zeros((10, 5)) ref[2:4, 1] = 1 assert_almost_equal(scaled, ref) +def test_rescale_invalid_scale(): + x = np.zeros((10, 10, 3)) + with testing.raises(ValueError): + rescale(x, (2, 2), + multichannel=False, 
anti_aliasing=False, mode='constant') + with testing.raises(ValueError): + rescale(x, (2, 2, 2), + multichannel=True, anti_aliasing=False, mode='constant') + + +def test_rescale_multichannel(): + # 1D + channels + x = np.zeros((8, 3), dtype=np.double) + scaled = rescale(x, 2, order=0, multichannel=True, anti_aliasing=False, + mode='constant') + assert_equal(scaled.shape, (16, 3)) + # 2D + scaled = rescale(x, 2, order=0, multichannel=False, anti_aliasing=False, + mode='constant') + assert_equal(scaled.shape, (16, 6)) + + # 2D + channels + x = np.zeros((8, 8, 3), dtype=np.double) + scaled = rescale(x, 2, order=0, multichannel=True, anti_aliasing=False, + mode='constant') + assert_equal(scaled.shape, (16, 16, 3)) + # 3D + scaled = rescale(x, 2, order=0, multichannel=False, anti_aliasing=False, + mode='constant') + assert_equal(scaled.shape, (16, 16, 6)) + + # 3D + channels + x = np.zeros((8, 8, 8, 3), dtype=np.double) + scaled = rescale(x, 2, order=0, multichannel=True, anti_aliasing=False, + mode='constant') + assert_equal(scaled.shape, (16, 16, 16, 3)) + # 4D + scaled = rescale(x, 2, order=0, multichannel=False, anti_aliasing=False, + mode='constant') + assert_equal(scaled.shape, (16, 16, 16, 6)) + + +def test_rescale_multichannel_multiscale(): + x = np.zeros((5, 5, 3), dtype=np.double) + scaled = rescale(x, (2, 1), order=0, multichannel=True, + anti_aliasing=False, mode='constant') + assert_equal(scaled.shape, (10, 5, 3)) + + +def test_rescale_multichannel_defaults(): + # ensure multichannel=None matches the previous default behaviour + + # 2D: multichannel should default to False + x = np.zeros((8, 3), dtype=np.double) + with expected_warnings(['multichannel']): + scaled = rescale(x, 2, order=0, anti_aliasing=False, mode='constant') + assert_equal(scaled.shape, (16, 6)) + + # 3D: multichannel should default to True + x = np.zeros((8, 8, 3), dtype=np.double) + with expected_warnings(['multichannel']): + scaled = rescale(x, 2, order=0, anti_aliasing=False, 
mode='constant') + assert_equal(scaled.shape, (16, 16, 3)) + + def test_resize2d(): x = np.zeros((5, 5), dtype=np.double) x[1, 1] = 1 - with expected_warnings(['The default mode']): - resized = resize(x, (10, 10), order=0) + resized = resize(x, (10, 10), order=0, anti_aliasing=False, + mode='constant') ref = np.zeros((10, 10)) ref[2:4, 2:4] = 1 assert_almost_equal(resized, ref) @@ -192,13 +260,16 @@ # keep 3rd dimension x = np.zeros((5, 5, 3), dtype=np.double) x[1, 1, :] = 1 - with expected_warnings(['The default mode']): - resized = resize(x, (10, 10), order=0) + resized = resize(x, (10, 10), order=0, anti_aliasing=False, + mode='constant') + with testing.raises(ValueError): + # output_shape too short + resize(x, (10, ), order=0, anti_aliasing=False, mode='constant') ref = np.zeros((10, 10, 3)) ref[2:4, 2:4, :] = 1 assert_almost_equal(resized, ref) - with expected_warnings(['The default mode']): - resized = resize(x, (10, 10, 3), order=0) + resized = resize(x, (10, 10, 3), order=0, anti_aliasing=False, + mode='constant') assert_almost_equal(resized, ref) @@ -206,8 +277,8 @@ # resize 3rd dimension x = np.zeros((5, 5, 3), dtype=np.double) x[1, 1, :] = 1 - with expected_warnings(['The default mode']): - resized = resize(x, (10, 10, 1), order=0) + resized = resize(x, (10, 10, 1), order=0, anti_aliasing=False, + mode='constant') ref = np.zeros((10, 10, 1)) ref[2:4, 2:4] = 1 assert_almost_equal(resized, ref) @@ -217,19 +288,44 @@ # 3D output with 2D input x = np.zeros((5, 5), dtype=np.double) x[1, 1] = 1 - with expected_warnings(['The default mode']): - resized = resize(x, (10, 10, 1), order=0) + resized = resize(x, (10, 10, 1), order=0, anti_aliasing=False, + mode='constant') ref = np.zeros((10, 10, 1)) ref[2:4, 2:4] = 1 assert_almost_equal(resized, ref) +def test_resize2d_4d(): + # resize with extra output dimensions + x = np.zeros((5, 5), dtype=np.double) + x[1, 1] = 1 + out_shape = (10, 10, 1, 1) + resized = resize(x, out_shape, order=0, anti_aliasing=False, + 
mode='constant') + ref = np.zeros(out_shape) + ref[2:4, 2:4, ...] = 1 + assert_almost_equal(resized, ref) + + +def test_resize_nd(): + for dim in range(1, 6): + shape = 2 + np.arange(dim) * 2 + x = np.ones(shape) + out_shape = np.asarray(shape) * 1.5 + resized = resize(x, out_shape, order=0, mode='reflect', + anti_aliasing=False) + expected_shape = 1.5 * shape + assert_equal(resized.shape, expected_shape) + assert np.all(resized == 1) + + def test_resize3d_bilinear(): # bilinear 3rd dimension x = np.zeros((5, 5, 2), dtype=np.double) x[1, 1, 0] = 0 x[1, 1, 1] = 1 - resized = resize(x, (10, 10, 1), order=1, mode='constant') + resized = resize(x, (10, 10, 1), order=1, mode='constant', + anti_aliasing=False) ref = np.zeros((10, 10, 1)) ref[1:5, 1:5, :] = 0.03125 ref[1:5, 2:4, :] = 0.09375 @@ -255,7 +351,7 @@ swirled = tf.swirl(image, strength=10, **swirl_params) unswirled = tf.swirl(swirled, strength=-10, **swirl_params) - assert np.mean(np.abs(image[1:-1,1:-1] - unswirled[1:-1,1:-1])) < 0.01 + assert np.mean(np.abs(image[1:-1, 1:-1] - unswirled[1:-1, 1:-1])) < 0.01 def test_const_cval_out_of_range(): @@ -271,7 +367,7 @@ assert np.allclose(img, warp(img, AffineTransform(rotation=0))) assert not np.allclose(img, warp(img, AffineTransform(rotation=0.1))) rgb_img = np.transpose(np.asarray([img, np.zeros_like(img), img]), - (1, 2, 0)) + (1, 2, 0)) warped_rgb_img = warp(rgb_img, AffineTransform(rotation=0.1)) assert np.allclose(rgb_img, warp(rgb_img, AffineTransform(rotation=0))) assert not np.allclose(rgb_img, warped_rgb_img) @@ -287,23 +383,78 @@ map_coordinates(image[:, :, 0], coords[:2]) +def test_downsize(): + x = np.zeros((10, 10), dtype=np.double) + x[2:4, 2:4] = 1 + scaled = resize(x, (5, 5), order=0, anti_aliasing=False, mode='constant') + assert_equal(scaled.shape, (5, 5)) + assert_equal(scaled[1, 1], 1) + assert_equal(scaled[2:, :].sum(), 0) + assert_equal(scaled[:, 2:].sum(), 0) + + +def test_downsize_anti_aliasing(): + x = np.zeros((10, 10), dtype=np.double) + 
x[2, 2] = 1 + scaled = resize(x, (5, 5), order=1, anti_aliasing=True, mode='constant') + assert_equal(scaled.shape, (5, 5)) + assert np.all(scaled[:3, :3] > 0) + assert_equal(scaled[3:, :].sum(), 0) + assert_equal(scaled[:, 3:].sum(), 0) + + +def test_downsize_anti_aliasing_invalid_stddev(): + x = np.zeros((10, 10), dtype=np.double) + with testing.raises(ValueError): + resize(x, (5, 5), order=0, anti_aliasing=True, anti_aliasing_sigma=-1, + mode='constant') + with expected_warnings(["Anti-aliasing standard deviation greater"]): + resize(x, (5, 15), order=0, anti_aliasing=True, + anti_aliasing_sigma=(1, 1), mode="reflect") + resize(x, (5, 15), order=0, anti_aliasing=True, + anti_aliasing_sigma=(0, 1), mode="reflect") + + +def test_downscale(): + x = np.zeros((10, 10), dtype=np.double) + x[2:4, 2:4] = 1 + scaled = rescale(x, 0.5, order=0, anti_aliasing=False, + multichannel=False, mode='constant') + assert_equal(scaled.shape, (5, 5)) + assert_equal(scaled[1, 1], 1) + assert_equal(scaled[2:, :].sum(), 0) + assert_equal(scaled[:, 2:].sum(), 0) + + +def test_downscale_anti_aliasing(): + x = np.zeros((10, 10), dtype=np.double) + x[2, 2] = 1 + scaled = rescale(x, 0.5, order=1, anti_aliasing=True, + multichannel=False, mode='constant') + assert_equal(scaled.shape, (5, 5)) + assert np.all(scaled[:3, :3] > 0) + assert_equal(scaled[3:, :].sum(), 0) + assert_equal(scaled[:, 3:].sum(), 0) + + def test_downscale_local_mean(): image1 = np.arange(4 * 6).reshape(4, 6) out1 = downscale_local_mean(image1, (2, 3)) - expected1 = np.array([[ 4., 7.], - [ 16., 19.]]) + expected1 = np.array([[4., 7.], + [16., 19.]]) assert_equal(expected1, out1) image2 = np.arange(5 * 8).reshape(5, 8) out2 = downscale_local_mean(image2, (4, 5)) - expected2 = np.array([[ 14. 
, 10.8], - [ 8.5, 5.7]]) + expected2 = np.array([[14., 10.8], + [8.5, 5.7]]) assert_equal(expected2, out2) def test_invalid(): - assert_raises(ValueError, warp, np.ones((4, 3, 3, 3)), - SimilarityTransform()) + with testing.raises(ValueError): + warp(np.ones((4, 3, 3, 3)), + SimilarityTransform()) def test_inverse(): @@ -316,32 +467,27 @@ def test_slow_warp_nonint_oshape(): image = np.random.rand(5, 5) - assert_raises(ValueError, warp, image, lambda xy: xy, - output_shape=(13.1, 19.5)) + with testing.raises(ValueError): + warp(image, lambda xy: xy, + output_shape=(13.1, 19.5)) warp(image, lambda xy: xy, output_shape=(13.0001, 19.9999)) def test_keep_range(): image = np.linspace(0, 2, 25).reshape(5, 5) - - with expected_warnings(['The default mode']): - out = rescale(image, 2, preserve_range=False, clip=True, order=0) + out = rescale(image, 2, preserve_range=False, clip=True, order=0, + mode='constant', multichannel='False', anti_aliasing=False) assert out.min() == 0 assert out.max() == 2 - with expected_warnings(['The default mode']): - out = rescale(image, 2, preserve_range=True, clip=True, order=0) + out = rescale(image, 2, preserve_range=True, clip=True, order=0, + mode='constant', multichannel='False', anti_aliasing=False) assert out.min() == 0 assert out.max() == 2 - with expected_warnings(['The default mode']): - out = rescale(image.astype(np.uint8), 2, preserve_range=False, - clip=True, order=0) + out = rescale(image.astype(np.uint8), 2, preserve_range=False, + mode='constant', multichannel='False', anti_aliasing=False, + clip=True, order=0) assert out.min() == 0 assert out.max() == 2 / 255.0 - - - -if __name__ == "__main__": - run_module_suite() diff -Nru skimage-0.13.1/skimage/transform/_warps.py skimage-0.14.0/skimage/transform/_warps.py --- skimage-0.13.1/skimage/transform/_warps.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/transform/_warps.py 2018-05-29 01:27:44.000000000 +0000 @@ -7,9 +7,8 @@ from ._warps_cy import _warp_fast from 
..measure import block_reduce -from ..util import img_as_float -from .._shared.utils import get_bound_method_class, safe_as_int, warn, convert_to_float - +from .._shared.utils import (get_bound_method_class, safe_as_int, warn, + convert_to_float) HOMOGRAPHY_TRANSFORMS = ( SimilarityTransform, @@ -18,24 +17,38 @@ ) +def _multichannel_default(multichannel, ndim): + if multichannel is not None: + return multichannel + else: + warn('The default multichannel argument (None) is deprecated. Please ' + 'specify either True or False explicitly. multichannel will ' + 'default to False starting with release 0.16.') + # utility for maintaining previous color image default behavior + if ndim == 3: + return True + else: + return False + + def resize(image, output_shape, order=1, mode=None, cval=0, clip=True, - preserve_range=False): + preserve_range=False, anti_aliasing=None, anti_aliasing_sigma=None): """Resize image to match a certain size. - Performs interpolation to up-size or down-size images. For down-sampling - N-dimensional images by applying a function or the arithmetic mean, see - `skimage.measure.block_reduce` and `skimage.transform.downscale_local_mean`, - respectively. + Performs interpolation to up-size or down-size images. Note that anti- + aliasing should be enabled when down-sizing images to avoid aliasing + artifacts. For down-sampling N-dimensional images with an integer factor + also see `skimage.transform.downscale_local_mean`. Parameters ---------- image : ndarray Input image. output_shape : tuple or ndarray - Size of the generated output image `(rows, cols[, dim])`. If `dim` is - not provided, the number of channels is preserved. In case the number - of input channels does not equal the number of output channels a - 3-dimensional interpolation is applied. + Size of the generated output image `(rows, cols[, ...][, dim])`. If + `dim` is not provided, the number of channels is preserved. 
In case the + number of input channels does not equal the number of output channels a + n-dimensional interpolation is applied. Returns ------- @@ -61,6 +74,14 @@ preserve_range : bool, optional Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of `img_as_float`. + anti_aliasing : bool, optional + Whether to apply a Gaussian filter to smooth the image prior to + down-scaling. It is crucial to filter when down-sampling the image to + avoid aliasing artifacts. + anti_aliasing_sigma : {float, tuple of floats}, optional + Standard deviation for Gaussian filtering to avoid aliasing artifacts. + By default, this value is chosen as (1 - s) / 2 where s is the + down-scaling factor. Notes ----- @@ -84,48 +105,61 @@ warn("The default mode, 'constant', will be changed to 'reflect' in " "skimage 0.15.") - rows, cols = output_shape[0], output_shape[1] - orig_rows, orig_cols = image.shape[0], image.shape[1] - - row_scale = float(orig_rows) / rows - col_scale = float(orig_cols) / cols - - # 3-dimensional interpolation - if len(output_shape) == 3 and (image.ndim == 2 - or output_shape[2] != image.shape[2]): - ndi_mode = _to_ndimage_mode(mode) - dim = output_shape[2] - if image.ndim == 2: - image = image[:, :, np.newaxis] - orig_dim = image.shape[2] - dim_scale = float(orig_dim) / dim - - map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim] - map_rows = row_scale * (map_rows + 0.5) - 0.5 - map_cols = col_scale * (map_cols + 0.5) - 0.5 - map_dims = dim_scale * (map_dims + 0.5) - 0.5 - - coord_map = np.array([map_rows, map_cols, map_dims]) - - image = convert_to_float(image, preserve_range) - - out = ndi.map_coordinates(image, coord_map, order=order, - mode=ndi_mode, cval=cval) - - _clip_warp_output(image, out, order, mode, cval, clip) - - else: # 2-dimensional interpolation - + if anti_aliasing is None: + anti_aliasing = False + warn("Anti-aliasing will be enabled by default in skimage 0.15 to " + "avoid 
aliasing artifacts when down-sampling images.") + + output_shape = tuple(output_shape) + output_ndim = len(output_shape) + input_shape = image.shape + if output_ndim > image.ndim: + # append dimensions to input_shape + input_shape = input_shape + (1, ) * (output_ndim - image.ndim) + image = np.reshape(image, input_shape) + elif output_ndim == image.ndim - 1: + # multichannel case: append shape of last axis + output_shape = output_shape + (image.shape[-1], ) + elif output_ndim < image.ndim - 1: + raise ValueError("len(output_shape) cannot be smaller than the image " + "dimensions") + + factors = (np.asarray(input_shape, dtype=float) / + np.asarray(output_shape, dtype=float)) + + if anti_aliasing: + if anti_aliasing_sigma is None: + anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2) + else: + anti_aliasing_sigma = \ + np.atleast_1d(anti_aliasing_sigma) * np.ones_like(factors) + if np.any(anti_aliasing_sigma < 0): + raise ValueError("Anti-aliasing standard deviation must be " + "greater than or equal to zero") + elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)): + warn("Anti-aliasing standard deviation greater than zero but " + "not down-sampling along all axes") + + image = ndi.gaussian_filter(image, anti_aliasing_sigma, + cval=cval, mode=mode) + + # 2-dimensional interpolation + if len(output_shape) == 2 or (len(output_shape) == 3 and + output_shape[2] == input_shape[2]): + rows = output_shape[0] + cols = output_shape[1] + input_rows = input_shape[0] + input_cols = input_shape[1] if rows == 1 and cols == 1: - tform = AffineTransform(translation=(orig_cols / 2.0 - 0.5, - orig_rows / 2.0 - 0.5)) + tform = AffineTransform(translation=(input_cols / 2.0 - 0.5, + input_rows / 2.0 - 0.5)) else: # 3 control points necessary to estimate exact AffineTransform src_corners = np.array([[1, 1], [1, rows], [cols, rows]]) - 1 dst_corners = np.zeros(src_corners.shape, dtype=np.double) # take into account that 0th pixel is at position (0.5, 0.5) - dst_corners[:, 0] = 
col_scale * (src_corners[:, 0] + 0.5) - 0.5 - dst_corners[:, 1] = row_scale * (src_corners[:, 1] + 0.5) - 0.5 + dst_corners[:, 0] = factors[1] * (src_corners[:, 0] + 0.5) - 0.5 + dst_corners[:, 1] = factors[0] * (src_corners[:, 1] + 0.5) - 0.5 tform = AffineTransform() tform.estimate(src_corners, dst_corners) @@ -134,17 +168,34 @@ mode=mode, cval=cval, clip=clip, preserve_range=preserve_range) + else: # n-dimensional interpolation + coord_arrays = [factors[i] * (np.arange(d) + 0.5) - 0.5 + for i, d in enumerate(output_shape)] + + coord_map = np.array(np.meshgrid(*coord_arrays, + sparse=False, + indexing='ij')) + + image = convert_to_float(image, preserve_range) + + ndi_mode = _to_ndimage_mode(mode) + out = ndi.map_coordinates(image, coord_map, order=order, + mode=ndi_mode, cval=cval) + + _clip_warp_output(image, out, order, mode, cval, clip) + return out def rescale(image, scale, order=1, mode=None, cval=0, clip=True, - preserve_range=False): + preserve_range=False, multichannel=None, + anti_aliasing=None, anti_aliasing_sigma=None): """Scale image by a certain factor. - Performs interpolation to upscale or down-scale images. For down-sampling - N-dimensional images with integer factors by applying a function or the - arithmetic mean, see `skimage.measure.block_reduce` and - `skimage.transform.downscale_local_mean`, respectively. + Performs interpolation to up-scale or down-scale images. Note that anti- + aliasing should be enabled when down-sizing images to avoid aliasing + artifacts. For down-sampling N-dimensional images with an integer factor + also see `skimage.transform.downscale_local_mean`. Parameters ---------- @@ -152,7 +203,7 @@ Input image. scale : {float, tuple of floats} Scale factors. Separate scale factors can be defined as - `(row_scale, col_scale)`. + `(rows, cols[, ...][, dim])`. Returns ------- @@ -178,6 +229,27 @@ preserve_range : bool, optional Whether to keep the original range of values. 
Otherwise, the input image is converted according to the conventions of `img_as_float`. + multichannel : bool, optional + Whether the last axis of the image is to be interpreted as multiple + channels or another spatial dimension. By default, is set to True for + 3D (2D+color) inputs, and False for others. Starting in release 0.16, + this will always default to False. + anti_aliasing : bool, optional + Whether to apply a Gaussian filter to smooth the image prior to + down-scaling. It is crucial to filter when down-sampling the image to + avoid aliasing artifacts. + anti_aliasing_sigma : {float, tuple of floats}, optional + Standard deviation for Gaussian filtering to avoid aliasing artifacts. + By default, this value is chosen as (1 - s) / 2 where s is the + down-scaling factor. + + Notes + ----- + Modes 'reflect' and 'symmetric' are similar, but differ in whether the edge + pixels are duplicated during the reflection. As an example, if an array + has values [0, 1, 2] and was padded to the right by four values using + symmetric, the result would be [0, 1, 2, 2, 1, 0, 0], while for reflect it + would be [0, 1, 2, 1, 0, 1, 2]. 
Examples -------- @@ -190,19 +262,24 @@ (256, 256) """ - - try: - row_scale, col_scale = scale - except TypeError: - row_scale = col_scale = scale - - orig_rows, orig_cols = image.shape[0], image.shape[1] - rows = np.round(row_scale * orig_rows) - cols = np.round(col_scale * orig_cols) - output_shape = (rows, cols) + multichannel = _multichannel_default(multichannel, image.ndim) + scale = np.atleast_1d(scale) + if len(scale) > 1: + if ((not multichannel and len(scale) != image.ndim) or + (multichannel and len(scale) != image.ndim - 1)): + raise ValueError("Supply a single scale, or one value per spatial " + "axis") + if multichannel: + scale = np.concatenate((scale, [1])) + orig_shape = np.asarray(image.shape) + output_shape = np.round(scale * orig_shape) + if multichannel: # don't scale channel dimension + output_shape[-1] = orig_shape[-1] return resize(image, output_shape, order=order, mode=mode, cval=cval, - clip=clip, preserve_range=preserve_range) + clip=clip, preserve_range=preserve_range, + anti_aliasing=anti_aliasing, + anti_aliasing_sigma=anti_aliasing_sigma) def rotate(image, angle, resize=False, center=None, order=1, mode='constant', @@ -247,6 +324,14 @@ Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of `img_as_float`. + Notes + ----- + Modes 'reflect' and 'symmetric' are similar, but differ in whether the edge + pixels are duplicated during the reflection. As an example, if an array + has values [0, 1, 2] and was padded to the right by four values using + symmetric, the result would be [0, 1, 2, 2, 1, 0, 0], while for reflect it + would be [0, 1, 2, 1, 0, 1, 2]. + Examples -------- >>> from skimage import data @@ -369,7 +454,7 @@ ---------- image : ndarray Input image. - center : (row, column) tuple or (2,) ndarray, optional + center : (column, row) tuple or (2,) ndarray, optional Center coordinate of transformation. strength : float, optional The amount of swirling applied. 
@@ -414,7 +499,7 @@ mode = 'constant' if center is None: - center = np.array(image.shape)[:2] / 2 + center = np.array(image.shape)[:2][::-1] / 2 warp_args = {'center': center, 'rotation': rotation, @@ -527,15 +612,6 @@ return coords -def _convert_warp_input(image, preserve_range): - """Convert input image to double image with the appropriate range.""" - if preserve_range: - image = image.astype(np.double) - else: - image = img_as_float(image) - return image - - def _clip_warp_output(input_image, output_image, order, mode, cval, clip): """Clip output image to range of values of input image. @@ -570,8 +646,8 @@ min_val = input_image.min() max_val = input_image.max() - preserve_cval = mode == 'constant' and not \ - (min_val <= cval <= max_val) + preserve_cval = (mode == 'constant' and not + (min_val <= cval <= max_val)) if preserve_cval: cval_mask = output_image == cval @@ -719,7 +795,7 @@ >>> warped = warp(cube, coords) """ - image = _convert_warp_input(image, preserve_range) + image = convert_to_float(image, preserve_range) input_shape = np.array(image.shape) @@ -754,10 +830,9 @@ # inverse_map is a homography matrix = inverse_map.params - elif (hasattr(inverse_map, '__name__') - and inverse_map.__name__ == 'inverse' - and get_bound_method_class(inverse_map) \ - in HOMOGRAPHY_TRANSFORMS): + elif (hasattr(inverse_map, '__name__') and + inverse_map.__name__ == 'inverse' and + get_bound_method_class(inverse_map) in HOMOGRAPHY_TRANSFORMS): # inverse_map is the inverse of a homography matrix = np.linalg.inv(six.get_method_self(inverse_map).params) @@ -765,8 +840,8 @@ matrix = matrix.astype(np.double) if image.ndim == 2: warped = _warp_fast(image, matrix, - output_shape=output_shape, - order=order, mode=mode, cval=cval) + output_shape=output_shape, + order=order, mode=mode, cval=cval) elif image.ndim == 3: dims = [] for dim in range(image.shape[2]): @@ -778,8 +853,8 @@ if warped is None: # use ndi.map_coordinates - if (isinstance(inverse_map, np.ndarray) - and 
inverse_map.shape == (3, 3)): + if (isinstance(inverse_map, np.ndarray) and + inverse_map.shape == (3, 3)): # inverse_map is a transformation matrix as numpy array, # this is only used for order >= 4. inverse_map = ProjectiveTransform(matrix=inverse_map) diff -Nru skimage-0.13.1/skimage/util/dtype.py skimage-0.14.0/skimage/util/dtype.py --- skimage-0.13.1/skimage/util/dtype.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/dtype.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,27 +2,35 @@ import numpy as np from warnings import warn -__all__ = ['img_as_float', 'img_as_int', 'img_as_uint', 'img_as_ubyte', + +__all__ = ['img_as_float32', 'img_as_float64', 'img_as_float', + 'img_as_int', 'img_as_uint', 'img_as_ubyte', 'img_as_bool', 'dtype_limits'] +# For integers Numpy uses `_integer_types` basis internally, and builds a leaky +# `np.XintYY` abstraction on top of it. This leads to situations when, for +# example, there are two np.Xint64 dtypes with the same attributes but +# different object references. In order to avoid any potential issues, +# we use the basis dtypes here. For more information, see: +# - https://github.com/scikit-image/scikit-image/issues/3043 +# For convenience, for these dtypes we indicate also the possible bit depths +# (some of them are platform specific). 
For the details, see: +# http://www.unix.org/whitepapers/64bit.html +_integer_types = (np.byte, np.ubyte, # 8 bits + np.short, np.ushort, # 16 bits + np.intc, np.uintc, # 16 or 32 or 64 bits + np.int_, np.uint, # 32 or 64 bits + np.longlong, np.ulonglong) # 64 bits +_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) + for t in _integer_types} dtype_range = {np.bool_: (False, True), np.bool8: (False, True), - np.uint8: (0, 255), - np.uint16: (0, 65535), - np.uint32: (0, 2**32 - 1), - np.uint64: (0, 2**64 - 1), - np.int8: (-128, 127), - np.int16: (-32768, 32767), - np.int32: (-2**31, 2**31 - 1), - np.int64: (-2**63, 2**63 - 1), np.float16: (-1, 1), np.float32: (-1, 1), np.float64: (-1, 1)} +dtype_range.update(_integer_ranges) -_supported_types = (np.bool_, np.bool8, - np.uint8, np.uint16, np.uint32, np.uint64, - np.int8, np.int16, np.int32, np.int64, - np.float16, np.float32, np.float64) +_supported_types = list(dtype_range.keys()) def dtype_limits(image, clip_negative=None): @@ -243,43 +251,51 @@ # floating point -> integer prec_loss() # use float type that can represent output integer type - image = image.astype(_dtype_itemsize(itemsize_out, dtype_in, - np.float32, np.float64)) + computation_type = _dtype_itemsize(itemsize_out, dtype_in, + np.float32, np.float64) + if not uniform: if kind_out == 'u': - image *= imax_out + image_out = np.multiply(image, imax_out, + dtype=computation_type) else: - image *= imax_out - imin_out - image -= 1.0 - image /= 2.0 - np.rint(image, out=image) - np.clip(image, imin_out, imax_out, out=image) + image_out = np.multiply(image, (imax_out - imin_out) / 2, + dtype=computation_type) + image_out -= 1.0 / 2. 
+ np.rint(image_out, out=image_out) + np.clip(image_out, imin_out, imax_out, out=image_out) elif kind_out == 'u': - image *= imax_out + 1 - np.clip(image, 0, imax_out, out=image) + image_out = np.multiply(image, imax_out + 1, + dtype=computation_type) + np.clip(image_out, 0, imax_out, out=image_out) else: - image *= (imax_out - imin_out + 1.0) / 2.0 - np.floor(image, out=image) - np.clip(image, imin_out, imax_out, out=image) - return image.astype(dtype_out) + image_out = np.multiply(image, (imax_out - imin_out + 1.0) / 2.0, + dtype=computation_type) + np.floor(image_out, out=image_out) + np.clip(image_out, imin_out, imax_out, out=image_out) + return image_out.astype(dtype_out) # signed/unsigned int -> float if kind_out == 'f': if itemsize_in >= itemsize_out: prec_loss() + # use float type that can exactly represent input integers - image = image.astype(_dtype_itemsize(itemsize_in, dtype_out, - np.float32, np.float64)) + computation_type = _dtype_itemsize(itemsize_in, dtype_out, + np.float32, np.float64) if kind_in == 'u': - image /= imax_in + # using np.divide or np.multiply doesn't copy the data + # until the computation time + image = np.multiply(image, 1. / imax_in, + dtype=computation_type) # DirectX uses this conversion also for signed ints # if imin_in: # np.maximum(image, -1.0, out=image) else: - image *= 2.0 - image += 1.0 - image /= imax_in - imin_in - return image.astype(dtype_out) + image = np.multiply(image, 2. / (imax_in - imin_in), + dtype=computation_type) + image += 1.0 / (imax_in - imin_in) + return np.asarray(image, dtype_out) # unsigned int -> signed/unsigned int if kind_in == 'u': @@ -310,7 +326,33 @@ return image.astype(dtype_out) -def img_as_float(image, force_copy=False): +def img_as_float32(image, force_copy=False): + """Convert an image to single-precision (32-bit) floating point format. + + Parameters + ---------- + image : ndarray + Input image. + force_copy : bool, optional + Force a copy of the data, irrespective of its current dtype. 
+ + Returns + ------- + out : ndarray of float32 + Output image. + + Notes + ----- + The range of a floating point image is [0.0, 1.0] or [-1.0, 1.0] when + converting from unsigned or signed datatypes, respectively. + If the input image has a float type, intensity values are not modified + and can be outside the ranges [0.0, 1.0] or [-1.0, 1.0]. + + """ + return convert(image, np.float32, force_copy) + + +def img_as_float64(image, force_copy=False): """Convert an image to double-precision (64-bit) floating point format. Parameters @@ -336,6 +378,9 @@ return convert(image, np.float64, force_copy) +img_as_float = img_as_float64 + + def img_as_uint(image, force_copy=False): """Convert an image to 16-bit unsigned integer format. diff -Nru skimage-0.13.1/skimage/util/__init__.py skimage-0.14.0/skimage/util/__init__.py --- skimage-0.13.1/skimage/util/__init__.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/__init__.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,4 +1,5 @@ -from .dtype import (img_as_float, img_as_int, img_as_uint, img_as_ubyte, +from .dtype import (img_as_float32, img_as_float64, img_as_float, + img_as_int, img_as_uint, img_as_ubyte, img_as_bool, dtype_limits) from .shape import view_as_blocks, view_as_windows from .noise import random_noise @@ -8,6 +9,7 @@ from ._regular_grid import regular_grid, regular_seeds from .unique import unique_rows from ._invert import invert +from ._montage import montage, montage2d from .._shared.utils import copy_func @@ -15,7 +17,9 @@ pad = copy_func(numpy_pad, name='pad') -__all__ = ['img_as_float', +__all__ = ['img_as_float32', + 'img_as_float64', + 'img_as_float', 'img_as_int', 'img_as_uint', 'img_as_ubyte', @@ -25,6 +29,8 @@ 'view_as_windows', 'pad', 'crop', + 'montage', + 'montage2d', 'random_noise', 'regular_grid', 'regular_seeds', diff -Nru skimage-0.13.1/skimage/util/_invert.py skimage-0.14.0/skimage/util/_invert.py --- skimage-0.13.1/skimage/util/_invert.py 2017-09-26 23:38:27.000000000 +0000 
+++ skimage-0.14.0/skimage/util/_invert.py 2018-05-29 01:27:44.000000000 +0000 @@ -2,32 +2,73 @@ from .dtype import dtype_limits -def invert(image): +def invert(image, signed_float=False): """Invert an image. - Substract the image to the maximum value allowed by the dtype maximum. + Invert the intensity range of the input image, so that the dtype maximum + is now the dtype minimum, and vice-versa. This operation is + slightly different depending on the input dtype: + + - unsigned integers: subtract the image from the dtype maximum + - signed integers: subtract the image from -1 (see Notes) + - floats: subtract the image from 1 (if signed_float is False, so we + assume the image is unsigned), or from 0 (if signed_float is True). + + See the examples for clarification. Parameters ---------- image : ndarray - The input image. + Input image. + signed_float : bool, optional + If True and the image is of type float, the range is assumed to + be [-1, 1]. If False and the image is of type float, the range is + assumed to be [0, 1]. Returns ------- - invert : ndarray + inverted : ndarray Inverted image. + Notes + ----- + Ideally, for signed integers we would simply multiply by -1. However, + signed integer ranges are asymmetric. For example, for np.int8, the range + of possible values is [-128, 127], so that -128 * -1 equals -128! By + subtracting from -1, we correctly map the maximum dtype value to the + minimum. + Examples -------- - >>> img = np.array([[100, 0, 200], - ... [0, 50, 0], - ... [30, 0, 255]], np.uint8) + >>> img = np.array([[100, 0, 200], + ... [ 0, 50, 0], + ... [ 30, 0, 255]], np.uint8) >>> invert(img) array([[155, 255, 55], [255, 205, 255], [225, 255, 0]], dtype=uint8) + >>> img2 = np.array([[ -2, 0, -128], + ... [127, 0, 5]], np.int8) + >>> invert(img2) + array([[ 1, -1, 127], + [-128, -1, -6]], dtype=int8) + >>> img3 = np.array([[ 0., 1., 0.5, 0.75]]) + >>> invert(img3) + array([[ 1. , 0. 
, 0.5 , 0.25]]) + >>> img4 = np.array([[ 0., 1., -1., -0.25]]) + >>> invert(img4, signed_float=True) + array([[-0. , -1. , 1. , 0.25]]) """ if image.dtype == 'bool': - return ~image - else: - return dtype_limits(image, clip_negative=False)[1] - image + inverted = ~image + elif np.issubdtype(image.dtype, np.unsignedinteger): + max_val = dtype_limits(image, clip_negative=False)[1] + inverted = np.subtract(max_val, image, dtype=image.dtype) + elif np.issubdtype(image.dtype, np.signedinteger): + inverted = np.subtract(-1, image, dtype=image.dtype) + else: # float dtype + if signed_float: + inverted = -image + else: + inverted = np.subtract(1, image, dtype=image.dtype) + return inverted diff -Nru skimage-0.13.1/skimage/util/_montage.py skimage-0.14.0/skimage/util/_montage.py --- skimage-0.13.1/skimage/util/_montage.py 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/skimage/util/_montage.py 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,256 @@ +import numpy as np +from .. import exposure +from .._shared.utils import deprecated + + +__all__ = ['montage', 'montage2d'] + + +def montage(arr_in, fill='mean', rescale_intensity=False, grid_shape=None, + padding_width=0, multichannel=False): + """Create a montage of several single- or multichannel images. + + Create a rectangular montage from an input array representing an ensemble + of equally shaped single- (gray) or multichannel (color) images. + + For example, ``montage(arr_in)`` called with the following `arr_in` + + +---+---+---+ + | 1 | 2 | 3 | + +---+---+---+ + + will return + + +---+---+ + | 1 | 2 | + +---+---+ + | 3 | * | + +---+---+ + + where the '*' patch will be determined by the `fill` parameter. + + Parameters + ---------- + arr_in : (K, M, N[, C]) ndarray + An array representing an ensemble of `K` images of equal shape. + fill : float or array-like of floats or 'mean', optional + Value to fill the padding areas and/or the extra tiles in + the output array. Has to be `float` for single channel collections. 
+ For multichannel collections has to be an array-like of shape of + number of channels. If `mean`, uses the mean value over all images. + rescale_intensity : bool, optional + Whether to rescale the intensity of each image to [0, 1]. + grid_shape : tuple, optional + The desired grid shape for the montage `(ntiles_row, ntiles_column)`. + The default aspect ratio is square. + padding_width : int, optional + The size of the spacing between the tiles and between the tiles and + the borders. If non-zero, makes the boundaries of individual images + easier to perceive. + multichannel : boolean, optional + If True, the last `arr_in` dimension is threated as a color channel, + otherwise as spatial. + + Returns + ------- + arr_out : (K*(M+p)+p, K*(N+p)+p[, C]) ndarray + Output array with input images glued together (including padding `p`). + + Examples + -------- + >>> import numpy as np + >>> from skimage.util import montage + >>> arr_in = np.arange(3 * 2 * 2).reshape(3, 2, 2) + >>> arr_in # doctest: +NORMALIZE_WHITESPACE + array([[[ 0, 1], + [ 2, 3]], + [[ 4, 5], + [ 6, 7]], + [[ 8, 9], + [10, 11]]]) + >>> arr_out = montage(arr_in) + >>> arr_out.shape + (4, 4) + >>> arr_out + array([[ 0, 1, 4, 5], + [ 2, 3, 6, 7], + [ 8, 9, 5, 5], + [10, 11, 5, 5]]) + >>> arr_in.mean() + 5.5 + >>> arr_out_nonsquare = montage(arr_in, grid_shape=(1, 3)) + >>> arr_out_nonsquare + array([[ 0, 1, 4, 5, 8, 9], + [ 2, 3, 6, 7, 10, 11]]) + >>> arr_out_nonsquare.shape + (2, 6) + """ + + if multichannel: + arr_in = np.asarray(arr_in) + else: + arr_in = np.asarray(arr_in)[..., np.newaxis] + + if arr_in.ndim != 4: + raise ValueError('Input array has to be either 3- or 4-dimensional') + + n_images, n_rows, n_cols, n_chan = arr_in.shape + + if grid_shape: + ntiles_row, ntiles_col = [int(s) for s in grid_shape] + else: + ntiles_row = ntiles_col = int(np.ceil(np.sqrt(n_images))) + + # Rescale intensity if necessary + if rescale_intensity: + for i in range(n_images): + arr_in[i] = 
exposure.rescale_intensity(arr_in[i]) + + # Calculate the fill value + if fill == 'mean': + fill = arr_in.mean(axis=(0, 1, 2)) + fill = np.atleast_1d(fill).astype(arr_in.dtype) + + # Pre-allocate an array with padding for montage + n_pad = padding_width + arr_out = np.empty(((n_rows + n_pad) * ntiles_row + n_pad, + (n_cols + n_pad) * ntiles_col + n_pad, + n_chan), dtype=arr_in.dtype) + for idx_chan in range(n_chan): + arr_out[..., idx_chan] = fill[idx_chan] + + slices_row = [slice(n_pad + (n_rows + n_pad) * n, + n_pad + (n_rows + n_pad) * n + n_rows) + for n in range(ntiles_row)] + slices_col = [slice(n_pad + (n_cols + n_pad) * n, + n_pad + (n_cols + n_pad) * n + n_cols) + for n in range(ntiles_col)] + + # Copy the data to the output array + for idx_image, image in enumerate(arr_in): + idx_sr = idx_image // ntiles_col + idx_sc = idx_image % ntiles_col + arr_out[slices_row[idx_sr], slices_col[idx_sc], :] = image + + if multichannel: + return arr_out + else: + return arr_out[..., 0] + + +@deprecated('montage', removed_version='0.15') +def montage2d(arr_in, fill='mean', rescale_intensity=False, grid_shape=None, + padding_width=0): + """Create a 2-dimensional 'montage' from a 3-dimensional input array + representing an ensemble of equally shaped 2-dimensional images. + + For example, ``montage2d(arr_in, fill)`` with the following `arr_in` + + +---+---+---+ + | 1 | 2 | 3 | + +---+---+---+ + + will return: + + +---+---+ + | 1 | 2 | + +---+---+ + | 3 | * | + +---+---+ + + Where the '*' patch will be determined by the `fill` parameter. + + Parameters + ---------- + arr_in : ndarray, shape=[n_images, height, width] + 3-dimensional input array representing an ensemble of n_images + of equal shape (i.e. [height, width]). + fill : float or 'mean', optional + How to fill the 2-dimensional output array when sqrt(n_images) + is not an integer. If 'mean' is chosen, then fill = arr_in.mean(). 
+ rescale_intensity : bool, optional + Whether to rescale the intensity of each image to [0, 1]. + grid_shape : tuple, optional + The desired grid shape for the montage (tiles_y, tiles_x). + The default aspect ratio is square. + padding_width : int, optional + The size of the spacing between the tiles to make the + boundaries of individual frames easier to see. + + Returns + ------- + arr_out : ndarray, shape=[alpha * height, alpha * width] + Output array where 'alpha' has been determined automatically to + fit (at least) the `n_images` in `arr_in`. + + Examples + -------- + >>> import numpy as np + >>> from skimage.util import montage2d + >>> arr_in = np.arange(3 * 2 * 2).reshape(3, 2, 2) + >>> arr_in # doctest: +NORMALIZE_WHITESPACE + array([[[ 0, 1], + [ 2, 3]], + [[ 4, 5], + [ 6, 7]], + [[ 8, 9], + [10, 11]]]) + >>> arr_out = montage2d(arr_in) + >>> arr_out.shape + (4, 4) + >>> arr_out + array([[ 0, 1, 4, 5], + [ 2, 3, 6, 7], + [ 8, 9, 5, 5], + [10, 11, 5, 5]]) + >>> arr_in.mean() + 5.5 + >>> arr_out_nonsquare = montage2d(arr_in, grid_shape=(1, 3)) + >>> arr_out_nonsquare + array([[ 0, 1, 4, 5, 8, 9], + [ 2, 3, 6, 7, 10, 11]]) + >>> arr_out_nonsquare.shape + (2, 6) + """ + + assert arr_in.ndim == 3 + + # -- fill missing patches (needs to be calculated before border padding) + if fill == 'mean': + fill = arr_in.mean() + + # -- add border padding, np.pad does all dimensions + # so we remove the padding from the first + if padding_width > 0: + # only pad after to make the width correct + bef_aft = (0, padding_width) + arr_in = np.pad(arr_in, ((0, 0), bef_aft, bef_aft), mode='constant') + else: + arr_in = arr_in.copy() + + n_images, height, width = arr_in.shape + + # -- rescale intensity if necessary + if rescale_intensity: + for i in range(n_images): + arr_in[i] = exposure.rescale_intensity(arr_in[i]) + + # -- determine alpha + if grid_shape: + alpha_y, alpha_x = grid_shape + else: + alpha_y = alpha_x = int(np.ceil(np.sqrt(n_images))) + + n_missing = int((alpha_y 
* alpha_x) - n_images) + # sometimes the mean returns a float, this ensures the missing + # has the same type for non-float images + missing = (np.ones((n_missing, height, width), dtype=arr_in.dtype) * + fill).astype(arr_in.dtype) + arr_out = np.vstack((arr_in, missing)) + + # -- reshape to 2d montage, step by step + arr_out = arr_out.reshape(alpha_y, alpha_x, height, width) + arr_out = arr_out.swapaxes(1, 2) + arr_out = arr_out.reshape(alpha_y * height, alpha_x * width) + + return arr_out diff -Nru skimage-0.13.1/skimage/util/montage.py skimage-0.14.0/skimage/util/montage.py --- skimage-0.13.1/skimage/util/montage.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/montage.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -__all__ = ['montage2d'] - -import numpy as np -from .. import exposure - -EPSILON = 1e-6 - - -def montage2d(arr_in, fill='mean', rescale_intensity=False, grid_shape=None): - """Create a 2-dimensional 'montage' from a 3-dimensional input array - representing an ensemble of equally shaped 2-dimensional images. - - For example, ``montage2d(arr_in, fill)`` with the following `arr_in` - - +---+---+---+ - | 1 | 2 | 3 | - +---+---+---+ - - will return: - - +---+---+ - | 1 | 2 | - +---+---+ - | 3 | * | - +---+---+ - - Where the '*' patch will be determined by the `fill` parameter. - - Parameters - ---------- - arr_in: ndarray, shape=[n_images, height, width] - 3-dimensional input array representing an ensemble of n_images - of equal shape (i.e. [height, width]). - fill: float or 'mean', optional - How to fill the 2-dimensional output array when sqrt(n_images) - is not an integer. If 'mean' is chosen, then fill = arr_in.mean(). - rescale_intensity: bool, optional - Whether to rescale the intensity of each image to [0, 1]. - grid_shape: tuple, optional - The desired grid shape for the montage (tiles_y, tiles_x). - The default aspect ratio is square. 
- - Returns - ------- - arr_out: ndarray, shape=[alpha * height, alpha * width] - Output array where 'alpha' has been determined automatically to - fit (at least) the `n_images` in `arr_in`. - - Examples - -------- - >>> import numpy as np - >>> from skimage.util.montage import montage2d - >>> arr_in = np.arange(3 * 2 * 2).reshape(3, 2, 2) - >>> arr_in # doctest: +NORMALIZE_WHITESPACE - array([[[ 0, 1], - [ 2, 3]], - [[ 4, 5], - [ 6, 7]], - [[ 8, 9], - [10, 11]]]) - >>> arr_out = montage2d(arr_in) - >>> arr_out.shape - (4, 4) - >>> arr_out - array([[ 0. , 1. , 4. , 5. ], - [ 2. , 3. , 6. , 7. ], - [ 8. , 9. , 5.5, 5.5], - [ 10. , 11. , 5.5, 5.5]]) - >>> arr_in.mean() - 5.5 - >>> arr_out_nonsquare = montage2d(arr_in, grid_shape=(1, 3)) - >>> arr_out_nonsquare - array([[ 0., 1., 4., 5., 8., 9.], - [ 2., 3., 6., 7., 10., 11.]]) - >>> arr_out_nonsquare.shape - (2, 6) - - """ - - assert arr_in.ndim == 3 - - n_images, height, width = arr_in.shape - - arr_in = arr_in.copy() - - # -- rescale intensity if necessary - if rescale_intensity: - for i in range(n_images): - arr_in[i] = exposure.rescale_intensity(arr_in[i]) - - # -- determine alpha - if grid_shape: - alpha_y, alpha_x = grid_shape - else: - alpha_y = alpha_x = int(np.ceil(np.sqrt(n_images))) - - # -- fill missing patches - if fill == 'mean': - fill = arr_in.mean() - - n_missing = int((alpha_y * alpha_x) - n_images) - missing = np.ones((n_missing, height, width), dtype=arr_in.dtype) * fill - arr_out = np.vstack((arr_in, missing)) - - # -- reshape to 2d montage, step by step - arr_out = arr_out.reshape(alpha_y, alpha_x, height, width) - arr_out = arr_out.swapaxes(1, 2) - arr_out = arr_out.reshape(alpha_y * height, alpha_x * width) - - return arr_out diff -Nru skimage-0.13.1/skimage/util/tests/test_apply_parallel.py skimage-0.14.0/skimage/util/tests/test_apply_parallel.py --- skimage-0.13.1/skimage/util/tests/test_apply_parallel.py 2017-09-26 23:38:27.000000000 +0000 +++ 
skimage-0.14.0/skimage/util/tests/test_apply_parallel.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,14 +1,14 @@ from __future__ import absolute_import import numpy as np -from numpy.testing import assert_array_almost_equal -from numpy.testing.decorators import skipif +from skimage._shared import testing +from skimage._shared.testing import assert_array_almost_equal from skimage.filters import threshold_local, gaussian from skimage.util.apply_parallel import apply_parallel, dask_available -@skipif(not dask_available) +@testing.skipif(not dask_available, reason="dask not installed") def test_apply_parallel(): # data a = np.arange(144).reshape(12, 12).astype(float) @@ -30,7 +30,7 @@ assert_array_almost_equal(result2, expected2) -@skipif(not dask_available) +@testing.skipif(not dask_available, reason="dask not installed") def test_no_chunks(): a = np.ones(1 * 4 * 8 * 9).reshape(1, 4, 8, 9) @@ -43,7 +43,7 @@ assert_array_almost_equal(result, expected) -@skipif(not dask_available) +@testing.skipif(not dask_available, reason="dask not installed") def test_apply_parallel_wrap(): def wrapped(arr): return gaussian(arr, 1, mode='wrap') @@ -54,7 +54,7 @@ assert_array_almost_equal(result, expected) -@skipif(not dask_available) +@testing.skipif(not dask_available, reason="dask not installed") def test_apply_parallel_nearest(): def wrapped(arr): return gaussian(arr, 1, mode='nearest') diff -Nru skimage-0.13.1/skimage/util/tests/test_arraycrop.py skimage-0.14.0/skimage/util/tests/test_arraycrop.py --- skimage-0.13.1/skimage/util/tests/test_arraycrop.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/tests/test_arraycrop.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,8 @@ -"""Tests for array cropping.""" from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import (assert_array_equal, assert_equal) from skimage.util import crop +from skimage._shared.testing import (assert_array_equal, assert_equal) def 
test_multi_crop(): @@ -48,7 +47,3 @@ arr = np.arange(45).reshape(9, 5) out = crop(arr, 0) assert out.shape == (9, 5) - - -if __name__ == "__main__": - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/util/tests/test_arraypad.py skimage-0.14.0/skimage/util/tests/test_arraypad.py --- skimage-0.13.1/skimage/util/tests/test_arraypad.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/tests/test_arraypad.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,13 +1,12 @@ -"""Tests for the array padding functions. - -""" from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import (assert_array_equal, assert_raises, assert_allclose, - TestCase) from skimage.util import pad +from skimage._shared import testing +from skimage._shared.testing import (assert_array_equal, assert_allclose, + TestCase) + class TestConditionalShortcuts(TestCase): def test_zero_padding_shortcuts(self): @@ -958,22 +957,22 @@ arr = np.arange(30) arr = np.reshape(arr, (6, 5)) kwargs = dict(mode='mean', stat_length=(3, )) - assert_raises(ValueError, pad, arr, ((2, 3), (3, 2), (4, 5)), - **kwargs) + with testing.raises(ValueError): + pad(arr, ((2, 3), (3, 2), (4, 5)), **kwargs) def test_check_negative_stat_length(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) kwargs = dict(mode='mean', stat_length=(-3, )) - assert_raises(ValueError, pad, arr, ((2, 3), (3, 2)), - **kwargs) + with testing.raises(ValueError): + pad(arr, ((2, 3), (3, 2)), **kwargs) def test_check_negative_pad_width(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) kwargs = dict(mode='mean', stat_length=(3, )) - assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)), - **kwargs) + with testing.raises(ValueError): + pad(arr, ((-2, 3), (3, 2)), **kwargs) class ValueError2(TestCase): @@ -981,67 +980,72 @@ arr = np.arange(30) arr = np.reshape(arr, (6, 5)) kwargs = dict(mode='mean', stat_length=(3, )) - assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)), - 
**kwargs) + with testing.raises(ValueError): + pad(arr, ((-2, 3), (3, 2)), **kwargs) class ValueError3(TestCase): def test_check_kwarg_not_allowed(self): arr = np.arange(30).reshape(5, 6) - assert_raises(ValueError, pad, arr, 4, mode='mean', - reflect_type='odd') + with testing.raises(ValueError): + pad(arr, 4, mode='mean', reflect_type='odd') def test_mode_not_set(self): arr = np.arange(30).reshape(5, 6) - assert_raises((ValueError, TypeError), pad, arr, 4) + with testing.raises(TypeError): + pad(arr, 4) def test_malformed_pad_amount(self): arr = np.arange(30).reshape(5, 6) - assert_raises(ValueError, pad, arr, (4, 5, 6, 7), mode='constant') + with testing.raises(ValueError): + pad(arr, (4, 5, 6, 7), mode='constant') def test_malformed_pad_amount2(self): arr = np.arange(30).reshape(5, 6) - assert_raises(ValueError, pad, arr, ((3, 4, 5), (0, 1, 2)), - mode='constant') + with testing.raises(ValueError): + pad(arr, ((3, 4, 5), (0, 1, 2)), mode='constant') def test_pad_too_many_axes(self): arr = np.arange(30).reshape(5, 6) # Attempt to pad using a 3D array equivalent bad_shape = (((3,), (4,), (5,)), ((0,), (1,), (2,))) - assert_raises(ValueError, pad, arr, bad_shape, - mode='constant') + with testing.raises(ValueError): + pad(arr, bad_shape, mode='constant') class TypeError1(TestCase): def test_float(self): arr = np.arange(30) - assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2))) - assert_raises(TypeError, pad, arr, np.array(((-2.1, 3), (3, 2)))) + with testing.raises(TypeError): + pad(arr, ((-2.1, 3), (3, 2))) + with testing.raises(TypeError): + pad(arr, np.array(((-2.1, 3), (3, 2)))) def test_str(self): arr = np.arange(30) - assert_raises(TypeError, pad, arr, 'foo') - assert_raises(TypeError, pad, arr, np.array('foo')) + with testing.raises(TypeError): + pad(arr, 'foo') + with testing.raises(TypeError): + pad(arr, np.array('foo')) def test_object(self): class FooBar(object): pass arr = np.arange(30) - assert_raises(TypeError, pad, arr, FooBar()) + with 
testing.raises(TypeError): + pad(arr, FooBar()) def test_complex(self): arr = np.arange(30) - assert_raises(TypeError, pad, arr, complex(1, -1)) - assert_raises(TypeError, pad, arr, np.array(complex(1, -1))) + with testing.raises(TypeError): + pad(arr, complex(1, -1)) + with testing.raises(TypeError): + pad(arr, np.array(complex(1, -1))) def test_check_wrong_pad_amount(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) kwargs = dict(mode='mean', stat_length=(3, )) - assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)), - **kwargs) - - -if __name__ == "__main__": - np.testing.run_module_suite() + with testing.raises(TypeError): + pad(arr, ((2, 3, 4), (3, 2)), **kwargs) diff -Nru skimage-0.13.1/skimage/util/tests/test_dtype.py skimage-0.14.0/skimage/util/tests/test_dtype.py --- skimage-0.13.1/skimage/util/tests/test_dtype.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/tests/test_dtype.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,9 +1,14 @@ +import warnings + import numpy as np -from numpy.testing import assert_equal, assert_raises -from skimage import img_as_int, img_as_float, \ - img_as_uint, img_as_ubyte +import itertools +from skimage import (img_as_int, img_as_float, + img_as_uint, img_as_ubyte) from skimage.util.dtype import convert + from skimage._shared._warnings import expected_warnings +from skimage._shared import testing +from skimage._shared.testing import assert_equal, parametrize dtype_range = {np.uint8: (0, 255), @@ -14,62 +19,64 @@ np.float64: (-1.0, 1.0)} +img_funcs = (img_as_int, img_as_float, img_as_uint, img_as_ubyte) +dtypes_for_img_funcs = (np.int16, np.float64, np.uint16, np.ubyte) +img_funcs_and_types = zip(img_funcs, dtypes_for_img_funcs) + + def _verify_range(msg, x, vmin, vmax, dtype): assert_equal(x[0], vmin) assert_equal(x[-1], vmax) assert x.dtype == dtype -def test_range(): - for dtype in dtype_range: - imin, imax = dtype_range[dtype] - x = np.linspace(imin, imax, 10).astype(dtype) +@parametrize("dtype, 
f_and_dt", + itertools.product(dtype_range, img_funcs_and_types)) +def test_range(dtype, f_and_dt): + imin, imax = dtype_range[dtype] + x = np.linspace(imin, imax, 10).astype(dtype) + + f, dt = f_and_dt + + with expected_warnings(['precision loss|sign loss|\A\Z']): + y = f(x) - for (f, dt) in [(img_as_int, np.int16), - (img_as_float, np.float64), - (img_as_uint, np.uint16), - (img_as_ubyte, np.ubyte)]: + omin, omax = dtype_range[dt] - with expected_warnings(['precision loss|sign loss|\A\Z']): - y = f(x) + if imin == 0 or omin == 0: + omin = 0 + imin = 0 - omin, omax = dtype_range[dt] + _verify_range("From %s to %s" % (np.dtype(dtype), np.dtype(dt)), + y, omin, omax, np.dtype(dt)) - if imin == 0 or omin == 0: - omin = 0 - imin = 0 - yield (_verify_range, - "From %s to %s" % (np.dtype(dtype), np.dtype(dt)), - y, omin, omax, np.dtype(dt)) +# Add non-standard data types that are allowed by the `convert` function. +dtype_range_extra = dtype_range.copy() +dtype_range_extra.update({np.int32: (-2147483648, 2147483647), + np.uint32: (0, 4294967295)}) +dtype_pairs = [(np.uint8, np.uint32), + (np.int8, np.uint32), + (np.int8, np.int32), + (np.int32, np.int8), + (np.float64, np.float32), + (np.int32, np.float32)] -def test_range_extra_dtypes(): + +@parametrize("dtype_in, dt", dtype_pairs) +def test_range_extra_dtypes(dtype_in, dt): """Test code paths that are not skipped by `test_range`""" - # Add non-standard data types that are allowed by the `convert` function. 
- dtype_range_extra = dtype_range.copy() - dtype_range_extra.update({np.int32: (-2147483648, 2147483647), - np.uint32: (0, 4294967295)}) - - dtype_pairs = [(np.uint8, np.uint32), - (np.int8, np.uint32), - (np.int8, np.int32), - (np.int32, np.int8), - (np.float64, np.float32), - (np.int32, np.float32)] - - for dtype_in, dt in dtype_pairs: - imin, imax = dtype_range_extra[dtype_in] - x = np.linspace(imin, imax, 10).astype(dtype_in) - - with expected_warnings(['precision loss|sign loss|\A\Z']): - y = convert(x, dt) - - omin, omax = dtype_range_extra[dt] - yield (_verify_range, - "From %s to %s" % (np.dtype(dtype_in), np.dtype(dt)), - y, omin, omax, np.dtype(dt)) + imin, imax = dtype_range_extra[dtype_in] + x = np.linspace(imin, imax, 10).astype(dtype_in) + + with expected_warnings(['precision loss|sign loss|\A\Z']): + y = convert(x, dt) + + omin, omax = dtype_range_extra[dt] + _verify_range("From %s to %s" % (np.dtype(dtype_in), np.dtype(dt)), + y, omin, omax, np.dtype(dt)) def test_downcast(): @@ -82,9 +89,11 @@ def test_float_out_of_range(): too_high = np.array([2], dtype=np.float32) - assert_raises(ValueError, img_as_int, too_high) + with testing.raises(ValueError): + img_as_int(too_high) too_low = np.array([-2], dtype=np.float32) - assert_raises(ValueError, img_as_int, too_low) + with testing.raises(ValueError): + img_as_int(too_low) def test_copy(): @@ -102,13 +111,26 @@ img_[1, 1] = True img8[1, 1] = True for (func, dt) in [(img_as_int, np.int16), - (img_as_float, np.float64), - (img_as_uint, np.uint16), - (img_as_ubyte, np.ubyte)]: + (img_as_float, np.float64), + (img_as_uint, np.uint16), + (img_as_ubyte, np.ubyte)]: converted_ = func(img_) assert np.sum(converted_) == dtype_range[dt][1] converted8 = func(img8) assert np.sum(converted8) == dtype_range[dt][1] -if __name__ == '__main__': - np.testing.run_module_suite() + +def test_clobber(): + # The `img_as_*` functions should never modify input arrays. 
+ for func_input_type in img_funcs: + for func_output_type in img_funcs: + img = np.random.rand(5, 5) + + with warnings.catch_warnings(): + # UserWarning for possible precision loss, expected + warnings.simplefilter('ignore', UserWarning) + img_in = func_input_type(img) + img_in_before = img_in.copy() + img_out = func_output_type(img_in) + + assert_equal(img_in, img_in_before) diff -Nru skimage-0.13.1/skimage/util/tests/test_invert.py skimage-0.14.0/skimage/util/tests/test_invert.py --- skimage-0.13.1/skimage/util/tests/test_invert.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/tests/test_invert.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,8 +1,10 @@ import numpy as np -from numpy.testing import assert_array_equal from skimage import dtype_limits +from skimage.util.dtype import dtype_range from skimage.util import invert +from skimage._shared.testing import assert_array_equal + def test_invert_bool(): dtype = 'bool' @@ -29,24 +31,47 @@ def test_invert_int8(): dtype = 'int8' image = np.zeros((3, 3), dtype=dtype) - upper_dtype_limit = dtype_limits(image, clip_negative=False)[1] - image[1, :] = upper_dtype_limit - expected = np.zeros((3, 3), dtype=dtype) + upper_dtype_limit - expected[1, :] = 0 + lower_dtype_limit, upper_dtype_limit = \ + dtype_limits(image, clip_negative=False) + image[1, :] = lower_dtype_limit + image[2, :] = upper_dtype_limit + expected = np.zeros((3, 3), dtype=dtype) + expected[2, :] = lower_dtype_limit + expected[1, :] = upper_dtype_limit + expected[0, :] = -1 result = invert(image) assert_array_equal(expected, result) -def test_invert_float64(): +def test_invert_float64_signed(): dtype = 'float64' image = np.zeros((3, 3), dtype=dtype) - upper_dtype_limit = dtype_limits(image, clip_negative=False)[1] - image[1, :] = upper_dtype_limit - expected = np.zeros((3, 3), dtype=dtype) + upper_dtype_limit - expected[1, :] = 0 + lower_dtype_limit, upper_dtype_limit = \ + dtype_limits(image, clip_negative=False) + image[1, :] = 
lower_dtype_limit + image[2, :] = upper_dtype_limit + expected = np.zeros((3, 3), dtype=dtype) + expected[2, :] = lower_dtype_limit + expected[1, :] = upper_dtype_limit + result = invert(image, signed_float=True) + assert_array_equal(expected, result) + + +def test_invert_float64_unsigned(): + dtype = 'float64' + image = np.zeros((3, 3), dtype=dtype) + lower_dtype_limit, upper_dtype_limit = \ + dtype_limits(image, clip_negative=True) + image[2, :] = upper_dtype_limit + expected = np.zeros((3, 3), dtype=dtype) + expected[0, :] = upper_dtype_limit + expected[1, :] = upper_dtype_limit result = invert(image) assert_array_equal(expected, result) -if __name__ == '__main__': - np.testing.run_module_suite() +def test_invert_roundtrip(): + for t, limits in dtype_range.items(): + image = np.array(limits, dtype=t) + expected = invert(invert(image)) + assert_array_equal(image, expected) diff -Nru skimage-0.13.1/skimage/util/tests/test_montage.py skimage-0.14.0/skimage/util/tests/test_montage.py --- skimage-0.13.1/skimage/util/tests/test_montage.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/tests/test_montage.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,14 +1,21 @@ -from nose.tools import assert_equal, raises -from numpy.testing import assert_array_equal +from skimage._shared import testing +from skimage._shared.testing import (assert_equal, assert_array_equal, + expected_warnings) import numpy as np -from skimage.util.montage import montage2d +from skimage.util import montage +from skimage.util import montage2d as montage2d_deprecated -def test_simple(): +def montage2d(*args, **kwargs): + with expected_warnings(['deprecated']): + return montage2d_deprecated(*args, **kwargs) + + +def test_montage2d_simple(): n_images = 3 height, width = 2, 3, - arr_in = np.arange(n_images * height * width) + arr_in = np.arange(n_images * height * width, dtype='float') arr_in = arr_in.reshape(n_images, height, width) arr_out = montage2d(arr_in) @@ -23,7 +30,7 @@ 
assert_array_equal(arr_out, gt) -def test_fill(): +def test_montage2d_fill(): n_images = 3 height, width = 2, 3, arr_in = np.arange(n_images * height * width) @@ -41,7 +48,7 @@ assert_array_equal(arr_out, gt) -def test_shape(): +def test_montage2d_shape(): n_images = 15 height, width = 11, 7 arr_in = np.arange(n_images * height * width) @@ -51,14 +58,14 @@ arr_out = montage2d(arr_in) assert_equal(arr_out.shape, (alpha * height, alpha * width)) - - -def test_grid_shape(): + + +def test_montage2d_grid_shape(): n_images = 6 height, width = 2, 2 arr_in = np.arange(n_images * height * width, dtype=np.float32) arr_in = arr_in.reshape(n_images, height, width) - arr_out = montage2d(arr_in, grid_shape=(3,2)) + arr_out = montage2d(arr_in, grid_shape=(3, 2)) correct_arr_out = np.array( [[ 0., 1., 4., 5.], [ 2., 3., 6., 7.], @@ -70,7 +77,7 @@ assert_array_equal(arr_out, correct_arr_out) -def test_rescale_intensity(): +def test_montage2d_rescale_intensity(): n_images = 4 height, width = 3, 3 arr_in = np.arange(n_images * height * width, dtype=np.float32) @@ -92,11 +99,166 @@ assert_array_equal(arr_out, gt) -@raises(AssertionError) -def test_error_ndim(): +def test_montage2d_simple_padding(): + n_images = 2 + height, width = 2, 2, + arr_in = np.arange(n_images * height * width) + arr_in = arr_in.reshape(n_images, height, width) + + arr_out = montage2d(arr_in, padding_width=1) + + gt = np.array( + [[0, 1, 0, 4, 5, 0], + [2, 3, 0, 6, 7, 0], + [0, 0, 0, 0, 0, 0], + [3, 3, 3, 3, 3, 3], + [3, 3, 3, 3, 3, 3], + [3, 3, 3, 3, 3, 3]] + ) + + assert_array_equal(arr_out, gt) + + +def test_montage2d_error_ndim(): arr_error = np.random.randn(1, 2, 3, 4) - montage2d(arr_error) + with testing.raises(AssertionError): + montage2d(arr_error) + + +def test_montage_simple_gray(): + n_images, n_rows, n_cols = 3, 2, 3 + arr_in = np.arange(n_images * n_rows * n_cols, dtype=np.float) + arr_in = arr_in.reshape(n_images, n_rows, n_cols) + + arr_out = montage(arr_in) + arr_ref = np.array( + [[ 0. , 1. 
, 2. , 6. , 7. , 8. ], + [ 3. , 4. , 5. , 9. , 10. , 11. ], + [ 12. , 13. , 14. , 8.5, 8.5, 8.5], + [ 15. , 16. , 17. , 8.5, 8.5, 8.5]] + ) + assert_array_equal(arr_out, arr_ref) + + +def test_montage_simple_rgb(): + n_images, n_rows, n_cols, n_channels = 2, 2, 2, 2 + arr_in = np.arange(n_images * n_rows * n_cols * n_channels, dtype=np.float) + arr_in = arr_in.reshape(n_images, n_rows, n_cols, n_channels) + + arr_out = montage(arr_in, multichannel=True) + arr_ref = np.array( + [[[ 0, 1], + [ 2, 3], + [ 8, 9], + [10, 11]], + [[ 4, 5], + [ 6, 7], + [12, 13], + [14, 15]], + [[ 7, 8], + [ 7, 8], + [ 7, 8], + [ 7, 8]], + [[ 7, 8], + [ 7, 8], + [ 7, 8], + [ 7, 8]]] + ) + assert_array_equal(arr_out, arr_ref) + + +def test_montage_fill_gray(): + n_images, n_rows, n_cols = 3, 2, 3 + arr_in = np.arange(n_images*n_rows*n_cols, dtype=np.float) + arr_in = arr_in.reshape(n_images, n_rows, n_cols) + + arr_out = montage(arr_in, fill=0) + arr_ref = np.array( + [[ 0. , 1. , 2. , 6. , 7. , 8. ], + [ 3. , 4. , 5. , 9. , 10. , 11. ], + [ 12. , 13. , 14. , 0. , 0. , 0. ], + [ 15. , 16. , 17. , 0. , 0. , 0. 
]] + ) + assert_array_equal(arr_out, arr_ref) + + +def test_montage_grid_default_gray(): + n_images, n_rows, n_cols = 15, 11, 7 + arr_in = np.arange(n_images * n_rows * n_cols, dtype=np.float) + arr_in = arr_in.reshape(n_images, n_rows, n_cols) + + n_tiles = int(np.ceil(np.sqrt(n_images))) + arr_out = montage(arr_in) + assert_equal(arr_out.shape, (n_tiles * n_rows, n_tiles * n_cols)) + +def test_montage_grid_custom_gray(): + n_images, n_rows, n_cols = 6, 2, 2 + arr_in = np.arange(n_images * n_rows * n_cols, dtype=np.float32) + arr_in = arr_in.reshape(n_images, n_rows, n_cols) + + arr_out = montage(arr_in, grid_shape=(3, 2)) + arr_ref = np.array( + [[ 0., 1., 4., 5.], + [ 2., 3., 6., 7.], + [ 8., 9., 12., 13.], + [ 10., 11., 14., 15.], + [ 16., 17., 20., 21.], + [ 18., 19., 22., 23.]] + ) + assert_array_equal(arr_out, arr_ref) + + +def test_montage_rescale_intensity_gray(): + n_images, n_rows, n_cols = 4, 3, 3 + arr_in = np.arange(n_images * n_rows * n_cols, dtype=np.float32) + arr_in = arr_in.reshape(n_images, n_rows, n_cols) + + arr_out = montage(arr_in, rescale_intensity=True) + arr_ref = np.array( + [[ 0. , 0.125, 0.25 , 0. , 0.125, 0.25 ], + [ 0.375, 0.5 , 0.625, 0.375, 0.5 , 0.625], + [ 0.75 , 0.875, 1. , 0.75 , 0.875, 1. ], + [ 0. , 0.125, 0.25 , 0. , 0.125, 0.25 ], + [ 0.375, 0.5 , 0.625, 0.375, 0.5 , 0.625], + [ 0.75 , 0.875, 1. , 0.75 , 0.875, 1. 
]] + ) + assert_equal(arr_out.min(), 0.0) + assert_equal(arr_out.max(), 1.0) + assert_array_equal(arr_out, arr_ref) + + +def test_montage_simple_padding_gray(): + n_images, n_rows, n_cols = 2, 2, 2 + arr_in = np.arange(n_images * n_rows * n_cols) + arr_in = arr_in.reshape(n_images, n_rows, n_cols) + + arr_out = montage(arr_in, padding_width=1) + arr_ref = np.array( + [[3, 3, 3, 3, 3, 3, 3], + [3, 0, 1, 3, 4, 5, 3], + [3, 2, 3, 3, 6, 7, 3], + [3, 3, 3, 3, 3, 3, 3], + [3, 3, 3, 3, 3, 3, 3], + [3, 3, 3, 3, 3, 3, 3], + [3, 3, 3, 3, 3, 3, 3]] + ) + assert_array_equal(arr_out, arr_ref) + + +def test_error_ndim(): + arr_error = np.random.randn(1, 2) + with testing.raises(ValueError): + montage(arr_error) + + arr_error = np.random.randn(1, 2, 3, 4) + with testing.raises(ValueError): + montage(arr_error) -if __name__ == '__main__': - np.testing.run_module_suite() + arr_error = np.random.randn(1, 2, 3) + with testing.raises(ValueError): + montage(arr_error, multichannel=True) + + arr_error = np.random.randn(1, 2, 3, 4, 5) + with testing.raises(ValueError): + montage(arr_error, multichannel=True) diff -Nru skimage-0.13.1/skimage/util/tests/test_random_noise.py skimage-0.14.0/skimage/util/tests/test_random_noise.py --- skimage-0.13.1/skimage/util/tests/test_random_noise.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/tests/test_random_noise.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,4 +1,5 @@ -from numpy.testing import assert_array_equal, assert_allclose, assert_raises +from skimage._shared import testing +from skimage._shared.testing import assert_array_equal, assert_allclose import numpy as np from skimage.data import camera @@ -113,12 +114,14 @@ # Ensure local variance bounds checking works properly bad_local_vars = np.zeros_like(data) - assert_raises(ValueError, random_noise, data, mode='localvar', seed=seed, - local_vars=bad_local_vars) + with testing.raises(ValueError): + random_noise(data, mode='localvar', seed=seed, + local_vars=bad_local_vars) 
bad_local_vars += 0.1 bad_local_vars[0, 0] = -1 - assert_raises(ValueError, random_noise, data, mode='localvar', seed=seed, - local_vars=bad_local_vars) + with testing.raises(ValueError): + random_noise(data, mode='localvar', seed=seed, + local_vars=bad_local_vars) def test_speckle(): @@ -207,8 +210,5 @@ def test_bad_mode(): data = np.zeros((64, 64)) - assert_raises(KeyError, random_noise, data, 'perlin') - - -if __name__ == '__main__': - np.testing.run_module_suite() + with testing.raises(KeyError): + random_noise(data, 'perlin') diff -Nru skimage-0.13.1/skimage/util/tests/test_regular_grid.py skimage-0.14.0/skimage/util/tests/test_regular_grid.py --- skimage-0.13.1/skimage/util/tests/test_regular_grid.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/tests/test_regular_grid.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,6 @@ import numpy as np -from numpy.testing import assert_equal from skimage.util import regular_grid +from skimage._shared.testing import assert_equal def test_regular_grid_full(): @@ -34,7 +34,3 @@ slice(5.0, None, 10.0)]) ar[g] = 1 assert_equal(ar.sum(), 8) - - -if __name__ == '__main__': - np.testing.run_module_suite() diff -Nru skimage-0.13.1/skimage/util/tests/test_shape.py skimage-0.14.0/skimage/util/tests/test_shape.py --- skimage-0.13.1/skimage/util/tests/test_shape.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/tests/test_shape.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,39 +1,38 @@ import numpy as np -from nose.tools import raises -from numpy.testing import assert_equal, assert_warns +from skimage._shared import testing +from skimage._shared.testing import assert_equal, assert_warns from skimage.util.shape import view_as_blocks, view_as_windows -from skimage._shared._warnings import expected_warnings -@raises(TypeError) def test_view_as_blocks_block_not_a_tuple(): A = np.arange(10) - view_as_blocks(A, [5]) + with testing.raises(TypeError): + view_as_blocks(A, [5]) -@raises(ValueError) def 
test_view_as_blocks_negative_shape(): A = np.arange(10) - view_as_blocks(A, (-2,)) + with testing.raises(ValueError): + view_as_blocks(A, (-2,)) -@raises(ValueError) def test_view_as_blocks_block_too_large(): A = np.arange(10) - view_as_blocks(A, (11,)) + with testing.raises(ValueError): + view_as_blocks(A, (11,)) -@raises(ValueError) def test_view_as_blocks_wrong_block_dimension(): A = np.arange(10) - view_as_blocks(A, (2, 2)) + with testing.raises(ValueError): + view_as_blocks(A, (2, 2)) -@raises(ValueError) def test_view_as_blocks_1D_array_wrong_block_shape(): A = np.arange(10) - view_as_blocks(A, (3,)) + with testing.raises(ValueError): + view_as_blocks(A, (3,)) def test_view_as_blocks_1D_array(): @@ -61,34 +60,34 @@ [82, 83]]]])) -@raises(TypeError) def test_view_as_windows_input_not_array(): A = [1, 2, 3, 4, 5] - view_as_windows(A, (2,)) + with testing.raises(TypeError): + view_as_windows(A, (2,)) -@raises(ValueError) def test_view_as_windows_wrong_window_dimension(): A = np.arange(10) - view_as_windows(A, (2, 2)) + with testing.raises(ValueError): + view_as_windows(A, (2, 2)) -@raises(ValueError) def test_view_as_windows_negative_window_length(): A = np.arange(10) - view_as_windows(A, (-1,)) + with testing.raises(ValueError): + view_as_windows(A, (-1,)) -@raises(ValueError) def test_view_as_windows_window_too_large(): A = np.arange(10) - view_as_windows(A, (11,)) + with testing.raises(ValueError): + view_as_windows(A, (11,)) -@raises(ValueError) def test_view_as_windows_step_below_one(): A = np.arange(10) - view_as_windows(A, (11,), step=0.9) + with testing.raises(ValueError): + view_as_windows(A, (11,), step=0.9) def test_view_as_windows_1D(): @@ -169,12 +168,8 @@ [6, 7], [10, 11]]], [[[12, 13], - [16, 17], - [20, 21]], + [16, 17], + [20, 21]], [[14, 15], - [18, 19], - [22, 23]]]]) - - -if __name__ == '__main__': - np.testing.run_module_suite() + [18, 19], + [22, 23]]]]) diff -Nru skimage-0.13.1/skimage/util/tests/test_unique_rows.py 
skimage-0.14.0/skimage/util/tests/test_unique_rows.py --- skimage-0.13.1/skimage/util/tests/test_unique_rows.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/util/tests/test_unique_rows.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,7 @@ import numpy as np -from numpy.testing import assert_equal, assert_raises from skimage.util import unique_rows +from skimage._shared import testing +from skimage._shared.testing import assert_equal def test_discontiguous_array(): @@ -28,13 +29,11 @@ def test_1d_array(): ar = np.array([1, 0, 1, 1], np.uint8) - assert_raises(ValueError, unique_rows, ar) + with testing.raises(ValueError): + unique_rows(ar) def test_3d_array(): ar = np.arange(8).reshape((2, 2, 2)) - assert_raises(ValueError, unique_rows, ar) - - -if __name__ == '__main__': - np.testing.run_module_suite() + with testing.raises(ValueError): + unique_rows(ar) diff -Nru skimage-0.13.1/skimage/viewer/plugins/canny.py skimage-0.14.0/skimage/viewer/plugins/canny.py --- skimage-0.13.1/skimage/viewer/plugins/canny.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/viewer/plugins/canny.py 2018-05-29 01:27:44.000000000 +0000 @@ -17,7 +17,7 @@ def attach(self, image_viewer): image = image_viewer.image imin, imax = skimage.dtype_limits(image, clip_negative=False) - itype = 'float' if np.issubdtype(image.dtype, float) else 'int' + itype = 'float' if np.issubdtype(image.dtype, np.floating) else 'int' self.add_widget(Slider('sigma', 0, 5, update_on='release')) self.add_widget(Slider('low threshold', imin, imax, value_type=itype, update_on='release')) diff -Nru skimage-0.13.1/skimage/viewer/tests/test_plugins.py skimage-0.14.0/skimage/viewer/tests/test_plugins.py --- skimage-0.13.1/skimage/viewer/tests/test_plugins.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/viewer/tests/test_plugins.py 2018-05-29 01:27:44.000000000 +0000 @@ -10,8 +10,10 @@ from skimage.viewer.plugins import ( LineProfile, Measure, CannyPlugin, LabelPainter, 
Crop, ColorHistogram, PlotPlugin) -from numpy.testing import assert_equal, assert_allclose, assert_almost_equal -from numpy.testing.decorators import skipif + +from skimage._shared import testing +from skimage._shared.testing import (assert_equal, assert_allclose, + assert_almost_equal) from skimage._shared._warnings import expected_warnings @@ -22,7 +24,7 @@ return plugin -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_line_profile(): """ Test a line profile using an ndim=2 image""" plugin = setup_line_profile(data.camera()) @@ -36,7 +38,7 @@ assert_allclose(scan_data.mean(), 0.2812, rtol=1e-3) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_line_profile_rgb(): """ Test a line profile using an ndim=3 image""" plugin = setup_line_profile(data.chelsea(), limits=None) @@ -51,7 +53,7 @@ assert_allclose(scan_data.mean(), 0.4359, rtol=1e-3) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_line_profile_dynamic(): """Test a line profile updating after an image transform""" image = data.coins()[:-50, :] # shave some off to make the line lower @@ -77,7 +79,7 @@ assert_almost_equal(np.max(line) - np.min(line), 0.639, 1) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_measure(): image = data.camera() viewer = ImageViewer(image) @@ -89,7 +91,7 @@ assert_equal(str(m._angle.text[:5]), '135.0') -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_canny(): image = data.camera() viewer = ImageViewer(image) @@ -102,7 +104,7 @@ assert edges.sum() == 2852 -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_label_painter(): image = data.camera() moon = data.moon() @@ -120,7 +122,7 @@ assert_equal(lp.paint_tool.shape, moon.shape) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_crop(): image = data.camera() viewer = ImageViewer(image) 
@@ -131,7 +133,7 @@ assert_equal(viewer.image.shape, (101, 101)) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_color_histogram(): image = skimage.img_as_float(data.load('color.png')) viewer = ImageViewer(image) @@ -143,7 +145,7 @@ assert_almost_equal(viewer.image.std(), 0.325, 3) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_plot_plugin(): viewer = ImageViewer(data.moon()) plugin = PlotPlugin(image_filter=lambda x: x) @@ -155,7 +157,7 @@ viewer.close() -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_plugin(): img = skimage.img_as_float(data.moon()) viewer = ImageViewer(img) diff -Nru skimage-0.13.1/skimage/viewer/tests/test_tools.py skimage-0.14.0/skimage/viewer/tests/test_tools.py --- skimage-0.13.1/skimage/viewer/tests/test_tools.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/viewer/tests/test_tools.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,13 +1,15 @@ from collections import namedtuple import numpy as np -from numpy.testing import assert_equal -from numpy.testing.decorators import skipif from skimage import data from skimage.viewer import ImageViewer, has_qt from skimage.viewer.canvastools import ( LineTool, ThickLineTool, RectangleTool, PaintTool) from skimage.viewer.canvastools.base import CanvasToolBase + +from skimage._shared import testing +from skimage._shared.testing import assert_equal + try: from matplotlib.testing.decorators import cleanup except ImportError: @@ -78,7 +80,7 @@ @cleanup -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_line_tool(): img = data.camera() viewer = ImageViewer(img) @@ -104,7 +106,7 @@ @cleanup -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_thick_line_tool(): img = data.camera() viewer = ImageViewer(img) @@ -128,7 +130,7 @@ @cleanup -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def 
test_rect_tool(): img = data.camera() viewer = ImageViewer(img) @@ -147,7 +149,7 @@ do_event(viewer, 'mouse_press', xdata=100, ydata=100) do_event(viewer, 'move', xdata=120, ydata=120) do_event(viewer, 'mouse_release') - #assert_equal(tool.geometry, [120, 150, 120, 150]) + # assert_equal(tool.geometry, [120, 150, 120, 150]) # create a new line do_event(viewer, 'mouse_press', xdata=10, ydata=10) @@ -157,7 +159,7 @@ @cleanup -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_paint_tool(): img = data.moon() viewer = ImageViewer(img) @@ -191,7 +193,7 @@ @cleanup -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_base_tool(): img = data.moon() viewer = ImageViewer(img) diff -Nru skimage-0.13.1/skimage/viewer/tests/test_utils.py skimage-0.14.0/skimage/viewer/tests/test_utils.py --- skimage-0.13.1/skimage/viewer/tests/test_utils.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/viewer/tests/test_utils.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,19 +1,19 @@ # -*- coding: utf-8 -*- from skimage.viewer import utils from skimage.viewer.utils import dialogs -from skimage.viewer.qt import QtCore, QtGui, has_qt -from numpy.testing.decorators import skipif +from skimage.viewer.qt import QtCore, QtWidgets, has_qt +from skimage._shared import testing -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_event_loop(): utils.init_qtapp() timer = QtCore.QTimer() - timer.singleShot(10, QtGui.QApplication.quit) + timer.singleShot(10, QtWidgets.QApplication.quit) utils.start_qtapp() -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_format_filename(): fname = dialogs._format_filename(('apple', 2)) assert fname == 'apple' @@ -21,19 +21,21 @@ assert fname is None -@skipif(not has_qt) +@testing.skipif(True, reason="Can't automatically close window. 
See #3081.") +@testing.skipif(not has_qt, reason="Qt not installed") def test_open_file_dialog(): - utils.init_qtapp() + QApp = utils.init_qtapp() timer = QtCore.QTimer() - timer.singleShot(100, lambda: QtGui.QApplication.quit()) + timer.singleShot(100, lambda: QApp.quit()) filename = dialogs.open_file_dialog() assert filename is None -@skipif(not has_qt) +@testing.skipif(True, reason="Can't automatically close window. See #3081.") +@testing.skipif(not has_qt, reason="Qt not installed") def test_save_file_dialog(): - utils.init_qtapp() + QApp = utils.init_qtapp() timer = QtCore.QTimer() - timer.singleShot(100, lambda: QtGui.QApplication.quit()) + timer.singleShot(100, lambda: QApp.quit()) filename = dialogs.save_file_dialog() assert filename is None diff -Nru skimage-0.13.1/skimage/viewer/tests/test_viewer.py skimage-0.14.0/skimage/viewer/tests/test_viewer.py --- skimage-0.13.1/skimage/viewer/tests/test_viewer.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/viewer/tests/test_viewer.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,19 +1,18 @@ - from skimage import data +from skimage.transform import pyramid_gaussian +from skimage.filters import sobel from skimage.viewer.qt import QtGui, QtCore, has_qt from skimage.viewer import ImageViewer, CollectionViewer from skimage.viewer.plugins import OverlayPlugin -from skimage.transform import pyramid_gaussian -from skimage.filters import sobel -from numpy.testing import assert_equal -from numpy.testing.decorators import skipif from skimage._shared.version_requirements import is_installed +from skimage._shared import testing +from skimage._shared.testing import assert_equal from skimage._shared._warnings import expected_warnings -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_viewer(): astro = data.astronaut() coins = data.coins() @@ -40,11 +39,11 @@ QtCore.Qt.NoModifier) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def 
test_collection_viewer(): img = data.astronaut() - img_collection = tuple(pyramid_gaussian(img)) + img_collection = tuple(pyramid_gaussian(img, multichannel=True)) view = CollectionViewer(img_collection) make_key_event(48) @@ -56,8 +55,9 @@ view._format_coord(10, 10) -@skipif(not has_qt) -@skipif(not is_installed('matplotlib', '>=1.2')) +@testing.skipif(not has_qt, reason="Qt not installed") +@testing.skipif(not is_installed('matplotlib', '>=1.2'), + reason="matplotlib < 1.2") def test_viewer_with_overlay(): img = data.coins() ov = OverlayPlugin(image_filter=sobel) diff -Nru skimage-0.13.1/skimage/viewer/tests/test_widgets.py skimage-0.14.0/skimage/viewer/tests/test_widgets.py --- skimage-0.13.1/skimage/viewer/tests/test_widgets.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/viewer/tests/test_widgets.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,16 +1,16 @@ - import os + from skimage import data, img_as_float, io, img_as_uint from skimage.viewer import ImageViewer -from skimage.viewer.qt import QtGui, QtCore, has_qt +from skimage.viewer.qt import QtWidgets, QtCore, has_qt from skimage.viewer.widgets import ( Slider, OKCancelButtons, SaveButtons, ComboBox, CheckBox, Text) from skimage.viewer.plugins.base import Plugin -from numpy.testing import assert_almost_equal, assert_equal -from numpy.testing.decorators import skipif from skimage._shared._warnings import expected_warnings +from skimage._shared import testing +from skimage._shared.testing import assert_almost_equal, assert_equal def get_image_viewer(): @@ -20,7 +20,7 @@ return viewer -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_check_box(): viewer = get_image_viewer() cb = CheckBox('hello', value=True, alignment='left') @@ -35,7 +35,7 @@ assert_equal(cb.val, False) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_combo_box(): viewer = get_image_viewer() cb = ComboBox('hello', ('a', 'b', 'c')) @@ -48,7 +48,7 @@ 
assert_equal(cb.index, 2) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_text_widget(): viewer = get_image_viewer() txt = Text('hello', 'hello, world!') @@ -59,7 +59,7 @@ assert_equal(str(txt.text), 'goodbye, world!') -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_slider_int(): viewer = get_image_viewer() sld = Slider('radius', 2, 10, value_type='int') @@ -73,7 +73,7 @@ assert_equal(sld.val, 5) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_slider_float(): viewer = get_image_viewer() sld = Slider('alpha', 2.1, 3.1, value=2.1, value_type='float', @@ -88,7 +88,8 @@ assert_almost_equal(sld.val, 2.5, 2) -@skipif(not has_qt) +@testing.skipif(True, reason="Can't automatically close window. See #3081.") +@testing.skipif(not has_qt, reason="Qt not installed") def test_save_buttons(): viewer = get_image_viewer() sv = SaveButtons() @@ -99,7 +100,7 @@ os.close(fid) timer = QtCore.QTimer() - timer.singleShot(100, QtGui.QApplication.quit) + timer.singleShot(100, QtWidgets.QApplication.quit) # exercise the button clicks sv.save_stack.click() @@ -121,7 +122,7 @@ os.remove(filename) -@skipif(not has_qt) +@testing.skipif(not has_qt, reason="Qt not installed") def test_ok_buttons(): viewer = get_image_viewer() ok = OKCancelButtons() @@ -129,4 +130,3 @@ ok.update_original_image(), ok.close_plugin() - diff -Nru skimage-0.13.1/skimage/viewer/utils/dialogs.py skimage-0.14.0/skimage/viewer/utils/dialogs.py --- skimage-0.13.1/skimage/viewer/utils/dialogs.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/viewer/utils/dialogs.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,6 @@ import os -from ..qt import QtGui +from ..qt import QtWidgets __all__ = ['open_file_dialog', 'save_file_dialog'] @@ -17,18 +17,18 @@ def open_file_dialog(): """Return user-selected file path.""" - filename = QtGui.QFileDialog.getOpenFileName() + filename = 
QtWidgets.QFileDialog.getOpenFileName() filename = _format_filename(filename) return filename def save_file_dialog(default_format='png'): """Return user-selected file path.""" - filename = QtGui.QFileDialog.getSaveFileName() + filename = QtWidgets.QFileDialog.getSaveFileName() filename = _format_filename(filename) if filename is None: return None - #TODO: io plugins should assign default image formats + # TODO: io plugins should assign default image formats basename, ext = os.path.splitext(filename) if not ext: filename = '%s.%s' % (filename, default_format) diff -Nru skimage-0.13.1/skimage/viewer/viewers/core.py skimage-0.14.0/skimage/viewer/viewers/core.py --- skimage-0.13.1/skimage/viewer/viewers/core.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/viewer/viewers/core.py 2018-05-29 01:27:44.000000000 +0000 @@ -6,7 +6,7 @@ from ... import io, img_as_float from ...util.dtype import dtype_range from ...exposure import rescale_intensity -from ..qt import QtWidgets, Qt, Signal +from ..qt import QtWidgets, QtGui, Qt, Signal from ..widgets import Slider from ..utils import (dialogs, init_qtapp, figimage, start_qtapp, update_axes_image) @@ -93,7 +93,7 @@ init_qtapp() super(ImageViewer, self).__init__() - #TODO: Add ImageViewer to skimage.io window manager + # TODO: Add ImageViewer to skimage.io window manager self.setAttribute(Qt.WA_DeleteOnClose) self.setWindowTitle("Image Viewer") @@ -361,7 +361,7 @@ self.slider = Slider('frame', **slider_kws) self.layout.addWidget(self.slider) - #TODO: Adjust height to accomodate slider; the following doesn't work + # TODO: Adjust height to accomodate slider; the following doesn't work # s_size = self.slider.sizeHint() # cs_size = self.canvas.sizeHint() # self.resize(cs_size.width(), cs_size.height() + s_size.height()) @@ -382,7 +382,7 @@ self.update_image(self.image_collection[index]) def keyPressEvent(self, event): - if type(event) == QtWidgets.QKeyEvent: + if type(event) == QtGui.QKeyEvent: key = event.key() # 
Number keys (code: 0 = key 48, 9 = key 57) move to deciles if 48 <= key < 58: diff -Nru skimage-0.13.1/skimage/viewer/widgets/core.py skimage-0.14.0/skimage/viewer/widgets/core.py --- skimage-0.13.1/skimage/viewer/widgets/core.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/viewer/widgets/core.py 2018-05-29 01:27:44.000000000 +0000 @@ -5,7 +5,6 @@ __all__ = ['BaseWidget', 'Slider', 'ComboBox', 'CheckBox', 'Text', 'Button'] - class BaseWidget(QtWidgets.QWidget): plugin = RequiredAttr("Widget is not attached to a Plugin.") @@ -75,6 +74,7 @@ update_on : {'release' | 'move'}, optional Control when callback function is called: on slider move or release. """ + def __init__(self, name, low=0.0, high=1.0, value=None, value_type='float', ptype='kwarg', callback=None, max_edit_width=60, orientation='horizontal', update_on='release'): @@ -84,7 +84,7 @@ value = (high - low) / 2. # Set widget orientation - #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if orientation == 'vertical': self.slider = QtWidgets.QSlider(Qt.Vertical) alignment = QtCore.Qt.AlignHCenter @@ -100,10 +100,10 @@ else: msg = "Unexpected value %s for 'orientation'" raise ValueError(msg % orientation) - #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Set slider behavior for float and int values. 
- #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if value_type == 'float': # divide slider into 1000 discrete values slider_max = 1000 @@ -116,7 +116,7 @@ else: msg = "Expected `value_type` to be 'float' or 'int'; received: %s" raise ValueError(msg % value_type) - #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ self.value_type = value_type self._low = low @@ -299,10 +299,11 @@ callback : callable f() Function to call when button is clicked. """ + def __init__(self, name, callback): super(Button, self).__init__(self) - self._button = QtGui.QPushButton(name) + self._button = QtWidgets.QPushButton(name) self._button.clicked.connect(callback) - self.layout = QtGui.QHBoxLayout(self) + self.layout = QtWidgets.QHBoxLayout(self) self.layout.addWidget(self._button) diff -Nru skimage-0.13.1/skimage/viewer/widgets/history.py skimage-0.14.0/skimage/viewer/widgets/history.py --- skimage-0.13.1/skimage/viewer/widgets/history.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/skimage/viewer/widgets/history.py 2018-05-29 01:27:44.000000000 +0000 @@ -1,6 +1,6 @@ from textwrap import dedent -from ..qt import QtGui, QtCore +from ..qt import QtGui, QtCore, QtWidgets import numpy as np import skimage @@ -18,20 +18,21 @@ OK will replace the original image with the current (filtered) image. Cancel will just close the plugin. 
""" + def __init__(self, button_width=80): name = 'OK/Cancel' super(OKCancelButtons, self).__init__(name) - self.ok = QtGui.QPushButton('OK') + self.ok = QtWidgets.QPushButton('OK') self.ok.clicked.connect(self.update_original_image) self.ok.setMaximumWidth(button_width) self.ok.setFocusPolicy(QtCore.Qt.NoFocus) - self.cancel = QtGui.QPushButton('Cancel') + self.cancel = QtWidgets.QPushButton('Cancel') self.cancel.clicked.connect(self.close_plugin) self.cancel.setMaximumWidth(button_width) self.cancel.setFocusPolicy(QtCore.Qt.NoFocus) - self.layout = QtGui.QHBoxLayout(self) + self.layout = QtWidgets.QHBoxLayout(self) self.layout.addStretch() self.layout.addWidget(self.cancel) self.layout.addWidget(self.ok) @@ -54,17 +55,17 @@ self.default_format = default_format - self.name_label = QtGui.QLabel() + self.name_label = QtWidgets.QLabel() self.name_label.setText(name) - self.save_file = QtGui.QPushButton('File') + self.save_file = QtWidgets.QPushButton('File') self.save_file.clicked.connect(self.save_to_file) self.save_file.setFocusPolicy(QtCore.Qt.NoFocus) - self.save_stack = QtGui.QPushButton('Stack') + self.save_stack = QtWidgets.QPushButton('Stack') self.save_stack.clicked.connect(self.save_to_stack) self.save_stack.setFocusPolicy(QtCore.Qt.NoFocus) - self.layout = QtGui.QHBoxLayout(self) + self.layout = QtWidgets.QHBoxLayout(self) self.layout.addWidget(self.name_label) self.layout.addWidget(self.save_stack) self.layout.addWidget(self.save_file) @@ -86,18 +87,18 @@ return image = self.plugin.filtered_image if image.dtype == np.bool: - #TODO: This check/conversion should probably be in `imsave`. + # TODO: This check/conversion should probably be in `imsave`. 
image = img_as_ubyte(image) io.imsave(filename, image) def notify(msg): - msglabel = QtGui.QLabel(msg) - dialog = QtGui.QDialog() - ok = QtGui.QPushButton('OK', dialog) + msglabel = QtWidgets.QLabel(msg) + dialog = QtWidgets.QDialog() + ok = QtWidgets.QPushButton('OK', dialog) ok.clicked.connect(dialog.accept) ok.setDefault(True) - dialog.layout = QtGui.QGridLayout(dialog) + dialog.layout = QtWidgets.QGridLayout(dialog) dialog.layout.addWidget(msglabel, 0, 0, 1, 3) dialog.layout.addWidget(ok, 1, 1) dialog.exec_() diff -Nru skimage-0.13.1/TASKS.txt skimage-0.14.0/TASKS.txt --- skimage-0.13.1/TASKS.txt 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/TASKS.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ - .. role:: strike - - -.. _howto_contribute: - -How to contribute to ``skimage`` -====================================== - -.. toctree:: - :hidden: - - gitwash/index - gsoc2011 - cell_profiler - - -Developing Open Source is great fun! Join us on the `scikit-image mailing -list `_ and tell us -which of the following challenges you'd like to solve. - -* Mentoring is available for those new to scientific programming in Python. -* If you're looking for something to implement, you can find a list of `requested features on GitHub `__. In addition, you can browse the `open issues on GitHub `__. -* The technical detail of the `development process`_ is summed up below. - Refer to the :doc:`gitwash ` for a step-by-step tutorial. - -.. contents:: - :local: - diff -Nru skimage-0.13.1/TODO.txt skimage-0.14.0/TODO.txt --- skimage-0.13.1/TODO.txt 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/TODO.txt 2018-05-29 01:27:44.000000000 +0000 @@ -1,29 +1,10 @@ Remember to list any API changes below in `doc/source/api_changes.txt`. -Version 0.14 ------------- -* Remove deprecated ``ntiles_*` kwargs in ``equalize_adapthist``. -* Remove deprecated ``skimage.restoration.nl_means_denoising``. -* Remove deprecated ``skimage.filters.gaussian_filter``. 
-* Remove deprecated ``skimage.filters.gabor_filter``. -* Remove deprecated ``skimage.measure.LineModel`` and - add an alias LineModel = LineModelND. While the deprecated LineModel has for - parameters `(dist, theta)`, LineModelND has the more general parameters - `(origin, direction)`. -* Remove deprecated old syntax support for ``skimage.transform.integrate``. -* Remove deprecated ``skimage.measure.structural_similarity`` alias and - deprecation warning test for this alias. -* Remove deprecated ``sigma_range`` kwargs in ``skimage.restoration.denoise_bilateral`` - and corresponding tests. -* Remove deprecation error on the usage of ``ntiles_x``, ``ntiles_y`` in - ``skimage.exposure.equalize_adapthist`` and the corresponding test. -* Remove the freeimage plugin shim to imageio. -* Remove deprecated `normalise` and corresponding test for ``skimage.feature.hog``. -* Remove deprecated ``marching_cubes`` from ``skimage.measure``. - Version 0.15 ------------ +* Finalize ``skimage.future.graph`` API. +* Finalize ``skimage.future.manual_segmentation`` API. * In ``skimage.util.dtype_limits``, set default behavior of `clip_negative` to `False`. * In ``skimage.transform.radon``, set default behavior of `circle` to `True`. * In ``skimage.transform.iradon``, set default behavior of `circle` to `True`. @@ -40,3 +21,37 @@ ``'reflect'``. * In ``skimage.transform.rescale``, set default value of ``mode`` to ``'reflect'``. +* Remove deprecated ``skimage.util.montage2d`` and corresponding tests. +* In ``skimage.transform.resize`` change default argument from + ``anti_aliasing=None`` to ``anti_aliasing=True``. 
+* In ``skimage/restoration/tests/test_denoise.py``, there is an optional + warning that needs to be made mandatory when we move on from python 2.7 + +Version 0.16 +------------ +* In ``skimage.transform.resize``, ``skimage.transform.pyramid_reduce``, + ``skimage.transform.pyramid_laplacian``, + ``skimage.transform.pyramid_gaussian``, + ``skimage.transform.pyramid_expandset``, set default value of + ``multichannel`` to False +* Remove ``_multichannel_default`` from ``skimage.transform._warps.py``, and no + longer call it from within the ``resize`` or ``pyramid_*`` transforms. +* Remove checks for the ``multichannel`` deprecation warnings in several tests + in ``skimage.transform.tests.test_pyramids.py`` and + ``skimage.transform.tests.test_warps.py``. +* Remove `flatten` for `imread` in ``skimage.io._io.py``. +* Remove `as_grey` for `load` in ``skimage.data.__init__.py``. +* Remove deprecated argument ``visualise`` from function skimage.feature.hog +* Remove deprecated module ``skimage.novice`` +* In ``skimage.measure._regionprops``, remove all references to + ``coordinates=``, ``_xycoordinates``, and ``_use_xy_warning``. +* In ``skimage.measure.moments_central``, remove ``cc`` and ``**kwargs`` + arguments. +* In ``skimage.morphology.remove_small_holes``, remove ``min_size`` argument. + +Other +----- +* Remove legacy pretty printing workaround for ``pytest`` in ``conftest.py`` + once minimal required ``numpy`` is set to >= 1.14.0. +* Remove deprecated ``Hxx, Hxy, Hyy`` API of ``hessian_matrix_eigvals`` in + ``skimage.feature.corner``. 
diff -Nru skimage-0.13.1/tools/appveyor/install.ps1 skimage-0.14.0/tools/appveyor/install.ps1 --- skimage-0.13.1/tools/appveyor/install.ps1 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/tools/appveyor/install.ps1 1970-01-01 00:00:00.000000000 +0000 @@ -1,203 +0,0 @@ -# Sample script to install Python and pip under Windows -# Authors: Olivier Grisel, Jonathan Helmus and Kyle Kastner -# License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/ - -$MINICONDA_URL = "http://repo.continuum.io/miniconda/" -$BASE_URL = "https://www.python.org/ftp/python/" -$GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py" -$GET_PIP_PATH = "C:\get-pip.py" - - -function DownloadPython ($python_version, $platform_suffix) { - $webclient = New-Object System.Net.WebClient - $filename = "python-" + $python_version + $platform_suffix + ".msi" - $url = $BASE_URL + $python_version + "/" + $filename - - $basedir = $pwd.Path + "\" - $filepath = $basedir + $filename - if (Test-Path $filename) { - Write-Host "Reusing" $filepath - return $filepath - } - - # Download and retry up to 3 times in case of network transient errors. - Write-Host "Downloading" $filename "from" $url - $retry_attempts = 2 - for($i=0; $i -lt $retry_attempts; $i++){ - try { - $webclient.DownloadFile($url, $filepath) - break - } - Catch [Exception]{ - Start-Sleep 1 - } - } - if (Test-Path $filepath) { - Write-Host "File saved at" $filepath - } else { - # Retry once to get the error message if any at the last try - $webclient.DownloadFile($url, $filepath) - } - return $filepath -} - - -function InstallPython ($python_version, $architecture, $python_home) { - Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home - if (Test-Path $python_home) { - Write-Host $python_home "already exists, skipping." 
- return $false - } - if ($architecture -eq "32") { - $platform_suffix = "" - } else { - $platform_suffix = ".amd64" - } - $msipath = DownloadPython $python_version $platform_suffix - Write-Host "Installing" $msipath "to" $python_home - $install_log = $python_home + ".log" - $install_args = "/qn /log $install_log /i $msipath TARGETDIR=$python_home" - $uninstall_args = "/qn /x $msipath" - RunCommand "msiexec.exe" $install_args - if (-not(Test-Path $python_home)) { - Write-Host "Python seems to be installed else-where, reinstalling." - RunCommand "msiexec.exe" $uninstall_args - RunCommand "msiexec.exe" $install_args - } - if (Test-Path $python_home) { - Write-Host "Python $python_version ($architecture) installation complete" - } else { - Write-Host "Failed to install Python in $python_home" - Get-Content -Path $install_log - Exit 1 - } -} - - -function RunCommand ($command, $command_args) { - Write-Host $command $command_args - Start-Process -FilePath $command -ArgumentList $command_args -Wait -Passthru -} - - -function InstallPip ($python_home) { - $pip_path = $python_home + "\Scripts\pip.exe" - $python_path = $python_home + "\python.exe" - if (-not(Test-Path $pip_path)) { - Write-Host "Installing pip..." - $webclient = New-Object System.Net.WebClient - $webclient.DownloadFile($GET_PIP_URL, $GET_PIP_PATH) - Write-Host "Executing:" $python_path $GET_PIP_PATH - Start-Process -FilePath "$python_path" -ArgumentList "$GET_PIP_PATH" -Wait -Passthru - } else { - Write-Host "pip already installed." 
- } -} - - -function DownloadMiniconda ($python_version, $platform_suffix) { - $webclient = New-Object System.Net.WebClient - if ($python_version -eq "3.4") { - $filename = "Miniconda3-3.5.5-Windows-" + $platform_suffix + ".exe" - } else { - $filename = "Miniconda-3.5.5-Windows-" + $platform_suffix + ".exe" - } - $url = $MINICONDA_URL + $filename - - $basedir = $pwd.Path + "\" - $filepath = $basedir + $filename - if (Test-Path $filename) { - Write-Host "Reusing" $filepath - return $filepath - } - - # Download and retry up to 3 times in case of network transient errors. - Write-Host "Downloading" $filename "from" $url - $retry_attempts = 2 - for($i=0; $i -lt $retry_attempts; $i++){ - try { - $webclient.DownloadFile($url, $filepath) - break - } - Catch [Exception]{ - Start-Sleep 1 - } - } - if (Test-Path $filepath) { - Write-Host "File saved at" $filepath - } else { - # Retry once to get the error message if any at the last try - $webclient.DownloadFile($url, $filepath) - } - return $filepath -} - - -function InstallMiniconda ($python_version, $architecture, $python_home) { - Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home - if (Test-Path $python_home) { - Write-Host $python_home "already exists, skipping." 
- return $false - } - if ($architecture -eq "32") { - $platform_suffix = "x86" - } else { - $platform_suffix = "x86_64" - } - $filepath = DownloadMiniconda $python_version $platform_suffix - Write-Host "Installing" $filepath "to" $python_home - $install_log = $python_home + ".log" - $args = "/S /D=$python_home" - Write-Host $filepath $args - Start-Process -FilePath $filepath -ArgumentList $args -Wait -Passthru - if (Test-Path $python_home) { - Write-Host "Python $python_version ($architecture) installation complete" - } else { - Write-Host "Failed to install Python in $python_home" - Get-Content -Path $install_log - Exit 1 - } -} - - -function InstallMinicondaPip ($python_home) { - $pip_path = $python_home + "\Scripts\pip.exe" - $conda_path = $python_home + "\Scripts\conda.exe" - if (-not(Test-Path $pip_path)) { - Write-Host "Installing pip..." - $args = "install --yes pip" - Write-Host $conda_path $args - Start-Process -FilePath "$conda_path" -ArgumentList $args -Wait -Passthru - } else { - Write-Host "pip already installed." - } -} - - -function UpdateConda ($python_home) { - $conda_path = $python_home + "\Scripts\conda.exe" - Write-Host "Updating conda..." 
- $args = "update --yes conda" - Write-Host $conda_path $args - Start-Process -FilePath "$conda_path" -ArgumentList $args -Wait -Passthru -} - - -function InstallCondaPackages ($python_home, $spec) { - $conda_path = $python_home + "\Scripts\conda.exe" - $args = "install --yes " + $spec - Write-Host ("conda " + $args) - Start-Process -FilePath "$conda_path" -ArgumentList $args -Wait -Passthru -} - - -function main () { - #InstallPython $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON - #InstallPip $env:PYTHON - InstallMiniconda $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON - InstallMinicondaPip $env:PYTHON - UpdateConda $env:PYTHON - InstallCondaPackages $env:PYTHON "numpy scipy cython wheel nose msvc_runtime numpydoc" -} - -main diff -Nru skimage-0.13.1/tools/appveyor/run_with_env.cmd skimage-0.14.0/tools/appveyor/run_with_env.cmd --- skimage-0.13.1/tools/appveyor/run_with_env.cmd 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/tools/appveyor/run_with_env.cmd 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -:: To build extensions for 64 bit Python 3, we need to configure environment -:: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of: -:: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1) -:: -:: To build extensions for 64 bit Python 2, we need to configure environment -:: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of: -:: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0) -:: -:: 32 bit builds do not require specific environment configurations. 
-:: -:: Note: this script needs to be run with the /E:ON and /V:ON flags for the -:: cmd interpreter, at least for (SDK v7.0) -:: -:: More details at: -:: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows -:: http://stackoverflow.com/a/13751649/163740 -:: -:: Author: Olivier Grisel -:: License: BSD 3 clause -@ECHO OFF - -SET COMMAND_TO_RUN=%* -SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows - -SET MAJOR_PYTHON_VERSION="%PYTHON_VERSION:~0,1%" -IF %MAJOR_PYTHON_VERSION% == "2" ( - SET WINDOWS_SDK_VERSION="v7.0" -) ELSE IF %MAJOR_PYTHON_VERSION% == "3" ( - SET WINDOWS_SDK_VERSION="v7.1" -) ELSE ( - ECHO Unsupported Python version: "%MAJOR_PYTHON_VERSION%" - EXIT 1 -) - -IF "%PYTHON_ARCH%"=="64" ( - ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %MAJOR_PYTHON_VERSION% on a 64 bit architecture - SET DISTUTILS_USE_SDK=1 - SET MSSdk=1 - "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION% - "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release - ECHO Executing: %COMMAND_TO_RUN% - call %COMMAND_TO_RUN% || EXIT 1 -) ELSE ( - ECHO Using default MSVC build environment for 32 bit architecture - ECHO Executing: %COMMAND_TO_RUN% - call %COMMAND_TO_RUN% || EXIT 1 -) diff -Nru skimage-0.13.1/tools/check_bento_build.py skimage-0.14.0/tools/check_bento_build.py --- skimage-0.13.1/tools/check_bento_build.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/tools/check_bento_build.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -#!/usr/bin/env python -""" -Check that Cython extensions in setup.py files match those in bento.info. 
-""" -import os -import re -import sys - - -RE_CYTHON = re.compile("config.add_extension\(\s*['\"]([\S]+)['\"]") - -BENTO_TEMPLATE = """ - Extension: {module_path} - Sources: - {dir_path}.pyx""" - - -def each_setup_in_pkg(top_dir): - """Yield path and file object for each setup.py file""" - for dir_path, dir_names, filenames in os.walk(top_dir): - for fname in filenames: - if fname == 'setup.py': - with open(os.path.join(dir_path, 'setup.py')) as f: - yield dir_path, f - - -def each_cy_in_setup(top_dir): - """Yield path for each cython extension package's setup file.""" - for dir_path, f in each_setup_in_pkg(top_dir): - text = f.read() - match = RE_CYTHON.findall(text) - if match: - for cy_file in match: - # if cython files in different directory than setup.py - if '.' in cy_file: - parts = cy_file.split('.') - cy_file = parts[-1] - # Don't overwrite dir_path for subsequent iterations. - path = os.path.join(dir_path, *parts[:-1]) - else: - path = dir_path - full_path = os.path.join(path, cy_file) - yield full_path - - -def each_cy_in_bento(bento_file='bento.info'): - """Yield path for each cython extension in bento info file.""" - with open(bento_file) as f: - for line in f: - line = line.strip() - if line.startswith('Extension:'): - path = line.lstrip('Extension:').strip() - yield path - - -def remove_common_extensions(cy_bento, cy_setup): - # normalize so that cy_setup and cy_bento have the same separator - cy_setup = set(ext.replace('/', '.') for ext in cy_setup) - cy_setup_diff = cy_setup.difference(cy_bento) - cy_setup_diff = set(ext.replace('.', '/') for ext in cy_setup_diff) - cy_bento_diff = cy_bento.difference(cy_setup) - return cy_bento_diff, cy_setup_diff - - -def print_results(cy_bento, cy_setup): - def info(text): - print('') - print(text) - print('-' * len(text)) - - if not (cy_bento or cy_setup): - print("bento.info and setup.py files match.") - - if cy_bento: - info("Extensions found in 'bento.info' but not in any 'setup.py:") - 
print('\n'.join(cy_bento)) - - - if cy_setup: - info("Extensions found in a 'setup.py' but not in any 'bento.info:") - print('\n'.join(cy_setup)) - info("Consider adding the following to the 'bento.info' Library:") - for dir_path in cy_setup: - module_path = dir_path.replace('/', '.') - print(BENTO_TEMPLATE.format(module_path=module_path, - dir_path=dir_path)) - - -if __name__ == '__main__': - # All cython extensions defined in 'setup.py' files. - cy_setup = set(each_cy_in_setup('skimage')) - - # All cython extensions defined 'bento.info' file. - cy_bento = set(each_cy_in_bento()) - - cy_bento, cy_setup = remove_common_extensions(cy_bento, cy_setup) - print_results(cy_bento, cy_setup) - - if cy_setup or cy_bento: - sys.exit(1) diff -Nru skimage-0.13.1/tools/check_sdist.py skimage-0.14.0/tools/check_sdist.py --- skimage-0.13.1/tools/check_sdist.py 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/tools/check_sdist.py 2018-05-29 01:27:44.000000000 +0000 @@ -23,8 +23,8 @@ './downloads', './scikit_image.egg-info'] ignore_files = ['./TODO.md', './README.md', './MANIFEST', './.gitignore', './.travis.yml', './.gitmodules', - './.mailmap', './.coveragerc', './appveyor.yml', - './tools/check_bento_build.py', + './.mailmap', './.coveragerc', './.appveyor.yml', + './.pep8speaks.yml', './skimage/filters/rank/README.rst'] diff -Nru skimage-0.13.1/tools/deploy_docs.sh skimage-0.14.0/tools/deploy_docs.sh --- skimage-0.13.1/tools/deploy_docs.sh 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/tools/deploy_docs.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -#!/bin/bash -if [[ $TRAVIS_PULL_REQUEST == false && $TRAVIS_BRANCH == "master" && - $TRAVIS_PYTHON_VERSION == 3.4 ]] -then - # See https://help.github.com/articles/creating-an-access-token-for-command-line-use/ for how to generate a token - # See http://docs.travis-ci.com/user/encryption-keys/ for how to generate - # a secure variable on Travis - echo "-- pushing docs --" - - ( - git config --global 
user.email "travis@travis-ci.com" - git config --global user.name "Travis Bot" - - git clone --quiet --branch=gh-pages https://${GH_REF} doc_build - cd doc_build - - git rm -r dev - cp -r ../doc/build/html dev - git add dev - - git commit -m "Deployed to GitHub Pages" - git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" gh-pages > /dev/null 2>&1 - ) -else - echo "-- will only push docs from master --" -fi diff -Nru skimage-0.13.1/tools/travis/before_install.sh skimage-0.14.0/tools/travis/before_install.sh --- skimage-0.13.1/tools/travis/before_install.sh 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/tools/travis/before_install.sh 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,88 @@ +#!/usr/bin/env bash +set -ex + +export PIP_DEFAULT_TIMEOUT=60 + +# This URL is for any extra wheels that are not available on pypi. As of 14 +# Jan 2017, the major packages such as numpy and matplotlib are up for all +# platforms. The URL points to a Rackspace CDN belonging to the scikit-learn +# team. Please contact Olivier Grisel or Matthew Brett if you need +# permissions for this folder. +EXTRA_WHEELS="https://5cf40426d9f06eb7461d-6fe47d9331aba7cd62fc36c7196769e4.ssl.cf2.rackcdn.com" +WHEELHOUSE="--find-links=$EXTRA_WHEELS" + +if [[ "$TRAVIS_OS_NAME" != "osx" ]]; then + sh -e /etc/init.d/xvfb start + # This one is for wheels we can only build on the travis precise container. + # As of 14 Jan 2017, this is only pyside. Also on Rackspace, see above. + # To build new wheels for this container, consider using: + # https://github.com/matthew-brett/travis-wheel-builder . The wheels from + # that building repo upload to the container "travis-wheels" available at + # https://8167b5c3a2af93a0a9fb-13c6eee0d707a05fa610c311eec04c66.ssl.cf2.rackcdn.com + # You then need to transfer them to the container pointed to by the URL + # below (called "precise-wheels" on the Rackspace interface). 
+ PRECISE_WHEELS="https://7d8d0debcc2964ae0517-cec8b1780d3c0de237cc726d565607b4.ssl.cf2.rackcdn.com" + WHEELHOUSE="--find-links=$PRECISE_WHEELS $WHEELHOUSE" +fi +export WHEELHOUSE + +export DISPLAY=:99.0 +export PYTHONWARNINGS="d,all:::skimage" +export TEST_ARGS="-v --doctest-modules" +WHEELBINARIES="matplotlib scipy pillow cython" + +retry () { + # https://gist.github.com/fungusakafungus/1026804 + local retry_max=3 + local count=$retry_max + while [ $count -gt 0 ]; do + "$@" && break + count=$(($count - 1)) + sleep 1 + done + + [ $count -eq 0 ] && { + echo "Retry failed [$retry_max]: $@" >&2 + return 1 + } + return 0 +} + +# add build dependencies +echo "cython>=0.23.4" >> requirements/default.txt +echo "numpydoc>=0.6" >> requirements/default.txt + +if [[ $MINIMUM_REQUIREMENTS == 1 ]]; then + sed -i 's/>=/==/g' requirements/default.txt +fi + +python -m pip install --upgrade pip +pip install --retries 3 -q wheel flake8 codecov pytest pytest-cov +# install numpy from PyPI instead of our wheelhouse +pip install --retries 3 -q wheel numpy + +# install wheels +for requirement in $WHEELBINARIES; do + WHEELS="$WHEELS $(grep $requirement requirements/default.txt)" +done +pip install --retries 3 -q $PIP_FLAGS $WHEELHOUSE $WHEELS + +pip install --retries 3 -q $PIP_FLAGS -r requirements.txt + +# Show what's installed +pip list + +section () { + echo -en "travis_fold:start:$1\r" + tools/header.py $1 +} + +section_end () { + echo -en "travis_fold:end:$1\r" +} + +export -f section +export -f section_end +export -f retry + +set +ex diff -Nru skimage-0.13.1/tools/travis/deploy_docs.sh skimage-0.14.0/tools/travis/deploy_docs.sh --- skimage-0.13.1/tools/travis/deploy_docs.sh 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/tools/travis/deploy_docs.sh 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,29 @@ +#!/bin/bash +if [[ $TRAVIS_PULL_REQUEST == false && $TRAVIS_BRANCH == "master" && + $BUILD_DOCS == 1 && $DEPLOY_DOCS == 1 ]] +then + # See 
https://help.github.com/articles/creating-an-access-token-for-command-line-use/ for how to generate a token + # See http://docs.travis-ci.com/user/encryption-keys/ for how to generate + # a secure variable on Travis + echo "-- pushing docs --" + + ( + git config --global user.email "travis@travis-ci.com" + git config --global user.name "Travis Bot" + + # build docs a second time to fix links to Javascript + (cd doc && make html) + + git clone --quiet --branch=gh-pages https://${GH_REF} doc_build + cd doc_build + + git rm -r dev + cp -r ../doc/build/html dev + git add dev + + git commit -m "Deployed to GitHub Pages" + git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" gh-pages > /dev/null 2>&1 + ) +else + echo "-- will only push docs from master --" +fi diff -Nru skimage-0.13.1/tools/travis/install_qt.sh skimage-0.14.0/tools/travis/install_qt.sh --- skimage-0.13.1/tools/travis/install_qt.sh 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/tools/travis/install_qt.sh 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +set -ex + +if [[ "${TRAVIS_OS_NAME}" == "osx" ]]; then + echo "backend : Template" > $MPL_DIR/matplotlibrc +fi +# Now configure Matplotlib to use Qt4 +if [[ "${QT}" == "PyQt4" ]]; then + # only do this for python 2.7 + # http://stackoverflow.com/a/9716100 + LIBS=( PyQt4 sip.so ) + + VAR=( $(which -a python$PY) ) + + GET_PYTHON_LIB_CMD="from distutils.sysconfig import get_python_lib; print (get_python_lib())" + LIB_VIRTUALENV_PATH=$(python -c "$GET_PYTHON_LIB_CMD") + LIB_SYSTEM_PATH=$(${VAR[-1]} -c "$GET_PYTHON_LIB_CMD") + + for LIB in ${LIBS[@]} + do + ln -sf $LIB_SYSTEM_PATH/$LIB $LIB_VIRTUALENV_PATH/$LIB + done + + MPL_QT_API=PyQt4 + export QT_API=pyqt +elif [[ "${QT}" == "PySide" ]]; then + python ~/venv/bin/pyside_postinstall.py -install + MPL_QT_API=PySide + export QT_API=pyside +elif [[ "${QT}" == "PyQt5" ]]; then + pip install --retries 3 -q $PIP_FLAGS pyqt5 + MPL_QT_API=PyQt5 + export QT_API=pyqt5 +elif [[ 
"${QT}" == "PySide2" ]]; then + pip install--retries 3 -q $PIP_FLAGS pyside2 + MPL_QT_API=PySide2 + export QT_API=pyside2 +else + echo 'backend: Template' > $MPL_DIR/matplotlibrc +fi +if [[ "${QT}" == "PyQt4" || "${QT}" == "PySide" ]]; then + echo 'backend: Qt4Agg' > $MPL_DIR/matplotlibrc + echo 'backend.qt4 : '$MPL_QT_API >> $MPL_DIR/matplotlibrc +elif [[ "${QT}" == "PyQt5" || "${QT}" == "PySide2" ]]; then + # Is this correct for PySide2? + echo 'backend: Qt5Agg' > $MPL_DIR/matplotlibrc + echo 'backend.qt5 : '$MPL_QT_API >> $MPL_DIR/matplotlibrc +fi + +set -ex diff -Nru skimage-0.13.1/tools/travis/notes.txt skimage-0.14.0/tools/travis/notes.txt --- skimage-0.13.1/tools/travis/notes.txt 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/tools/travis/notes.txt 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,23 @@ +- Use http://yaml-online-parser.appspot.com/ to make sure the yaml file is valid. + http://lint.travis-ci.org/ is recommended elsewhere but does not give helpful + error reports. +- Make sure all of your "-" lines start on the same column +- Use bash scripts for `before_install` and `script` or any part that + has conditional statements + - Make sure they are "executable" (chmod +x) + - Use the following header: + + ``` + #!/usr/bin/env bash + set -ex + ``` + +- Use the `retry` bash function from `before_install.sh` before a command to + have it try 3 times before failing. +- Use `pip install --retries N` for retrying package downloads. +- Use the `section` function to start a folded section of the script. + Section names must have underscores or dots instead of spaces and must be + accompanied by a corresponding `section_end` call. +- Feel free to cancel a build rather than waiting for it to go to completion + if you have made a change to that branch. +- A VM with 64bit Ubuntu 14.04 is a huge help for debugging. 
diff -Nru skimage-0.13.1/tools/travis/osx_install.sh skimage-0.14.0/tools/travis/osx_install.sh --- skimage-0.13.1/tools/travis/osx_install.sh 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/tools/travis/osx_install.sh 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,14 @@ +#!/bin/bash +brew update +brew install ccache +brew tap caskroom/cask +brew cask install basictex + +export PATH="$PATH:/Library/TeX/texbin" +sudo tlmgr update --self +sudo tlmgr install ucs dvipng anyfontsize + +# Set up virtualenv on OSX +git clone https://github.com/matthew-brett/multibuild ~/multibuild +source ~/multibuild/osx_utils.sh +get_macpython_environment $TRAVIS_PYTHON_VERSION ~/venv diff -Nru skimage-0.13.1/tools/travis/script.sh skimage-0.14.0/tools/travis/script.sh --- skimage-0.13.1/tools/travis/script.sh 1970-01-01 00:00:00.000000000 +0000 +++ skimage-0.14.0/tools/travis/script.sh 2018-05-29 01:27:44.000000000 +0000 @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +export PY=${TRAVIS_PYTHON_VERSION} + +section "Tests.flake8" +flake8 --exit-zero --exclude=test_*,six.py skimage doc/examples viewer_examples +section_end "Tests.flake8" + + +section "Tests.pytest" +# run tests. If running with optional dependencies, report coverage +if [[ "$OPTIONAL_DEPS" == "1" ]]; then + export TEST_ARGS="${TEST_ARGS} --cov=skimage" +fi +# Show what's installed +pip list +pytest ${TEST_ARGS} skimage +section_end "Tests.pytest" + + +section "Tests.examples" +# Run example applications +echo Build or run examples +if [[ "${BUILD_DOCS}" == "1" ]]; then + # requirements/docs.txt fails on Travis OSX + pip install --retries 3 -q -r ./requirements/docs.txt + export SPHINXCACHE=${HOME}/.cache/sphinx; make html +elif [[ "${TEST_EXAMPLES}" != "0" ]]; then + # OSX Can't install sphinx-gallery. + # I think all it needs is scikit-learn from that requirements doc + # to run the tests. 
See Issue #3084 + if [[ "${TRAVIS_OS_NAME}" == "osx" ]]; then + pip install --retries 3 -q scikit-learn + else + pip install --retries 3 -q -r ./requirements/docs.txt + fi + cp $MPL_DIR/matplotlibrc $MPL_DIR/matplotlibrc_backup + echo 'backend : Template' > $MPL_DIR/matplotlibrc + for f in doc/examples/*/*.py; do + python "${f}" + if [ $? -ne 0 ]; then + exit 1 + fi + done + mv $MPL_DIR/matplotlibrc_backup $MPL_DIR/matplotlibrc +fi +section_end "Tests.examples" diff -Nru skimage-0.13.1/tools/travis_before_install.sh skimage-0.14.0/tools/travis_before_install.sh --- skimage-0.13.1/tools/travis_before_install.sh 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/tools/travis_before_install.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -#!/usr/bin/env bash -set -ex - - -export COVERALLS_REPO_TOKEN=7LdFN9232ZbSY3oaXHbQIzLazrSf6w2pQ -export PIP_DEFAULT_TIMEOUT=60 - -# This URL is for any extra wheels that are not available on pypi. As of 14 -# Jan 2017, the major packages such as numpy and matplotlib are up for all -# platforms. The URL points to a Rackspace CDN belonging to the scikit-learn -# team. Please contact Olivier Grisel or Matthew Brett if you need -# permissions for this folder. -EXTRA_WHEELS="https://5cf40426d9f06eb7461d-6fe47d9331aba7cd62fc36c7196769e4.ssl.cf2.rackcdn.com" -WHEELHOUSE="--find-links=$EXTRA_WHEELS" - -if [[ "$TRAVIS_OS_NAME" != "osx" ]]; then - sh -e /etc/init.d/xvfb start - # This one is for wheels we can only build on the travis precise container. - # As of 14 Jan 2017, this is only pyside. Also on Rackspace, see above. - # To build new wheels for this container, consider using: - # https://github.com/matthew-brett/travis-wheel-builder . 
The wheels from - # that building repo upload to the container "travis-wheels" available at - # https://8167b5c3a2af93a0a9fb-13c6eee0d707a05fa610c311eec04c66.ssl.cf2.rackcdn.com - # You then need to transfer them to the container pointed to by the URL - # below (called "precise-wheels" on the Rackspace interface). - PRECISE_WHEELS="https://7d8d0debcc2964ae0517-cec8b1780d3c0de237cc726d565607b4.ssl.cf2.rackcdn.com" - WHEELHOUSE="--find-links=$PRECISE_WHEELS $WHEELHOUSE" -fi -export WHEELHOUSE - -export DISPLAY=:99.0 -export PYTHONWARNINGS="d,all:::skimage" -export TEST_ARGS="--exe --ignore-files=^_test -v --with-doctest \ - --ignore-files=^setup.py$" -WHEELBINARIES="matplotlib scipy pillow cython" - -retry () { - # https://gist.github.com/fungusakafungus/1026804 - local retry_max=3 - local count=$retry_max - while [ $count -gt 0 ]; do - "$@" && break - count=$(($count - 1)) - sleep 1 - done - - [ $count -eq 0 ] && { - echo "Retry failed [$retry_max]: $@" >&2 - return 1 - } - return 0 -} - -# add build dependencies -echo "cython>=0.23.4" >> requirements.txt -echo "numpydoc>=0.6" >> requirements.txt - -if [[ $MINIMUM_REQUIREMENTS == 1 ]]; then - sed -i 's/>=/==/g' requirements.txt -fi - -# create new empty venv -virtualenv -p python ~/venv -source ~/venv/bin/activate - -python -m pip install --upgrade pip -pip install --retries 3 -q wheel flake8 codecov nose -# install numpy from PyPI instead of our wheelhouse -pip install --retries 3 -q wheel numpy - -# install wheels -for requirement in $WHEELBINARIES; do - WHEELS="$WHEELS $(grep $requirement requirements.txt)" -done -pip install --retries 3 -q $PIP_FLAGS $WHEELHOUSE $WHEELS - -pip install --retries 3 -q $PIP_FLAGS -r requirements.txt - -# Show what's installed -pip list - -section () { - echo -en "travis_fold:start:$1\r" - tools/header.py $1 -} - -section_end () { - echo -en "travis_fold:end:$1\r" -} - -export -f section -export -f section_end -export -f retry - -set +ex diff -Nru 
skimage-0.13.1/tools/travis_notes.txt skimage-0.14.0/tools/travis_notes.txt --- skimage-0.13.1/tools/travis_notes.txt 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/tools/travis_notes.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ - -- Use http://yaml-online-parser.appspot.com/ to make sure -the yaml file is valid. - http://lint.travis-ci.org/ is recommended elsewhere but does not give helpful - error reports. -- Make sure all of your "-" lines start on the same column -- Use bash scripts for `before_install` and `script` or any part that - has conditional statements - - Make sure they are "executable" (chmod +x) - - Use the following header: - - ``` - #!/usr/bin/env bash - set -ex - ``` - -- Use the `retry` bash function from `before_install.sh` before a command to -have it try 3 times before failing. -- Use `pip install --retries N` for retrying package downloads. -- Use the `section` function to start a folded section of the script. -Section names must have underscores or dots instead of spaces and must be -accompanied by a corresponding `section_end` call. -- Feel free to cancel a build rather than waiting for it to go to completion - if you have made a change to that branch. -- A VM with 64bit Ubuntu 12.04 is a huge help for debugging. 
diff -Nru skimage-0.13.1/tools/travis_osx_install.sh skimage-0.14.0/tools/travis_osx_install.sh --- skimage-0.13.1/tools/travis_osx_install.sh 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/tools/travis_osx_install.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -#!/bin/bash -brew update -brew install ccache -brew tap caskroom/cask -brew cask install basictex - -export PATH="$PATH:/Library/TeX/texbin" -sudo tlmgr update --self -sudo tlmgr install ucs dvipng anyfontsize - -git clone https://github.com/MacPython/terryfy.git ~/terryfy -source ~/terryfy/travis_tools.sh -get_python_environment macpython $TRAVIS_PYTHON_VERSION ~/macpython_venv -source ~/macpython_venv/bin/activate -pip install virtualenv - diff -Nru skimage-0.13.1/tools/travis_script.sh skimage-0.14.0/tools/travis_script.sh --- skimage-0.13.1/tools/travis_script.sh 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/tools/travis_script.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,123 +0,0 @@ -#!/usr/bin/env bash -set -ex - -PY=$TRAVIS_PYTHON_VERSION - -# Matplotlib settings - do not show figures during doc examples -if [[ $MINIMUM_REQUIREMENTS == 1 || $TRAVIS_OS_NAME == "osx" ]]; then - MPL_DIR=$HOME/.matplotlib -else - MPL_DIR=$HOME/.config/matplotlib -fi - -mkdir -p $MPL_DIR -touch $MPL_DIR/matplotlibrc - -if [[ $TRAVIS_OS_NAME == "osx" ]]; then - echo 'backend : Template' > $MPL_DIR/matplotlibrc -fi - -section "Test.with.min.requirements" -nosetests $TEST_ARGS skimage -section_end "Test.with.min.requirements" - -section "Build.docs" -if [[ $NO_SPHINX != 1 ]]; then - export SPHINXCACHE=$HOME/.cache/sphinx; make html -fi -section_end "Build.docs" - -section "Flake8.test" -flake8 --exit-zero --exclude=test_*,six.py skimage doc/examples viewer_examples -section_end "Flake8.test" - - -section "Install.optional.dependencies" - -# Install most of the optional packages -if [[ $OPTIONAL_DEPS == 1 ]]; then - pip install --retries 3 -q -r ./optional_requirements.txt $WHEELHOUSE -fi - -# 
Install Qt and then update the Matplotlib settings -if [[ $WITH_QT == 1 ]]; then - # http://stackoverflow.com/a/9716100 - LIBS=( PyQt4 sip.so ) - - VAR=( $(which -a python$PY) ) - - GET_PYTHON_LIB_CMD="from distutils.sysconfig import get_python_lib; print (get_python_lib())" - LIB_VIRTUALENV_PATH=$(python -c "$GET_PYTHON_LIB_CMD") - LIB_SYSTEM_PATH=$(${VAR[-1]} -c "$GET_PYTHON_LIB_CMD") - - for LIB in ${LIBS[@]} - do - ln -sf $LIB_SYSTEM_PATH/$LIB $LIB_VIRTUALENV_PATH/$LIB - done - -elif [ "$WITH_PYSIDE" == 1 ] && [ -e ~/venv/bin/pyside_postinstall.py ]; then - python ~/venv/bin/pyside_postinstall.py -install -fi - -if [[ $WITH_PYAMG == 1 ]]; then - pip install --retries 3 -q pyamg -fi - -# Show what's installed -pip list - -section_end "Install.optional.dependencies" - - -section "Run.doc.examples" -echo 'backend : Template' > $MPL_DIR/matplotlibrc - - -for f in doc/examples/*/*.py; do - python "$f" - if [ $? -ne 0 ]; then - exit 1 - fi -done - -section_end "Run.doc.examples" - - -section "Run.doc.applications" - -for f in doc/examples/xx_applications/*.py; do - python "$f" - if [ $? 
-ne 0 ]; then - exit 1 - fi -done - -# Now configure Matplotlib to use Qt4 -if [[ $WITH_QT == 1 ]]; then - MPL_QT_API=PyQt4 - export QT_API=pyqt -elif [[ $WITH_PYSIDE == 1 ]]; then - MPL_QT_API=PySide - export QT_API=pyside -fi -if [[ $WITH_QT == 1 || $WITH_PYSIDE == 1 ]]; then - echo 'backend: Qt4Agg' > $MPL_DIR/matplotlibrc - echo 'backend.qt4 : '$MPL_QT_API >> $MPL_DIR/matplotlibrc -fi - -section_end "Run.doc.applications" - - -section "Test.with.optional.dependencies" - -# run tests again with optional dependencies to get more coverage -if [[ $OPTIONAL_DEPS == 1 ]]; then - TEST_ARGS="$TEST_ARGS --with-cov --cover-package skimage" -fi -nosetests $TEST_ARGS - -section_end "Test.with.optional.dependencies" - -section "Prepare.release" -doc/release/contribs.py HEAD~10 -section_end "Prepare.release" diff -Nru skimage-0.13.1/.travis.yml skimage-0.14.0/.travis.yml --- skimage-0.13.1/.travis.yml 2017-09-26 23:38:27.000000000 +0000 +++ skimage-0.14.0/.travis.yml 2018-05-29 01:27:44.000000000 +0000 @@ -3,7 +3,7 @@ # After changing this file, check it on: # http://yaml-online-parser.appspot.com/ -# See tools/travis_notes.txt for some guidelines +# See tools/travis/notes.txt for some guidelines language: python sudo: false @@ -17,11 +17,11 @@ apt: packages: - ccache - - libfreeimage3 - texlive - texlive-latex-extra - dvipng - python-qt4 + env: global: - GH_REF: github.com/scikit-image/docs.git @@ -37,47 +37,72 @@ matrix: include: + # 2.7 build (until 0.14 is release) + - os: linux + python: 2.7 + env: WITH_PYAMG=1 QT=PyQt4 - os: linux python: 2.7 - env: PIP_FLAGS="--pre" + env: WITH_PYAMG=1 QT=PyQt4 PIP_FLAGS="--pre" - os: linux python: 2.7 - env: WITH_PYAMG=1 MINIMUM_REQUIREMENTS=1 WITH_QT=1 + env: MINIMUM_REQUIREMENTS=1 - os: linux python: 3.4 - env: OPTIONAL_DEPS=1 WITH_PYSIDE=1 - os: linux python: 3.5 - os: linux - python: 3.5 - env: PIP_FLAGS="--pre" + python: 3.6 + env: QT=PyQt5 WITH_PYAMG=1 OPTIONAL_DEPS=1 BUILD_DOCS=1 DEPLOY_DOCS=1 - os: linux python: 3.6 + env: 
QT=PyQt5 WITH_PYAMG=1 OPTIONAL_DEPS=1 PIP_FLAGS="--pre" - os: osx - osx_image: xcode7.3 + osx_image: xcode9 language: objective-c + # OS X has a hard time installing the docs dependencies env: TRAVIS_PYTHON_VERSION=3.5 before_install: - - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then source tools/travis_osx_install.sh ; fi + - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then + source tools/travis/osx_install.sh; + else + virtualenv -p python ~/venv; + source ~/venv/bin/activate; + fi - ccache -s - export PATH=/usr/lib/ccache:${PATH} - # Attempt to fix bug making virtualenvs for Python 3.6 - - python -m pip install -U pip - - pip install -U virtualenv - - source tools/travis_before_install.sh + - source tools/travis/before_install.sh - which python; python --version - - tools/check_bento_build.py - tools/build_versions.py - tools/check_sdist.py install: - - section build - python setup.py develop - - section_end build + # Matplotlib settings - do not show figures during doc examples + - | + if [[ "${TRAVIS_OS_NAME}" == "osx" ]]; then + export MPL_DIR=${HOME}/.matplotlib + else + export MPL_DIR=${HOME}/.config/matplotlib + fi + - mkdir -p ${MPL_DIR} + - touch ${MPL_DIR}/matplotlibrc + # Install most of the optional packages + - | + if [[ "${OPTIONAL_DEPS}" == "1" ]]; then + pip install --retries 3 -q -r ./requirements/optional.txt $WHEELHOUSE + fi + - | + if [[ "${WITH_PYAMG}" == "1" ]]; then + pip install --retries 3 -q pyamg + fi + - source tools/travis/install_qt.sh -script: tools/travis_script.sh +script: tools/travis/script.sh after_success: - codecov - - bash tools/deploy_docs.sh + # Prepare.release + - doc/release/contribs.py HEAD~10 + - bash tools/travis/deploy_docs.sh