diff -Nru python-numpy-1.13.3/debian/changelog python-numpy-1.14.5/debian/changelog --- python-numpy-1.13.3/debian/changelog 2017-12-05 14:32:02.000000000 +0000 +++ python-numpy-1.14.5/debian/changelog 2018-06-13 13:01:53.000000000 +0000 @@ -1,3 +1,86 @@ +python-numpy (1:1.14.5-1ubuntu1) cosmic; urgency=low + + * Merge from Debian unstable. Remaining changes: + - debian/patches/20_disable-plot-extension.patch + Disable plot_directive extension, and catch ImportErrors when + matplotlib cannot be imported, which allows us to remove + python-matplotlib from dependencies. This is required because + python-numpy is in main, while python-matplotlib is in universe. + + -- Gianfranco Costamagna Wed, 13 Jun 2018 15:01:53 +0200 + +python-numpy (1:1.14.5-1) unstable; urgency=medium + + * New upstream release + + -- Sandro Tosi Tue, 12 Jun 2018 19:33:00 -0400 + +python-numpy (1:1.14.4-1) unstable; urgency=medium + + [ OndÅ™ej Nový ] + * d/control: Remove ancient X-Python-Version field + * d/control: Remove ancient X-Python3-Version field + + [ Sandro Tosi ] + * New upstream release; Closes: #898776 + + -- Sandro Tosi Thu, 07 Jun 2018 21:02:36 -0400 + +python-numpy (1:1.14.3-2ubuntu2) cosmic; urgency=medium + + * Fix alignment issue causing FTBFS on armhf + + -- Graham Inggs Tue, 29 May 2018 00:17:49 +0000 + +python-numpy (1:1.14.3-2ubuntu1) cosmic; urgency=low + + * Merge from Debian unstable. Remaining changes: + - debian/patches/20_disable-plot-extension.patch + Disable plot_directive extension, and catch ImportErrors when + matplotlib cannot be imported, which allows us to remove + python-matplotlib from dependencies. This is required because + python-numpy is in main, while python-matplotlib is in universe. 
+ + -- Gianfranco Costamagna Wed, 09 May 2018 14:13:15 +0200 + +python-numpy (1:1.14.3-2) unstable; urgency=medium + + * debian/control + - move docutils, matplotlib, sphinx from b-d-i to b-d, needed to build doc + + -- Sandro Tosi Sat, 05 May 2018 18:57:59 -0400 + +python-numpy (1:1.14.3-1) unstable; urgency=medium + + [ OndÅ™ej Nový ] + * d/control: Set Vcs-* to salsa.debian.org + * d/tests: Use AUTOPKGTEST_TMP instead of ADTTMP + * d/rules: Remove trailing whitespaces + + [ Stuart Prescott ] + * add debhelper sequence to support --with numpy,numpy3; Closes: #897602 + + [ Sandro Tosi ] + * New upstream release; Closes: #872459, #896633 + * debian/watch + - switch back to use pypi.debian.net redirector + * debian/patches + - refresh for new upstream release + * debian/copyright + - extend packaging copyright years + * debian/versions + - bump API version to 12 + * debian/rules + - comment f2py.1 installation, no longer available upstream (hopefully only + temporarily) + * bump compat to 11, and fix debian/rules to use proper dh sequencer + * debian/control + - bump Standards-Version to 4.1.4 (no changes needed) + * debian/patches/0007-ENH-Add-support-for-the-64-bit-RISC-V-architecture.patch + - add support for riscv64 architecture; Closes: #894574 + + -- Sandro Tosi Sat, 05 May 2018 17:06:18 -0400 + python-numpy (1:1.13.3-2ubuntu1) bionic; urgency=low * Merge from Debian unstable. Remaining changes: @@ -1924,3 +2007,4 @@ * Initial release Closes. 
-- Marco Presi (Zufus) Tue, 14 Feb 2006 00:40:53 +0100 + diff -Nru python-numpy-1.13.3/debian/compat python-numpy-1.14.5/debian/compat --- python-numpy-1.13.3/debian/compat 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/compat 2018-06-13 04:20:22.000000000 +0000 @@ -1 +1 @@ -7 +11 diff -Nru python-numpy-1.13.3/debian/control python-numpy-1.14.5/debian/control --- python-numpy-1.13.3/debian/control 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/control 2018-06-13 13:01:53.000000000 +0000 @@ -5,30 +5,29 @@ XSBC-Original-Maintainer: Sandro Tosi Uploaders: Debian Python Modules Team Build-Depends: cython (>= 0.26-2.1), - debhelper (>= 8.9.7~), + debhelper (>= 11), dh-python, gfortran (>= 4:4.2), libblas-dev [!arm !m68k], liblapack-dev [!arm !m68k], + python2.7-dev (>= 2.7.14~rc1-1), + python3.6-dev (>= 3.6.2-3), python-all-dbg, - python-all-dev, - python-nose (>= 1.0), - python-tz, python3-all-dbg, + python-all-dev, python3-all-dev, + python-docutils, + python3-matplotlib, + python-nose (>= 1.0), python3-nose (>= 1.0), - python3-tz, python-setuptools, python3-setuptools, - python2.7-dev (>= 2.7.14~rc1-1), - python3.6-dev (>= 3.6.2-3), -Build-Depends-Indep: python-docutils, - python3-sphinx, -X-Python-Version: >= 2.7 -X-Python3-Version: >= 3.4 -Standards-Version: 4.1.2 -Vcs-Git: https://anonscm.debian.org/git/python-modules/packages/python-numpy.git -Vcs-Browser: https://anonscm.debian.org/cgit/python-modules/packages/python-numpy.git + python3-sphinx, + python-tz, + python3-tz, +Standards-Version: 4.1.4 +Vcs-Git: https://salsa.debian.org/python-team/modules/python-numpy.git +Vcs-Browser: https://salsa.debian.org/python-team/modules/python-numpy Homepage: http://www.numpy.org/ Package: python-numpy diff -Nru python-numpy-1.13.3/debian/copyright python-numpy-1.14.5/debian/copyright --- python-numpy-1.13.3/debian/copyright 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/copyright 2018-06-13 04:20:22.000000000 
+0000 @@ -37,7 +37,7 @@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -The Debian packaging is Copyright (C) 2010-2017, Sandro Tosi +The Debian packaging is Copyright (C) 2010-2018, Sandro Tosi and is licensed under the same terms as upstream code. doc/scipy-sphinx-theme/_theme/scipy/static/js/copybutton.js diff -Nru python-numpy-1.13.3/debian/.git-dpm python-numpy-1.14.5/debian/.git-dpm --- python-numpy-1.13.3/debian/.git-dpm 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/.git-dpm 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -# see git-dpm(1) from git-dpm package -5266f217bc304c886f0f046584aa905bd2c9e0e8 -5266f217bc304c886f0f046584aa905bd2c9e0e8 -91cce07f4d5710256bb8773227edc1acc82b3e45 -91cce07f4d5710256bb8773227edc1acc82b3e45 -python-numpy_1.13.3.orig.tar.gz -87215725cb1f870903831b85afd3761fb7674564 -4520295 -debianTag="debian/%e%v" -patchedTag="patched/%e%v" -upstreamTag="upstream/%e%u" diff -Nru python-numpy-1.13.3/debian/numpy3.pm python-numpy-1.14.5/debian/numpy3.pm --- python-numpy-1.13.3/debian/numpy3.pm 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/debian/numpy3.pm 2018-06-12 23:33:00.000000000 +0000 @@ -0,0 +1,8 @@ +#!/usr/bin/perl +use warnings; +use strict; +use Debian::Debhelper::Dh_Lib; + +insert_before("dh_gencontrol", "dh_numpy3"); + +1 diff -Nru python-numpy-1.13.3/debian/numpy.pm python-numpy-1.14.5/debian/numpy.pm --- python-numpy-1.13.3/debian/numpy.pm 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/debian/numpy.pm 2018-06-12 23:33:00.000000000 +0000 @@ -0,0 +1,8 @@ +#!/usr/bin/perl +use warnings; +use strict; +use Debian::Debhelper::Dh_Lib; + +insert_before("dh_gencontrol", "dh_numpy"); + +1 diff -Nru python-numpy-1.13.3/debian/patches/0005-Dont-fail-if-we-cant-import-mingw32.patch python-numpy-1.14.5/debian/patches/0005-Dont-fail-if-we-cant-import-mingw32.patch --- 
python-numpy-1.13.3/debian/patches/0005-Dont-fail-if-we-cant-import-mingw32.patch 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/patches/0005-Dont-fail-if-we-cant-import-mingw32.patch 2018-06-13 04:20:22.000000000 +0000 @@ -1,4 +1,3 @@ -From 79503e5c6938ed7e489a4a6c7f2ba262fef3a79e Mon Sep 17 00:00:00 2001 From: Sandro Tosi Date: Sun, 10 Jan 2016 22:16:03 +0000 Subject: Dont fail if we cant import mingw32 @@ -12,7 +11,7 @@ 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py -index 1b39840..6ea4959 100644 +index bd093c5..7b74828 100644 --- a/numpy/core/setup_common.py +++ b/numpy/core/setup_common.py @@ -6,7 +6,10 @@ import warnings diff -Nru python-numpy-1.13.3/debian/patches/0006-disable-asserts-on-ppc-with-broken-malloc-only-longd.patch python-numpy-1.14.5/debian/patches/0006-disable-asserts-on-ppc-with-broken-malloc-only-longd.patch --- python-numpy-1.13.3/debian/patches/0006-disable-asserts-on-ppc-with-broken-malloc-only-longd.patch 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/patches/0006-disable-asserts-on-ppc-with-broken-malloc-only-longd.patch 2018-06-13 04:20:22.000000000 +0000 @@ -1,4 +1,3 @@ -From 5266f217bc304c886f0f046584aa905bd2c9e0e8 Mon Sep 17 00:00:00 2001 From: Sandro Tosi Date: Mon, 11 Jan 2016 23:51:23 +0000 Subject: disable asserts on ppc with broken malloc only longdouble affected diff -Nru python-numpy-1.13.3/debian/patches/0007-ENH-Add-support-for-the-64-bit-RISC-V-architecture.patch python-numpy-1.14.5/debian/patches/0007-ENH-Add-support-for-the-64-bit-RISC-V-architecture.patch --- python-numpy-1.13.3/debian/patches/0007-ENH-Add-support-for-the-64-bit-RISC-V-architecture.patch 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/debian/patches/0007-ENH-Add-support-for-the-64-bit-RISC-V-architecture.patch 2018-06-12 23:33:00.000000000 +0000 @@ -0,0 +1,53 @@ +From: David Abdurachmanov +Date: Sun, 1 Apr 2018 14:50:53 +0200 +Subject: 
ENH: Add support for the 64-bit RISC-V architecture + +RISC-V (pronounced "RISC Five") is an open source instruction set +architecture (ISA). The 64-bit version may run the Linux kernel and the +usual stack of applications on top of it, including Python. + +This patch adds support for riscv64 to Numpy. With it the full testsuite +passes for both Python 2.7 and 3.6, with the only exception of +test_float (test_numeric.TestBoolCmp). See #8213 for details. + +Closes: #8213 +--- + numpy/core/include/numpy/npy_cpu.h | 3 +++ + numpy/core/include/numpy/npy_endian.h | 3 ++- + 2 files changed, 5 insertions(+), 1 deletion(-) + +diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h +index 84653ea..106ffa4 100644 +--- a/numpy/core/include/numpy/npy_cpu.h ++++ b/numpy/core/include/numpy/npy_cpu.h +@@ -17,6 +17,7 @@ + * NPY_CPU_SH_BE + * NPY_CPU_ARCEL + * NPY_CPU_ARCEB ++ * NPY_CPU_RISCV64 + */ + #ifndef _NPY_CPUARCH_H_ + #define _NPY_CPUARCH_H_ +@@ -82,6 +83,8 @@ + #define NPY_CPU_ARCEL + #elif defined(__arc__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_ARCEB ++#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64 ++ #define NPY_CPU_RISCV64 + #else + #error Unknown CPU, please report this to numpy maintainers with \ + information about your platform (OS, CPU and compiler) +diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h +index 1a42121..320b791 100644 +--- a/numpy/core/include/numpy/npy_endian.h ++++ b/numpy/core/include/numpy/npy_endian.h +@@ -46,7 +46,8 @@ + || defined(NPY_CPU_SH_LE) \ + || defined(NPY_CPU_MIPSEL) \ + || defined(NPY_CPU_PPC64LE) \ +- || defined(NPY_CPU_ARCEL) ++ || defined(NPY_CPU_ARCEL) \ ++ || defined(NPY_CPU_RISCV64) + #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN + #elif defined(NPY_CPU_PPC) \ + || defined(NPY_CPU_SPARC) \ diff -Nru python-numpy-1.13.3/debian/patches/03_force_f2py_version.patch python-numpy-1.14.5/debian/patches/03_force_f2py_version.patch --- 
python-numpy-1.13.3/debian/patches/03_force_f2py_version.patch 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/patches/03_force_f2py_version.patch 2018-06-13 04:20:22.000000000 +0000 @@ -1,4 +1,3 @@ -From 994109547758aa44fabf905b72c89a77b9c6a9fa Mon Sep 17 00:00:00 2001 From: SVN-Git Migration Date: Sun, 11 Oct 2015 10:12:15 -0700 Subject: force generation f2py postfixed with interpreter version diff -Nru python-numpy-1.13.3/debian/patches/10_use_local_python.org_object.inv_sphinx.diff python-numpy-1.14.5/debian/patches/10_use_local_python.org_object.inv_sphinx.diff --- python-numpy-1.13.3/debian/patches/10_use_local_python.org_object.inv_sphinx.diff 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/patches/10_use_local_python.org_object.inv_sphinx.diff 2018-06-13 04:20:22.000000000 +0000 @@ -1,8 +1,7 @@ -From bd80f7b5ae35c749c7008d4d243ce2623443f532 Mon Sep 17 00:00:00 2001 From: SVN-Git Migration Date: Sun, 11 Oct 2015 10:12:16 -0700 -Subject: Use a local copy of object.inv from doc.python.org, instead of - downloading it each time from the internet +Subject: Use a local copy of object.inv from doc.python.org, + instead of downloading it each time from the internet Patch-Name: 10_use_local_python.org_object.inv_sphinx.diff --- @@ -10,10 +9,10 @@ 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py -index 91d413b..7fa8c63 100644 +index 7c34a62..688c40c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py -@@ -203,7 +203,7 @@ texinfo_documents = [ +@@ -210,7 +210,7 @@ texinfo_documents = [ # Intersphinx configuration # ----------------------------------------------------------------------------- intersphinx_mapping = { diff -Nru python-numpy-1.13.3/debian/patches/20_disable-plot-extension.patch python-numpy-1.14.5/debian/patches/20_disable-plot-extension.patch --- python-numpy-1.13.3/debian/patches/20_disable-plot-extension.patch 2017-11-01 08:33:47.000000000 +0000 +++ 
python-numpy-1.14.5/debian/patches/20_disable-plot-extension.patch 2018-06-13 13:01:53.000000000 +0000 @@ -3,19 +3,21 @@ as a dependency. Author: Barry Warsaw Bug-Ubuntu: https://launchpad.net/bugs/664276 ---- a/doc/source/conf.py -+++ b/doc/source/conf.py -@@ -22,8 +22,7 @@ sys.path.insert(0, os.path.abspath('../s - extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc', +Index: python-numpy-1.14.5-1ubuntu1/doc/source/conf.py +=================================================================== +--- python-numpy-1.14.5-1ubuntu1.orig/doc/source/conf.py ++++ python-numpy-1.14.5-1ubuntu1/doc/source/conf.py +@@ -22,8 +22,7 @@ + extensions = ['sphinx.ext.autodoc', 'numpydoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.autosummary', - 'sphinx.ext.graphviz', - 'matplotlib.sphinxext.plot_directive'] + 'sphinx.ext.graphviz'] - # Add any paths that contain templates here, relative to this directory. - templates_path = ['_templates'] -@@ -215,7 +214,7 @@ + if sphinx.__version__ >= "1.4": + extensions.append('sphinx.ext.imgmath') +@@ -224,7 +223,7 @@ phantom_import_file = 'dump.xml' # Make numpydoc to generate plots for example sections diff -Nru python-numpy-1.13.3/debian/patches/adapt_swig_docs_to_debian.patch python-numpy-1.14.5/debian/patches/adapt_swig_docs_to_debian.patch --- python-numpy-1.13.3/debian/patches/adapt_swig_docs_to_debian.patch 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/patches/adapt_swig_docs_to_debian.patch 2018-06-13 04:20:22.000000000 +0000 @@ -1,4 +1,3 @@ -From 42a86cb0131a92d1315a4debac592e2ea5914db2 Mon Sep 17 00:00:00 2001 From: Sandro Tosi Date: Sun, 11 Oct 2015 10:12:18 -0700 Subject: Adapt SWIG documentation to Debian diff -Nru python-numpy-1.13.3/debian/patches/python3-soabi.patch python-numpy-1.14.5/debian/patches/python3-soabi.patch --- python-numpy-1.13.3/debian/patches/python3-soabi.patch 2017-12-05 04:44:50.000000000 +0000 +++ 
python-numpy-1.14.5/debian/patches/python3-soabi.patch 2018-06-13 04:20:22.000000000 +0000 @@ -1,4 +1,3 @@ -From 1fb6a5d6af6160eadd9abaed2d1787f28be4e7f8 Mon Sep 17 00:00:00 2001 From: Julian Taylor Date: Sun, 11 Oct 2015 10:12:17 -0700 Subject: adapt to python3 multiarch soabi @@ -16,7 +15,7 @@ 2 files changed, 9 insertions(+) diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py -index 7332822..582c157 100644 +index b8457c7..2af343f 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -134,6 +134,14 @@ else: @@ -35,10 +34,10 @@ libname_ext = [libname] diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py -index 2c58f11..05ba15e 100644 +index e8043d0..3fa30b0 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py -@@ -34,6 +34,7 @@ class TestLoadLibrary(TestCase): +@@ -34,6 +34,7 @@ class TestLoadLibrary(object): " (import error was: %s)" % str(e)) print(msg) diff -Nru python-numpy-1.13.3/debian/patches/series python-numpy-1.14.5/debian/patches/series --- python-numpy-1.13.3/debian/patches/series 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/patches/series 2018-06-13 13:01:53.000000000 +0000 @@ -4,4 +4,5 @@ adapt_swig_docs_to_debian.patch 0005-Dont-fail-if-we-cant-import-mingw32.patch 0006-disable-asserts-on-ppc-with-broken-malloc-only-longd.patch +0007-ENH-Add-support-for-the-64-bit-RISC-V-architecture.patch 20_disable-plot-extension.patch diff -Nru python-numpy-1.13.3/debian/python3-numpy.install python-numpy-1.14.5/debian/python3-numpy.install --- python-numpy-1.13.3/debian/python3-numpy.install 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/python3-numpy.install 2018-06-13 04:20:22.000000000 +0000 @@ -1,4 +1,5 @@ debian/dh_numpy3 usr/bin +debian/numpy3.pm usr/share/perl5/Debian/Debhelper/Sequence/ debian/versions usr/share/numpy3/ usr/bin/f2py3 usr/bin/f2py3.? 
diff -Nru python-numpy-1.13.3/debian/python-numpy.install python-numpy-1.14.5/debian/python-numpy.install --- python-numpy-1.13.3/debian/python-numpy.install 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/python-numpy.install 2018-06-13 04:20:22.000000000 +0000 @@ -1,4 +1,5 @@ debian/dh_numpy usr/bin +debian/numpy.pm usr/share/perl5/Debian/Debhelper/Sequence/ debian/versions usr/share/numpy/ usr/bin/f2py usr/bin/f2py2.? diff -Nru python-numpy-1.13.3/debian/python-numpy.manpages python-numpy-1.14.5/debian/python-numpy.manpages --- python-numpy-1.13.3/debian/python-numpy.manpages 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/python-numpy.manpages 2018-06-13 04:20:22.000000000 +0000 @@ -1,2 +1,2 @@ -doc/f2py/f2py.1 +#doc/f2py/f2py.1 debian/dh_numpy.1 diff -Nru python-numpy-1.13.3/debian/rules python-numpy-1.14.5/debian/rules --- python-numpy-1.13.3/debian/rules 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/rules 2018-06-13 04:20:22.000000000 +0000 @@ -25,6 +25,9 @@ python$$v-dbg setup.py build; \ done + # build doc only for default python version + (export MPLCONFIGDIR=. 
; make -C doc html PYTHON=python3 PYTHONPATH=../$(PY3LIBPATH)) + override_dh_auto_clean: rm -rf build/ rm -rf `find -name build -type d` @@ -34,22 +37,22 @@ # cython generated rm -f numpy/random/mtrand/mtrand.c -override_dh_installman: - dh_installman -ppython-numpy doc/f2py/f2py.1 - dh_installman -ppython3-numpy doc/f2py/f2py.1 - -mv debian/python3-numpy/usr/share/man/man1/f2py.1 \ - debian/python3-numpy/usr/share/man/man1/f2py3.1 - # link manpage for versioned and dbg incarnations of f2py - set -e; for v in $(PY2VERS); do \ - dh_link -ppython-numpy /usr/share/man/man1/f2py.1.gz /usr/share/man/man1/f2py$$v.1.gz; \ - dh_link -ppython-numpy-dbg /usr/share/man/man1/f2py.1.gz /usr/share/man/man1/f2py$$v-dbg.1.gz; \ - done - set -e; for v in $(PY3VERS); do \ - dh_link -ppython3-numpy /usr/share/man/man1/f2py3.1.gz /usr/share/man/man1/f2py$$v.1.gz; \ - dh_link -ppython3-numpy-dbg /usr/share/man/man1/f2py3.1.gz /usr/share/man/man1/f2py$$v-dbg.1.gz; \ - done - dh_link -ppython-numpy-dbg /usr/share/man/man1/f2py.1.gz /usr/share/man/man1/f2py-dbg.1.gz; - dh_link -ppython3-numpy-dbg /usr/share/man/man1/f2py3.1.gz /usr/share/man/man1/f2py3-dbg.1.gz; +#override_dh_installman: +# dh_installman -ppython-numpy doc/f2py/f2py.1 +# dh_installman -ppython3-numpy doc/f2py/f2py.1 +# -mv debian/python3-numpy/usr/share/man/man1/f2py.1 \ +# debian/python3-numpy/usr/share/man/man1/f2py3.1 +# # link manpage for versioned and dbg incarnations of f2py +# set -e; for v in $(PY2VERS); do \ +# dh_link -ppython-numpy /usr/share/man/man1/f2py.1.gz /usr/share/man/man1/f2py$$v.1.gz; \ +# dh_link -ppython-numpy-dbg /usr/share/man/man1/f2py.1.gz /usr/share/man/man1/f2py$$v-dbg.1.gz; \ +# done +# set -e; for v in $(PY3VERS); do \ +# dh_link -ppython3-numpy /usr/share/man/man1/f2py3.1.gz /usr/share/man/man1/f2py$$v.1.gz; \ +# dh_link -ppython3-numpy-dbg /usr/share/man/man1/f2py3.1.gz /usr/share/man/man1/f2py$$v-dbg.1.gz; \ +# done +# dh_link -ppython-numpy-dbg /usr/share/man/man1/f2py.1.gz 
/usr/share/man/man1/f2py-dbg.1.gz; +# dh_link -ppython3-numpy-dbg /usr/share/man/man1/f2py3.1.gz /usr/share/man/man1/f2py3-dbg.1.gz; override_dh_install: # add shebang information to f2py script @@ -133,16 +136,6 @@ dh_installdocs -i dh_sphinxdoc -i -build: build-arch build-indep ; - -build-arch: - dh build --with=python2,python3 - - -build-indep: - # build doc only for default python version - (export MPLCONFIGDIR=. ; make -C doc html PYTHON=python3 PYTHONPATH=../$(PY3LIBPATH)) - update_intersphinx_mapping: wget http://docs.python.org/dev/objects.inv -O debian/python.org_objects.inv diff -Nru python-numpy-1.13.3/debian/tests/capi python-numpy-1.14.5/debian/tests/capi --- python-numpy-1.13.3/debian/tests/capi 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/tests/capi 2018-06-13 04:20:22.000000000 +0000 @@ -3,7 +3,7 @@ PYS=$(pyversions -r 2>/dev/null)" "$(py3versions -r 2>/dev/null) -cd "$ADTTMP" +cd "$AUTOPKGTEST_TMP" cat << EOF > setup.py def configuration(parent_package='', top_path=None): diff -Nru python-numpy-1.13.3/debian/tests/distutils python-numpy-1.14.5/debian/tests/distutils --- python-numpy-1.13.3/debian/tests/distutils 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/tests/distutils 2018-06-13 04:20:22.000000000 +0000 @@ -3,7 +3,7 @@ PYS=$(pyversions -r 2>/dev/null) -cd "$ADTTMP" +cd "$AUTOPKGTEST_TMP" for py in $PYS; do # check distutils copes with multi arch diff -Nru python-numpy-1.13.3/debian/tests/f2py python-numpy-1.14.5/debian/tests/f2py --- python-numpy-1.13.3/debian/tests/f2py 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/tests/f2py 2018-06-13 04:20:22.000000000 +0000 @@ -3,7 +3,7 @@ PYS=$(pyversions -rv 2>/dev/null)" "$(py3versions -rv 2>/dev/null) -cd "$ADTTMP" +cd "$AUTOPKGTEST_TMP" cat << EOF > hello.f C File hello.f diff -Nru python-numpy-1.13.3/debian/tests/python2 python-numpy-1.14.5/debian/tests/python2 --- python-numpy-1.13.3/debian/tests/python2 2017-12-05 04:44:50.000000000 
+0000 +++ python-numpy-1.14.5/debian/tests/python2 2018-06-13 04:20:22.000000000 +0000 @@ -4,7 +4,7 @@ PYS=${PYS:-"$(pyversions -r 2>/dev/null)"} TESTPKG=${TESTPKG:-numpy} -cd "$ADTTMP" +cd "$AUTOPKGTEST_TMP" for py in $PYS; do echo "=== $py ===" diff -Nru python-numpy-1.13.3/debian/tests/python3 python-numpy-1.14.5/debian/tests/python3 --- python-numpy-1.13.3/debian/tests/python3 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/tests/python3 2018-06-13 04:20:22.000000000 +0000 @@ -4,7 +4,7 @@ PYS=${PYS:-"$(py3versions -r 2>/dev/null)"} TESTPKG=${TESTPKG:-numpy} -cd "$ADTTMP" +cd "$AUTOPKGTEST_TMP" for py in $PYS; do echo "=== $py ===" diff -Nru python-numpy-1.13.3/debian/versions python-numpy-1.14.5/debian/versions --- python-numpy-1.13.3/debian/versions 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/versions 2018-06-13 04:20:22.000000000 +0000 @@ -7,8 +7,8 @@ # This number must be changed every time C_API_VERSION changes. # It's should be normally equal to C_API_VERSION. # Description of the changes at core/code_generators/cversions.txt -api 11 +api 12 # Minimum version of Numpy that shares this minor API version. # This version must be updated every time C_API_VERSION changes. 
-api-min-version 1:1.13.1 +api-min-version 1:1.14.3 diff -Nru python-numpy-1.13.3/debian/watch python-numpy-1.14.5/debian/watch --- python-numpy-1.13.3/debian/watch 2017-12-05 04:44:50.000000000 +0000 +++ python-numpy-1.14.5/debian/watch 2018-06-13 04:20:22.000000000 +0000 @@ -1,7 +1,6 @@ -#version=3 -#opts=uversionmangle=s/(rc|a|b|c)/~$1/,pgpsigurlmangle=s/$/.asc/ \ -#https://pypi.debian.net/numpy/numpy-(.+)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz))) version=4 -opts="pgpmode=none" \ - https://pypi.python.org/pypi/numpy/ \ - https://pypi.python.org/packages/.*/.*/.*/numpy-([\d\.]+).zip#.* +opts=uversionmangle=s/(rc|a|b|c)/~$1/ \ +https://pypi.debian.net/numpy/numpy-(.+)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz))) +#opts="pgpmode=none" \ +# https://pypi.python.org/pypi/numpy/ \ +# https://pypi.python.org/packages/.*/.*/.*/numpy-([\d\.]+).zip#.* diff -Nru python-numpy-1.13.3/doc/f2py/apps.tex python-numpy-1.14.5/doc/f2py/apps.tex --- python-numpy-1.13.3/doc/f2py/apps.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/apps.tex 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ - -\section{Applications} -\label{sec:apps} - - -\subsection{Example: wrapping C library \texttt{fftw}} -\label{sec:wrapfftw} - -Here follows a simple example how to use \fpy to generate a wrapper -for C functions. Let us create a FFT code using the functions in FFTW -library. I'll assume that the library \texttt{fftw} is configured with -\texttt{-{}-enable-shared} option. - -Here is the wrapper for the typical usage of FFTW: -\begin{verbatim} -/* File: wrap_dfftw.c */ -#include - -extern void dfftw_one(fftw_complex *in,fftw_complex *out,int *n) { - fftw_plan p; - p = fftw_create_plan(*n,FFTW_FORWARD,FFTW_ESTIMATE); - fftw_one(p,in,out); - fftw_destroy_plan(p); -} -\end{verbatim} -and here follows the corresponding signature file (created manually): -\begin{verbatim} -!%f90 -! 
File: fftw.f90 -module fftw - interface - subroutine dfftw_one(in,out,n) - integer n - complex*16 in(n),out(n) - intent(out) out - intent(hide) n - end subroutine dfftw_one - end interface -end module fftw -\end{verbatim} - -Now let us generate the Python C/API module with \fpy: -\begin{verbatim} -f2py fftw.f90 -\end{verbatim} -and compile it -\begin{verbatim} -gcc -shared -I/numeric/include -I`f2py -I` -L/numeric/lib -ldfftw \ - -o fftwmodule.so -DNO_APPEND_FORTRAN fftwmodule.c wrap_dfftw.c -\end{verbatim} - -In Python: -\begin{verbatim} ->>> from Numeric import * ->>> from fftw import * ->>> print dfftw_one.__doc__ -Function signature: - out = dfftw_one(in) -Required arguments: - in : input rank-1 array('D') with bounds (n) -Return objects: - out : rank-1 array('D') with bounds (n) ->>> print dfftw_one([1,2,3,4]) -[ 10.+0.j -2.+2.j -2.+0.j -2.-2.j] ->>> -\end{verbatim} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: diff -Nru python-numpy-1.13.3/doc/f2py/bugs.tex python-numpy-1.14.5/doc/f2py/bugs.tex --- python-numpy-1.13.3/doc/f2py/bugs.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/bugs.tex 1970-01-01 00:00:00.000000000 +0000 @@ -1,109 +0,0 @@ - -\section{Bugs, Plans, and Feedback} -\label{sec:bugs} - -Currently no bugs have found that I was not able to fix. I will be -happy to receive bug reports from you (so that I could fix them and -keep the first sentence of this paragraph as true as possible ;-). -Note that \fpy is developed to work properly with gcc/g77 -compilers. -\begin{description} -\item[NOTE:] Wrapping callback functions returning \texttt{COMPLEX} - may fail on some systems. Workaround: avoid it by using callback - subroutines. 
-\end{description} - -Here follows a list of things that I plan to implement in (near) future: -\begin{enumerate} -\item recognize file types by their extension (signatures: - \texttt{*.pyf}, Fortran 77, Fortran 90 fixed: \texttt{*.f, *.for, *.F, *.FOR}, - Fortran 90 free: \texttt{*.F90, *.f90, *.m, *.f95, *.F95}); [DONE] -\item installation using \texttt{distutils} (when it will be stable); -\item put out to the web examples of \fpy usages in real situations: - wrapping \texttt{vode}, for example; -\item implement support for \texttt{PARAMETER} statement; [DONE] -\item rewrite test-site; -\item ... -\end{enumerate} -and here are things that I plan to do in future: -\begin{enumerate} -\item implement \texttt{intent(cache)} attribute for an optional work - arrays with a feature of allocating additional memory if needed; -\item use \fpy for wrapping Fortran 90/95 codes. \fpy should scan - Fortran 90/95 codes with no problems, what needs to be done is find - out how to call a Fortran 90/95 function (from a module) from - C. Anybody there willing to test \fpy with Fortran 90/95 modules? [DONE] -\item implement support for Fortran 90/95 module data; [DONE] -\item implement support for \texttt{BLOCK DATA} blocks (if needed); -\item test/document \fpy for \texttt{CHARACTER} arrays; -\item decide whether internal transposition of multi-dimensional - arrays is reasonable (need efficient code then), even if this is - controlled by the user trough some additional keyword; need - consistent and safe policy here; -\item use \fpy for generating wrapper functions also for C programs (a - kind of SWIG, only between Python and C). 
For that \fpy needs a - command line switch to inform itself that C scalars are passed in by - their value, not by their reference, for instance; -\item introduce a counter that counts the number of inefficient usages - of wrapper functions (copying caused by type-casting, non-contiguous - arrays); -\item if needed, make \texttt{DATA} statement to work properly for - arrays; -\item rewrite \texttt{COMMON} wrapper; [DONE] -\item ... -\end{enumerate} -I'll appreciate any feedback that will improve \fpy (bug reports, -suggestions, etc). If you find a correct Fortran code that fails with -\fpy, try to send me a minimal version of it so that I could track -down the cause of the failure. Note also that there is no sense to -send me files that are auto-generated with \fpy (I can generate them -myself); the version of \fpy that you are using (run \texttt{\fpy\ - -v}), and the relevant fortran codes or modified signature files -should be enough information to fix the bugs. Also add some -information on compilers and linkers that you use to the bug report. - - -\section{History of \fpy} -\label{sec:history} - -\begin{enumerate} -\item I was driven to start developing a tool such as \fpy after I had - wrote several Python C/API modules for interfacing various Fortran - routines from the Netlib. This work was tedious (some of functions - had more than 20 arguments, only few of them made sense for the - problems that they solved). I realized that most of the writing - could be done automatically. -\item On 9th of July, 1999, the first lines of the tool was written. A - prototype of the tool was ready to use in only three weeks. During - this time Travis Oliphant joined to the project and shared his - valuable knowledge and experience; the call-back mechanism is his - major contribution. Then I gave the tool to public under the name - FPIG --- \emph{Fortran to Python Interface Generator}. The tool contained - only one file \texttt{f2py.py}. 
-\item By autumn, it was clear that a better implementation was needed - as the debugging process became very tedious. So, I reserved some - time and rewrote the tool from scratch. The most important result of - this rewriting was the code that reads real Fortran codes and - determines the signatures of the Fortran routines. The main - attention was paid in particular to this part so that the tool - could read arbitrary Fortran~77/90/95 codes. As a result, the other - side of the tools task, that is, generating Python C/API functions, - was not so great. In public, this version of the tool was called - \texttt{f2py2e} --- \emph{Fortran to Python C/API generator, the - Second Edition}. -\item So, a month before The New Year 2000, I started the third - iteration of the \fpy development. Now the main attention was to - have a good C/API module constructing code. By 21st of January, - 2000, the tool of generating wrapper functions for Fortran routines - was ready. It had many new features and was more robust than ever. -\item In 25th of January, 2000, the first public release of \fpy was - announced (version 1.116). -\item In 12th of September, 2000, the second public release of \fpy was - announced (version 2.264). It now has among other changes a support - for Fortran 90/95 module routines. 
-\end{enumerate} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: diff -Nru python-numpy-1.13.3/doc/f2py/BUGS.txt python-numpy-1.14.5/doc/f2py/BUGS.txt --- python-numpy-1.13.3/doc/f2py/BUGS.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/BUGS.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -December 1, 2002: - -C FILE: STRING.F - SUBROUTINE FOO - END -C END OF FILE STRING.F -does not build with - f2py -c -m string string.f -Cause: string is mapped to string_bn -************************************************************************** -August 16, 2001: -1) re in Python 2.x is **three** times slower than the re in Python 1.5. -************************************************************************** -HP-UX B.10.20 A 9000/780: -Fortran function returning character*(*) (id=7) ... failed(core dump) -Fortran function returning logical*8 (id=21) ... expected .true. but got 0 -Callback function returning real (id=45) ... expected 34.56 but got 14087495680.0 -Callback function returning real*4 (id=46) ... expected 34.56 but got 14087495680.0 -Callback function returning logical*8 (id=55) ... expected .true. but got 0 - C compiler: gcc ('gcc 2.x.x' 2.95.2) (from .f2py_get_compiler_CC) - Fortran compiler: g77 ('g77 2.x.x' 2.95.2) (from .f2py_get_compiler_FC) - Linker: ld ('HP-UX ld' 92453-07 linker linker ld B.10.24 961204) (from .f2py_get_compiler_LD) -************************************************************************** -Linux 2.2.13-0.9 #1 Thu Dec 9 17:03:57 EST 1999 alpha unknown: -Fortran function returning character*(*) (id=7) ... expected 'abcdefgh' but got 'abcdefgh \201' (o?k) -Callback function returning complex (id=48) ... failed(core dump) - Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump) -Callback function returning complex*8 (id=49) ... failed(core dump) - Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump) -Callback function returning complex*16 (id=50) ... 
failed(core dump) - Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump) - C compiler: cc ('Compaq C' V6.2-002) (from .f2py_get_compiler_CC) - Fortran compiler: fort ('Compaq Fortran' V1.0-920) (from .f2py_get_compiler_FC) - Linker: fort ('Compaq Fortran' V1.0-920) (from .f2py_get_compiler_LD) -************************************************************************** -Linux 2.2.14-15mdk #1 Tue Jan 4 22:24:20 CET 2000 i686 unknown: -Callback function returning logical*8 (id=55) ... failed - C compiler: cc ('gcc 2.x.x' 2.95.2) - Fortran compiler: f90 ('Absoft F90' 3.0) - Linker: ld ('GNU ld' 2.9.5) -************************************************************************** -IRIX64 6.5 04151556 IP30: -Testing integer, intent(inout) ...failed # not f2py problem -Testing integer, intent(inout,out) ...failed -Testing integer*1, intent(inout) ...failed -Testing integer*1, intent(inout,out) ...failed -Testing integer*8, intent(inout) ...failed -Testing integer*8, intent(inout,out) ...failed -cc-1140 cc: WARNING File = genmodule.c, Line = 114 - A value of type "void *" cannot be used to initialize an entity of type - "void (*)()". - {"foo",-1,{-1},0,(char *)F_FUNC(foo,FOO),(void *)gen_foo,doc_gen_foo}, - C compiler: cc ('MIPSpro 7 Compilers' 7.30) - Fortran compiler: f77 ('MIPSpro 7 Compilers' 7.30) - Linker: ld ('Linker for MIPSpro 7 Compilers' 7.30.) diff -Nru python-numpy-1.13.3/doc/f2py/collectinput.py python-numpy-1.14.5/doc/f2py/collectinput.py --- python-numpy-1.13.3/doc/f2py/collectinput.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/collectinput.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -#!/usr/bin/env python -r""" -collectinput - Collects all files that are included to a main Latex document - with \input or \include commands. These commands must be - in separate lines. 
- -Copyright 1999 Pearu Peterson all rights reserved, -Pearu Peterson -Permission to use, modify, and distribute this software is given under the -terms of the NumPy License - -NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -Pearu Peterson - -Usage: - collectinput - collectinput # =inputless_ - collectinput # in and out are stdin and stdout - -""" -from __future__ import division, absolute_import, print_function - -__version__ = "0.0" - -stdoutflag=0 -import sys -import fileinput -import re - -if sys.version_info[0] >= 3: - from subprocess import getoutput -else: - from commands import getoutput - -try: fn=sys.argv[2] -except: - try: fn='inputless_'+sys.argv[1] - except: stdoutflag=1 -try: fi=sys.argv[1] -except: fi=() -if not stdoutflag: - sys.stdout=open(fn, 'w') - -nonverb=r'[\w\s\\&=\^\*\.\{\(\)\[\?\+\$/]*(?!\\verb.)' -input=re.compile(nonverb+r'\\(input|include)\*?\s*\{?.*}?') -comment=re.compile(r'[^%]*%') - -for l in fileinput.input(fi): - l=l[:-1] - l1='' - if comment.match(l): - m=comment.match(l) - l1=l[m.end()-1:] - l=l[:m.end()-1] - m=input.match(l) - if m: - l=l.strip() - if l[-1]=='}': l=l[:-1] - i=m.end()-2 - sys.stderr.write('>>>>>>') - while i>-1 and (l[i] not in [' ', '{']): i=i-1 - if i>-1: - fn=l[i+1:] - try: f=open(fn, 'r'); flag=1; f.close() - except: - try: f=open(fn+'.tex', 'r'); flag=1;fn=fn+'.tex'; f.close() - except: flag=0 - if flag==0: - sys.stderr.write('Could not open a file: '+fn+'\n') - print(l+l1) - continue - elif flag==1: - sys.stderr.write(fn+'\n') - print('%%%%% Begin of '+fn) - print(getoutput(sys.argv[0]+' < '+fn)) - print('%%%%% End of '+fn) - else: - sys.stderr.write('Could not extract a file name from: '+l) - print(l+l1) - else: - print(l+l1) -sys.stdout.close() diff -Nru python-numpy-1.13.3/doc/f2py/commands.tex python-numpy-1.14.5/doc/f2py/commands.tex --- python-numpy-1.13.3/doc/f2py/commands.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/commands.tex 1970-01-01 00:00:00.000000000 
+0000 @@ -1,20 +0,0 @@ -\usepackage{xspace} -\usepackage{verbatim} - -%%tth:\newcommand{\xspace}{ } - -\newcommand{\fpy}{\texttt{f2py}\xspace} - -\newcommand{\bs}{\symbol{`\\}} -% need bs here: -%%tth:\newcommand{\bs}{\texttt{}} - -\newcommand{\shell}[1]{\hspace*{1em}\texttt{sh> \begin{minipage}[t]{0.8\textwidth}#1\end{minipage}}} - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: - - diff -Nru python-numpy-1.13.3/doc/f2py/default.css python-numpy-1.14.5/doc/f2py/default.css --- python-numpy-1.13.3/doc/f2py/default.css 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/default.css 1970-01-01 00:00:00.000000000 +0000 @@ -1,180 +0,0 @@ -/* -:Author: David Goodger -:Contact: goodger@users.sourceforge.net -:date: $Date: 2002/08/01 20:52:44 $ -:version: $Revision: 1.1 $ -:copyright: This stylesheet has been placed in the public domain. - -Default cascading style sheet for the HTML output of Docutils. -*/ - -body { - background: #FFFFFF ; - color: #000000 -} - -a.footnote-reference { - font-size: smaller ; - vertical-align: super } - -a.target { - color: blue } - -a.toc-backref { - text-decoration: none ; - color: black } - -dd { - margin-bottom: 0.5em } - -div.abstract { - margin: 2em 5em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } - -div.attention, div.caution, div.danger, div.error, div.hint, -div.important, div.note, div.tip, div.warning { - margin: 2em ; - border: medium outset ; - padding: 1em } - -div.attention p.admonition-title, div.caution p.admonition-title, -div.danger p.admonition-title, div.error p.admonition-title, -div.warning p.admonition-title { - color: red ; - font-weight: bold ; - font-family: sans-serif } - -div.hint p.admonition-title, div.important p.admonition-title, -div.note p.admonition-title, div.tip p.admonition-title { - font-weight: bold ; - font-family: sans-serif } - -div.dedication { - margin: 2em 5em ; - text-align: center ; - font-style: italic } - 
-div.dedication p.topic-title { - font-weight: bold ; - font-style: normal } - -div.figure { - margin-left: 2em } - -div.footer, div.header { - font-size: smaller } - -div.system-messages { - margin: 5em } - -div.system-messages h1 { - color: red } - -div.system-message { - border: medium outset ; - padding: 1em } - -div.system-message p.system-message-title { - color: red ; - font-weight: bold } - -div.topic { - margin: 2em } - -h1.title { - text-align: center } - -h2.subtitle { - text-align: center } - -hr { - width: 75% } - -ol.simple, ul.simple { - margin-bottom: 1em } - -ol.arabic { - list-style: decimal } - -ol.loweralpha { - list-style: lower-alpha } - -ol.upperalpha { - list-style: upper-alpha } - -ol.lowerroman { - list-style: lower-roman } - -ol.upperroman { - list-style: upper-roman } - -p.caption { - font-style: italic } - -p.credits { - font-style: italic ; - font-size: smaller } - -p.first { - margin-top: 0 } - -p.label { - white-space: nowrap } - -p.topic-title { - font-weight: bold } - -pre.literal-block, pre.doctest-block { - margin-left: 2em ; - margin-right: 2em ; - background-color: #eeeeee } - -span.classifier { - font-family: sans-serif ; - font-style: oblique } - -span.classifier-delimiter { - font-family: sans-serif ; - font-weight: bold } - -span.field-argument { - font-style: italic } - -span.interpreted { - font-family: sans-serif } - -span.option-argument { - font-style: italic } - -span.problematic { - color: red } - -table { - margin-top: 0.5em ; - margin-bottom: 0.5em } - -table.citation { - border-left: solid thin gray ; - padding-left: 0.5ex } - -table.docinfo { - margin: 2em 4em } - -table.footnote { - border-left: solid thin black ; - padding-left: 0.5ex } - -td, th { - padding-left: 0.5em ; - padding-right: 0.5em ; - vertical-align: baseline } - -td.docinfo-name { - font-weight: bold ; - text-align: right } - -td.field-name { - font-weight: bold } diff -Nru python-numpy-1.13.3/doc/f2py/docutils.conf 
python-numpy-1.14.5/doc/f2py/docutils.conf --- python-numpy-1.13.3/doc/f2py/docutils.conf 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/docutils.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -[general] - -# These entries affect all processing: -#source-link: 1 -datestamp: %Y-%m-%d %H:%M UTC -generator: 1 - -# These entries affect HTML output: -#stylesheet-path: pearu_style.css -output-encoding: latin-1 - -# These entries affect reStructuredText-style PEPs: -#pep-template: pep-html-template -#pep-stylesheet-path: stylesheets/pep.css -#python-home: http://www.python.org -#no-random: 1 diff -Nru python-numpy-1.13.3/doc/f2py/ex1/arr.f python-numpy-1.14.5/doc/f2py/ex1/arr.f --- python-numpy-1.13.3/doc/f2py/ex1/arr.f 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/ex1/arr.f 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - subroutine arr(l,m,n,a) - integer l,m,n - real*8 a(l,m,n) - end diff -Nru python-numpy-1.13.3/doc/f2py/ex1/bar.f python-numpy-1.14.5/doc/f2py/ex1/bar.f --- python-numpy-1.13.3/doc/f2py/ex1/bar.f 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/ex1/bar.f 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ - function bar(a,b) - integer a,b,bar - bar = a + b - end diff -Nru python-numpy-1.13.3/doc/f2py/ex1/foobar.f90 python-numpy-1.14.5/doc/f2py/ex1/foobar.f90 --- python-numpy-1.13.3/doc/f2py/ex1/foobar.f90 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/ex1/foobar.f90 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -!%f90 -module foobar ! in - interface ! in :foobar - subroutine foo(a) ! in :foobar:foo.f - integer intent(inout) :: a - end subroutine foo - function bar(a,b) ! in :foobar:bar.f - integer :: a - integer :: b - integer :: bar - end function bar - end interface -end module foobar - -! This file was auto-generated with f2py (version:0.95). -! 
See http://cens.ioc.ee/projects/f2py2e/ diff -Nru python-numpy-1.13.3/doc/f2py/ex1/foobarmodule.tex python-numpy-1.14.5/doc/f2py/ex1/foobarmodule.tex --- python-numpy-1.13.3/doc/f2py/ex1/foobarmodule.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/ex1/foobarmodule.tex 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -% This file is auto-generated with f2py (version:2.266) -\section{Module \texttt{foobar}} - -This module contains two examples that are used in \texttt{f2py} documentation. - -\subsection{Wrapper function \texttt{foo}} - - -\noindent{{}\verb@foo@{}}\texttt{(a)} ---- Example of a wrapper function of a Fortran subroutine. - -\noindent Required arguments: -\begin{description} -\item[]{{}\verb@a : in/output rank-0 array(int,'i')@{}} ---- 5 is added to the variable {{}\verb@a@{}} ``in place''. -\end{description} - -\subsection{Wrapper function \texttt{bar}} - - -\noindent{{}\verb@bar = bar@{}}\texttt{(a, b)} ---- Add two values. - -\noindent Required arguments: -\begin{description} -\item[]{{}\verb@a : input int@{}} ---- The first value. -\item[]{{}\verb@b : input int@{}} ---- The second value. -\end{description} -\noindent Return objects: -\begin{description} -\item[]{{}\verb@bar : int@{}} ---- See elsewhere. -\end{description} - diff -Nru python-numpy-1.13.3/doc/f2py/ex1/foobar-smart.f90 python-numpy-1.14.5/doc/f2py/ex1/foobar-smart.f90 --- python-numpy-1.13.3/doc/f2py/ex1/foobar-smart.f90 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/ex1/foobar-smart.f90 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -!%f90 -module foobar ! in - note(This module contains two examples that are used in & - \texttt{f2py} documentation.) foobar - interface ! in :foobar - subroutine foo(a) ! in :foobar:foo.f - note(Example of a wrapper function of a Fortran subroutine.) foo - integer intent(inout),& - note(5 is added to the variable {{}\verb@a@{}} ``in place''.) 
:: a - end subroutine foo - function bar(a,b) result (ab) ! in :foobar:bar.f - integer :: a - integer :: b - integer :: ab - note(The first value.) a - note(The second value.) b - note(Add two values.) bar - note(The result.) ab - end function bar - end interface -end module foobar - -! This file was auto-generated with f2py (version:0.95). -! See http://cens.ioc.ee/projects/f2py2e/ diff -Nru python-numpy-1.13.3/doc/f2py/ex1/foo.f python-numpy-1.14.5/doc/f2py/ex1/foo.f --- python-numpy-1.13.3/doc/f2py/ex1/foo.f 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/ex1/foo.f 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ - subroutine foo(a) - integer a -cf2py intent(in,out) :: a - a = a + 5 - end diff -Nru python-numpy-1.13.3/doc/f2py/ex1/runme python-numpy-1.14.5/doc/f2py/ex1/runme --- python-numpy-1.13.3/doc/f2py/ex1/runme 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/ex1/runme 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -#!/bin/sh - -f2py2e='python ../../f2py2e.py' -PYINC=`$f2py2e -pyinc` -$f2py2e foobar-smart.pyf --short-latex --overwrite-makefile -makefile foo.f bar.f -gmake -f Makefile-foobar -#gcc -O3 -I$PYINC -I$PYINC/Numeric -shared -o foobarmodule.so foobarmodule.c foo.f bar.f -python -c ' -import foobar -print foobar.__doc__ -print foobar.bar(2,3) -from Numeric import * -a=array(3) -print a,foobar.foo(a),a -print foobar.foo.__doc__ -print foobar.bar.__doc__ -print "ok" -' diff -Nru python-numpy-1.13.3/doc/f2py/f2py.1 python-numpy-1.14.5/doc/f2py/f2py.1 --- python-numpy-1.13.3/doc/f2py/f2py.1 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2py.1 1970-01-01 00:00:00.000000000 +0000 @@ -1,209 +0,0 @@ -.TH "F2PY" 1 -.SH NAME -f2py \- Fortran to Python interface generator -.SH SYNOPSIS -(1) To construct extension module sources: - -.B f2py -[] [[[only:]||[skip:]] ] [: ...] 
- -(2) To compile fortran files and build extension modules: - -.B f2py -\-c [, , ] - -(3) To generate signature files: - -.B f2py -\-h ...< same options as in (1) > -.SH DESCRIPTION -This program generates a Python C/API file (module.c) -that contains wrappers for given Fortran or C functions so that they -can be called from Python. -With the \-c option the corresponding -extension modules are built. -.SH OPTIONS -.TP -.B \-h -Write signatures of the fortran routines to file and -exit. You can then edit and use it instead of . If ==stdout then the signatures are printed to -stdout. -.TP -.B -Names of fortran routines for which Python C/API functions will be -generated. Default is all that are found in . -.TP -.B skip: -Ignore fortran functions that follow until `:'. -.TP -.B only: -Use only fortran functions that follow until `:'. -.TP -.B : -Get back to mode. -.TP -.B \-m -Name of the module; f2py generates a Python/C API file -module.c or extension module . Default is -\'untitled\'. -.TP -.B \-\-[no\-]lower -Do [not] lower the cases in . By default, \-\-lower is -assumed with \-h key, and \-\-no\-lower without \-h key. -.TP -.B \-\-build\-dir -All f2py generated files are created in . Default is tempfile.mkdtemp(). -.TP -.B \-\-overwrite\-signature -Overwrite existing signature file. -.TP -.B \-\-[no\-]latex\-doc -Create (or not) module.tex. Default is \-\-no\-latex\-doc. -.TP -.B \-\-short\-latex -Create 'incomplete' LaTeX document (without commands \\documentclass, -\\tableofcontents, and \\begin{document}, \\end{document}). -.TP -.B \-\-[no\-]rest\-doc -Create (or not) module.rst. Default is \-\-no\-rest\-doc. -.TP -.B \-\-debug\-capi -Create C/API code that reports the state of the wrappers during -runtime. Useful for debugging. -.TP -.B \-include\'\' -Add CPP #include statement to the C/API code. should be -in the format of either `"filename.ext"' or `'. As a -result will be included just before wrapper functions -part in the C/API code. 
The option is depreciated, use `usercode` -statement in signature files instead. -.TP -.B \-\-[no\-]wrap\-functions -Create Fortran subroutine wrappers to Fortran 77 -functions. \-\-wrap\-functions is default because it ensures maximum -portability/compiler independence. -.TP -.B \-\-help\-link [..] -List system resources found by system_info.py. [..] may contain -a list of resources names. See also \-\-link\- switch below. -.TP -.B \-\-quiet -Run quietly. -.TP -.B \-\-verbose -Run with extra verbosity. -.TP -.B \-v -Print f2py version ID and exit. -.TP -.B \-\-include_paths path1:path2:... -Search include files (that f2py will scan) from the given directories. -.SH "CONFIG_FC OPTIONS" -The following options are effective only when \-c switch is used. -.TP -.B \-\-help-compiler -List available Fortran compilers [DEPRECIATED]. -.TP -.B \-\-fcompiler= -Specify Fortran compiler type by vendor. -.TP -.B \-\-compiler= -Specify C compiler type (as defined by distutils) -.TP -.B \-\-fcompiler-exec= -Specify the path to F77 compiler [DEPRECIATED]. -.TP -.B \-\-f90compiler\-exec= -Specify the path to F90 compiler [DEPRECIATED]. -.TP -.B \-\-help\-fcompiler -List available Fortran compilers and exit. -.TP -.B \-\-f77exec= -Specify the path to F77 compiler. -.TP -.B \-\-f90exec= -Specify the path to F90 compiler. -.TP -.B \-\-f77flags="..." -Specify F77 compiler flags. -.TP -.B \-\-f90flags="..." -Specify F90 compiler flags. -.TP -.B \-\-opt="..." -Specify optimization flags. -.TP -.B \-\-arch="..." -Specify architecture specific optimization flags. -.TP -.B \-\-noopt -Compile without optimization. -.TP -.B \-\-noarch -Compile without arch-dependent optimization. -.TP -.B \-\-debug -Compile with debugging information. -.SH "EXTRA OPTIONS" -The following options are effective only when \-c switch is used. -.TP -.B \-\-link- -Link extension module with as defined by -numpy_distutils/system_info.py. E.g. 
to link with optimized LAPACK -libraries (vecLib on MacOSX, ATLAS elsewhere), use -\-\-link\-lapack_opt. See also \-\-help\-link switch. - -.TP -.B -L/path/to/lib/ -l -.TP -.B -D -U -I/path/to/include/ -.TP -.B .o .so .a - -.TP -.B -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN -DUNDERSCORE_G77 -Macros that might be required with non-gcc Fortran compilers. - -.TP -.B -DF2PY_REPORT_ATEXIT -To print out a performance report of F2PY interface when python -exits. Available for Linux. - -.TP -.B -DF2PY_REPORT_ON_ARRAY_COPY= -To send a message to stderr whenever F2PY interface makes a copy of an -array. Integer sets the threshold for array sizes when a message -should be shown. - -.SH REQUIREMENTS -Python 1.5.2 or higher (2.x is supported). - -Numerical Python 13 or higher (20.x,21.x,22.x,23.x are supported). - -Optional Numarray 0.9 or higher partially supported. - -numpy_distutils from Scipy (can be downloaded from F2PY homepage) -.SH "SEE ALSO" -python(1) -.SH BUGS -For instructions on reporting bugs, see - - http://cens.ioc.ee/projects/f2py2e/FAQ.html -.SH AUTHOR -Pearu Peterson -.SH "INTERNET RESOURCES" -Main website: http://cens.ioc.ee/projects/f2py2e/ - -User's Guide: http://cens.ioc.ee/projects/f2py2e/usersguide/ - -Mailing list: http://cens.ioc.ee/mailman/listinfo/f2py-users/ - -Scipy website: http://www.numpy.org -.SH COPYRIGHT -Copyright (c) 1999, 2000, 2001, 2002, 2003, 2004, 2005 Pearu Peterson -.SH LICENSE -NumPy License -.SH VERSION -2.45.241 diff -Nru python-numpy-1.13.3/doc/f2py/f2py2e.tex python-numpy-1.14.5/doc/f2py/f2py2e.tex --- python-numpy-1.13.3/doc/f2py/f2py2e.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2py2e.tex 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -\documentclass{article} -\usepackage{a4wide} - -\input commands - -\title{\fpy\\Fortran to Python Interface Generator\\{\large Second Edition}} -\author{Pearu Peterson \texttt{}} -\date{$Revision: 1.16 $\\\today} -\begin{document} 
-\special{html: If equations does not show Greek letters or large - brackets correctly, then your browser configuration needs some - adjustment. Read the notes for Enabling Symbol - Fonts in Netscape under X . In addition, the browser must be set - to use document fonts. -} - -\maketitle -\begin{abstract} - \fpy is a Python program that generates Python C/API modules for - wrapping Fortran~77/90/95 codes to Python. The user can influence the - process by modifying the signature files that \fpy generates when - scanning the Fortran codes. This document describes the syntax of - the signature files and the ways how the user can dictate the tool - to produce wrapper functions with desired Python signatures. Also - how to call the wrapper functions from Python is discussed. - - See \texttt{http://cens.ioc.ee/projects/f2py2e/} for updates of this - document and the tool. -\end{abstract} - -\tableofcontents - -\input intro -\input signaturefile -\input notes -\input options -\input bugs - -\appendix -\input ex1/foobarmodule -\input apps -\end{document} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: t -%%% End: - - Binary files /tmp/tmpGKnT1E/GZjCZWjnZO/python-numpy-1.13.3/doc/f2py/f2python9-final/aerostructure.jpg and /tmp/tmpGKnT1E/690WkC224t/python-numpy-1.14.5/doc/f2py/f2python9-final/aerostructure.jpg differ Binary files /tmp/tmpGKnT1E/GZjCZWjnZO/python-numpy-1.13.3/doc/f2py/f2python9-final/flow.jpg and /tmp/tmpGKnT1E/690WkC224t/python-numpy-1.14.5/doc/f2py/f2python9-final/flow.jpg differ diff -Nru python-numpy-1.13.3/doc/f2py/f2python9-final/mk_html.sh python-numpy-1.14.5/doc/f2py/f2python9-final/mk_html.sh --- python-numpy-1.13.3/doc/f2py/f2python9-final/mk_html.sh 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2python9-final/mk_html.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -#!/bin/sh -cd src - -test -f aerostructure.eps || convert ../aerostructure.jpg aerostructure.eps -test -f flow.eps || convert ../flow.jpg 
flow.eps -test -f structure.eps || convert ../structure.jpg structure.eps - -latex python9.tex -latex python9.tex -latex python9.tex - -test `which tth` && cat python9.tex | sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | tth -Lpython9 -i > ../f2python9.html -cd .. diff -Nru python-numpy-1.13.3/doc/f2py/f2python9-final/mk_pdf.sh python-numpy-1.14.5/doc/f2py/f2python9-final/mk_pdf.sh --- python-numpy-1.13.3/doc/f2py/f2python9-final/mk_pdf.sh 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2python9-final/mk_pdf.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -#!/bin/sh -cd src - -test -f aerostructure.pdf || convert ../aerostructure.jpg aerostructure.pdf -test -f flow.pdf || convert ../flow.jpg flow.pdf -test -f structure.pdf || convert ../structure.jpg structure.pdf - -cat python9.tex | sed -e "s/eps,/pdf,/g" > python9pdf.tex -pdflatex python9pdf.tex -pdflatex python9pdf.tex -pdflatex python9pdf.tex - -mv python9pdf.pdf ../f2python9.pdf \ No newline at end of file diff -Nru python-numpy-1.13.3/doc/f2py/f2python9-final/mk_ps.sh python-numpy-1.14.5/doc/f2py/f2python9-final/mk_ps.sh --- python-numpy-1.13.3/doc/f2py/f2python9-final/mk_ps.sh 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2python9-final/mk_ps.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -#!/bin/sh -cd src - -test -f aerostructure.eps || convert ../aerostructure.jpg aerostructure.eps -test -f flow.eps || convert ../flow.jpg flow.eps -test -f structure.eps || convert ../structure.jpg structure.eps - -latex python9.tex -latex python9.tex -latex python9.tex - -dvips python9.dvi -o ../f2python9.ps -cd .. 
-gzip -f f2python9.ps diff -Nru python-numpy-1.13.3/doc/f2py/f2python9-final/README.txt python-numpy-1.14.5/doc/f2py/f2python9-final/README.txt --- python-numpy-1.13.3/doc/f2py/f2python9-final/README.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2python9-final/README.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ - -This directory contains the source of the paper - - "Fortran to Python Interface Generator with an Application - to Aerospace Engineering" - -by - Pearu Peterson (the corresponding author) - Joaquim R. R. A. Martins - Juan J. Alonso - -for The 9th International Python Conference, March 5-8, 2001, Long Beach, California. - -The paper is provided here is in the HTML format: - - f2python9.html (size=48151 bytes) - -Note that this file includes the following JPG images - - flow.jpg (size=13266) - structure.jpg (size=17860) - aerostructure.jpg (size=72247) - -PS: -The HTML file f2python9.html is generated using TTH (http://hutchinson.belmont.ma.us/tth/) -from the LaTeX source file `python9.tex'. The source can be found in the - src/ -directory. This directory contains also the following EPS files - flow.eps - structure.eps - aerostructure.eps -and the text files - examples/{exp1.f,exp1mess.txt,exp1session.txt,foo.pyf,foom.pyf} -that are used by the LaTeX source python9.tex. 
- -Regards, - Pearu -January 15, 2001 diff -Nru python-numpy-1.13.3/doc/f2py/f2python9-final/src/examples/exp1.f python-numpy-1.14.5/doc/f2py/f2python9-final/src/examples/exp1.f --- python-numpy-1.13.3/doc/f2py/f2python9-final/src/examples/exp1.f 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2python9-final/src/examples/exp1.f 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ - subroutine exp1(l,u,n) -C Input: n is number of iterations -C Output: l,u are such that -C l(1)/l(2) < exp(1) < u(1)/u(2) -C -Cf2py integer*4 :: n = 1 -Cf2py intent(out) l,u - integer*4 n,i - real*8 l(2),u(2),t,t1,t2,t3,t4 - l(2) = 1 - l(1) = 0 - u(2) = 0 - u(1) = 1 - do 10 i=0,n - t1 = 4 + 32*(1+i)*i - t2 = 11 + (40+32*i)*i - t3 = 3 + (24+32*i)*i - t4 = 8 + 32*(1+i)*i - t = u(1) - u(1) = l(1)*t1 + t*t2 - l(1) = l(1)*t3 + t*t4 - t = u(2) - u(2) = l(2)*t1 + t*t2 - l(2) = l(2)*t3 + t*t4 - 10 continue - end diff -Nru python-numpy-1.13.3/doc/f2py/f2python9-final/src/examples/exp1mess.txt python-numpy-1.14.5/doc/f2py/f2python9-final/src/examples/exp1mess.txt --- python-numpy-1.13.3/doc/f2py/f2python9-final/src/examples/exp1mess.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2python9-final/src/examples/exp1mess.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,17 +0,0 @@ -Reading fortran codes... - Reading file 'exp1.f' -Post-processing... - Block: foo - Block: exp1 -Creating 'Makefile-foo'... - Linker: ld ('GNU ld' 2.9.5) - Fortran compiler: f77 ('g77 2.x.x' 2.95.2) - C compiler: cc ('gcc 2.x.x' 2.95.2) -Building modules... - Building module "foo"... - Constructing wrapper function "exp1"... 
- l,u = exp1([n]) - Wrote C/API module "foo" to file "foomodule.c" - Documentation is saved to file "foomodule.tex" -Run GNU make to build shared modules: - gmake -f Makefile- [test] diff -Nru python-numpy-1.13.3/doc/f2py/f2python9-final/src/examples/exp1session.txt python-numpy-1.14.5/doc/f2py/f2python9-final/src/examples/exp1session.txt --- python-numpy-1.13.3/doc/f2py/f2python9-final/src/examples/exp1session.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2python9-final/src/examples/exp1session.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ ->>> import foo,Numeric ->>> print foo.exp1.__doc__ -exp1 - Function signature: - l,u = exp1([n]) -Optional arguments: - n := 1 input int -Return objects: - l : rank-1 array('d') with bounds (2) - u : rank-1 array('d') with bounds (2) - ->>> l,u = foo.exp1() ->>> print l,u -[ 1264. 465.] [ 1457. 536.] ->>> print l[0]/l[1], u[0]/u[1]-l[0]/l[1] -2.71827956989 2.25856657199e-06 ->>> l,u = foo.exp1(2) ->>> print l,u -[ 517656. 190435.] [ 566827. 208524.] ->>> print l[0]/l[1], u[0]/u[1]-l[0]/l[1] -2.71828182845 1.36437527942e-11 diff -Nru python-numpy-1.13.3/doc/f2py/f2python9-final/src/examples/foom.pyf python-numpy-1.14.5/doc/f2py/f2python9-final/src/examples/foom.pyf --- python-numpy-1.13.3/doc/f2py/f2python9-final/src/examples/foom.pyf 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2python9-final/src/examples/foom.pyf 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -!%f90 -*- f90 -*- -python module foo - interface - subroutine exp1(l,u,n) - real*8 dimension(2) :: l - real*8 dimension(2) :: u - intent(out) l,u - integer*4 optional :: n = 1 - end subroutine exp1 - end interface -end python module foo -! This file was auto-generated with f2py -! (version:2.298) and modified by pearu. -! 
See http://cens.ioc.ee/projects/f2py2e/ diff -Nru python-numpy-1.13.3/doc/f2py/f2python9-final/src/examples/foo.pyf python-numpy-1.14.5/doc/f2py/f2python9-final/src/examples/foo.pyf --- python-numpy-1.13.3/doc/f2py/f2python9-final/src/examples/foo.pyf 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/f2python9-final/src/examples/foo.pyf 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -!%f90 -*- f90 -*- -python module foo - interface - subroutine exp1(l,u,n) - real*8 dimension(2) :: l - real*8 dimension(2) :: u - integer*4 :: n - end subroutine exp1 - end interface -end python module foo -! This file was auto-generated with f2py -! (version:2.298). -! See http://cens.ioc.ee/projects/f2py2e/ Binary files /tmp/tmpGKnT1E/GZjCZWjnZO/python-numpy-1.13.3/doc/f2py/f2python9-final/structure.jpg and /tmp/tmpGKnT1E/690WkC224t/python-numpy-1.14.5/doc/f2py/f2python9-final/structure.jpg differ diff -Nru python-numpy-1.13.3/doc/f2py/FAQ.txt python-numpy-1.14.5/doc/f2py/FAQ.txt --- python-numpy-1.13.3/doc/f2py/FAQ.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/FAQ.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,603 +0,0 @@ - -====================================================================== - F2PY Frequently Asked Questions -====================================================================== - -.. contents:: - -General information -=================== - -Q: How to get started? ----------------------- - -First, install__ F2PY. Then check that F2PY installation works -properly (see below__). Try out a `simple example`__. - -Read `F2PY Users Guide and Reference Manual`__. It contains lots -of complete examples. - -If you have any questions/problems when using F2PY, don't hesitate to -turn to `F2PY users mailing list`__ or directly to me. - -__ index.html#installation -__ #testing -__ index.html#usage -__ usersguide/index.html -__ index.html#mailing-list - -Q: When to report bugs? 
------------------------ - -* If F2PY scanning fails on Fortran sources that otherwise compile - fine. - -* After checking that you have the latest version of F2PY from its - CVS. It is possible that a bug has been fixed already. See also the - log entries in the file `HISTORY.txt`_ (`HISTORY.txt in CVS`_). - -* After checking that your Python and Numerical Python installations - work correctly. - -* After checking that your C and Fortran compilers work correctly. - -Q: How to report bugs? ----------------------- - -F2PY is part of NumPy. Report bugs on the NumPy issue tracker at -__ https://github.com/numpy/numpy/issues -Please, include information about your platform (operating system, -version) and compilers/linkers, e.g. the output (both stdout/stderr) of -:: - - python -c 'import numpy.f2py.diagnose;numpy.f2py.diagnose.run()' - -Feel free to add any other relevant information. However, avoid -sending the output of F2PY generated ``.pyf`` files (unless they are -manually modified) or any binary files like shared libraries or object -codes. - -N.B. You may notice that other F2PY issues are tagged 'f2py'. Only the -admins can add tags to issues, don't waste time trying to work out how -to tag it yourself. - -While reporting bugs, you may find the following notes useful: - -* `How To Ask Questions The Smart Way`__ by E. S. Raymond and R. Moen. - -* `How to Report Bugs Effectively`__ by S. Tatham. - -__ http://www.catb.org/~esr/faqs/smart-questions.html -__ http://www.chiark.greenend.org.uk/~sgtatham/bugs.html - -Installation -============ - -Q: How to use F2PY with different Python versions? --------------------------------------------------- - -Run the installation command using the corresponding Python -executable. For example, -:: - - python2.1 setup.py install - -installs the ``f2py`` script as ``f2py2.1``. - -See `Distutils User Documentation`__ for more information how to -install Python modules to non-standard locations. 
- -__ http://www.python.org/sigs/distutils-sig/doc/inst/inst.html - - -Q: Why F2PY is not working after upgrading? -------------------------------------------- - -If upgrading from F2PY version 2.3.321 or earlier then remove all f2py -specific files from ``/path/to/python/bin`` directory before -running installation command. - -Q: How to get/upgrade numpy and F2PY from git? ---------------------------------------------------------------- - -The numpy code repository is hosted on GitHub at -__ http://github.com/numpy/numpy - -You can check it out with -:: - git clone git://github.com/numpy/numpy.git numpy - -Installation information is at -__ http://www.scipy.org/scipylib/download.html - -Information for developers is at -__ http://www.scipy.org/scipylib/dev-zone.html - - -Testing -======= - -Q: How to test if F2PY is installed correctly? ----------------------------------------------- - -Run -:: - - f2py - -without arguments. If F2PY is installed correctly then it should print -the usage information for f2py. - -Q: How to test if F2PY is working correctly? --------------------------------------------- - -For a quick test, try out an example problem from Usage__ -section in `README.txt`_. - -__ index.html#usage - -For running F2PY unit tests, see `TESTING.txt`_. - - -Compiler/Platform-specific issues -================================= - -Q: What are supported platforms and compilers? ----------------------------------------------- - -F2PY is developed on Linux system with a GCC compiler (versions -2.95.x, 3.x). Fortran 90 related hooks are tested against Intel -Fortran Compiler. F2PY should work under any platform where Python and -Numeric are installed and has supported Fortran compiler installed. 
- -To see a list of supported compilers, execute:: - - f2py -c --help-fcompiler - -Example output:: - - List of available Fortran compilers: - --fcompiler=gnu GNU Fortran Compiler (3.3.4) - --fcompiler=intel Intel Fortran Compiler for 32-bit apps (8.0) - List of unavailable Fortran compilers: - --fcompiler=absoft Absoft Corp Fortran Compiler - --fcompiler=compaq Compaq Fortran Compiler - --fcompiler=compaqv DIGITAL|Compaq Visual Fortran Compiler - --fcompiler=hpux HP Fortran 90 Compiler - --fcompiler=ibm IBM XL Fortran Compiler - --fcompiler=intele Intel Fortran Compiler for Itanium apps - --fcompiler=intelev Intel Visual Fortran Compiler for Itanium apps - --fcompiler=intelv Intel Visual Fortran Compiler for 32-bit apps - --fcompiler=lahey Lahey/Fujitsu Fortran 95 Compiler - --fcompiler=mips MIPSpro Fortran Compiler - --fcompiler=nag NAGWare Fortran 95 Compiler - --fcompiler=pg Portland Group Fortran Compiler - --fcompiler=sun Sun|Forte Fortran 95 Compiler - --fcompiler=vast Pacific-Sierra Research Fortran 90 Compiler - List of unimplemented Fortran compilers: - --fcompiler=f Fortran Company/NAG F Compiler - For compiler details, run 'config_fc --verbose' setup command. - - -Q: How to use the F compiler in F2PY? -------------------------------------- - -Read `f2py2e/doc/using_F_compiler.txt`__. It describes why the F -compiler cannot be used in a normal way (i.e. using ``-c`` switch) to -build F2PY generated modules. It also gives a workaround to this -problem. - -__ http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/doc/using_F_compiler.txt?rev=HEAD&content-type=text/vnd.viewcvs-markup - -Q: How to use F2PY under Windows? ---------------------------------- - -F2PY can be used both within Cygwin__ and MinGW__ environments under -Windows, F2PY can be used also in Windows native terminal. -See the section `Setting up environment`__ for Cygwin and MinGW. 
- -__ http://cygwin.com/ -__ http://www.mingw.org/ -__ http://cens.ioc.ee/~pearu/numpy/BUILD_WIN32.html#setting-up-environment - -Install numpy_distutils and F2PY. Win32 installers of these packages -are provided in `F2PY Download`__ section. - -__ http://cens.ioc.ee/projects/f2py2e/#download - -Use ``--compiler=`` and ``--fcompiler`` F2PY command line switches to -to specify which C and Fortran compilers F2PY should use, respectively. - -Under MinGW environment, ``mingw32`` is default for a C compiler. - -Supported and Unsupported Features -================================== - -Q: Does F2PY support ``ENTRY`` statements? ------------------------------------------- - -Yes, starting at F2PY version higher than 2.39.235_1706. - -Q: Does F2PY support derived types in F90 code? ------------------------------------------------ - -Not yet. However I do have plans to implement support for F90 TYPE -constructs in future. But note that the task in non-trivial and may -require the next edition of F2PY for which I don't have resources to -work with at the moment. - -Jeffrey Hagelberg from LLNL has made progress on adding -support for derived types to f2py. He writes: - - At this point, I have a version of f2py that supports derived types - for most simple cases. I have multidimensional arrays of derived - types and allocatable arrays of derived types working. I'm just now - starting to work on getting nested derived types to work. I also - haven't tried putting complex number in derived types yet. - -Hopefully he can contribute his changes to f2py soon. - -Q: Does F2PY support pointer data in F90 code? ------------------------------------------------ - -No. I have never needed it and I haven't studied if there are any -obstacles to add pointer data support to F2PY. - -Q: What if Fortran 90 code uses ``(kind=KIND(..))``? 
---------------------------------------------------------------- - -Currently, F2PY can handle only ``(kind=)`` -declarations where ```` is a numeric integer (e.g. 1, 2, -4,...) but not a function call ``KIND(..)`` or any other -expression. F2PY needs to know what would be the corresponding C type -and a general solution for that would be too complicated to implement. - -However, F2PY provides a hook to overcome this difficulty, namely, -users can define their own to maps. For -example, if Fortran 90 code contains:: - - REAL(kind=KIND(0.0D0)) ... - -then create a file ``.f2py_f2cmap`` (into the working directory) -containing a Python dictionary:: - - {'real':{'KIND(0.0D0)':'double'}} - -for instance. - -Or more generally, the file ``.f2py_f2cmap`` must contain a dictionary -with items:: - - : {:} - -that defines mapping between Fortran type:: - - ([kind=]) - -and the corresponding ````. ```` can be one of the -following:: - - char - signed_char - short - int - long_long - float - double - long_double - complex_float - complex_double - complex_long_double - string - -For more information, see ``f2py2e/capi_maps.py``. - -Related software -================ - -Q: How F2PY distinguishes from Pyfort? --------------------------------------- - -F2PY and Pyfort have very similar aims and ideology of how they are -targeted. Both projects started to evolve in the same year 1999 -independently. When we discovered each other's projects, a discussion -started to join the projects but that unfortunately failed for -various reasons, e.g. both projects had evolved too far that merging -the tools would have been impractical and giving up the efforts that -the developers of both projects have made was unacceptable to both -parties. And so, nowadays we have two tools for connecting Fortran -with Python and this fact will hardly change in near future. To decide -which one to choose is a matter of taste, I can only recommend to try -out both to make up your choice. 
- -At the moment F2PY can handle more wrapping tasks than Pyfort, -e.g. with F2PY one can wrap Fortran 77 common blocks, Fortran 90 -module routines, Fortran 90 module data (including allocatable -arrays), one can call Python from Fortran, etc etc. F2PY scans Fortran -codes to create signature (.pyf) files. F2PY is free from most of the -limitations listed in in `the corresponding section of Pyfort -Reference Manual`__. - -__ http://pyfortran.sourceforge.net/pyfort/pyfort_reference.htm#pgfId-296925 - -There is a conceptual difference on how F2PY and Pyfort handle the -issue of different data ordering in Fortran and C multi-dimensional -arrays. Pyfort generated wrapper functions have optional arguments -TRANSPOSE and MIRROR that can be used to control explicitly how the array -arguments and their dimensions are passed to Fortran routine in order -to deal with the C/Fortran data ordering issue. F2PY generated wrapper -functions hide the whole issue from an end-user so that translation -between Fortran and C/Python loops and array element access codes is -one-to-one. How the F2PY generated wrappers deal with the issue is -determined by a person who creates a signature file via using -attributes like ``intent(c)``, ``intent(copy|overwrite)``, -``intent(inout|in,out|inplace)`` etc. - -For example, let's consider a typical usage of both F2PY and Pyfort -when wrapping the following simple Fortran code: - -.. include:: simple.f - :literal: - -The comment lines starting with ``cf2py`` are read by F2PY (so that we -don't need to generate/handwrite an intermediate signature file in -this simple case) while for a Fortran compiler they are just comment -lines. - -And here is a Python version of the Fortran code: - -.. include:: pytest.py - :literal: - -To generate a wrapper for subroutine ``foo`` using F2PY, execute:: - - $ f2py -m f2pytest simple.f -c - -that will generate an extension module ``f2pytest`` into the current -directory. 
- -To generate a wrapper using Pyfort, create the following file - -.. include:: pyforttest.pyf - :literal: - -and execute:: - - $ pyfort pyforttest - -In Pyfort GUI add ``simple.f`` to the list of Fortran sources and -check that the signature file is in free format. And then copy -``pyforttest.so`` from the build directory to the current directory. - -Now, in Python - -.. include:: simple_session.dat - :literal: - -Q: Can Pyfort .pyf files used with F2PY and vice versa? -------------------------------------------------------- - -After some simple modifications, yes. You should take into account the -following differences in Pyfort and F2PY .pyf files. - -+ F2PY signature file contains ``python module`` and ``interface`` - blocks that are equivalent to Pyfort ``module`` block usage. - -+ F2PY attribute ``intent(inplace)`` is equivalent to Pyfort - ``intent(inout)``. F2PY ``intent(inout)`` is a strict (but safe) - version of ``intent(inplace)``, any mismatch in arguments with - expected type, size, or contiguouness will trigger an exception - while ``intent(inplace)`` (dangerously) modifies arguments - attributes in-place. - -Misc -==== - -Q: How to establish which Fortran compiler F2PY will use? ---------------------------------------------------------- - -This question may be releavant when using F2PY in Makefiles. 
Here -follows a script demonstrating how to determine which Fortran compiler -and flags F2PY will use:: - - # Using post-0.2.2 numpy_distutils - from numpy_distutils.fcompiler import new_fcompiler - compiler = new_fcompiler() # or new_fcompiler(compiler='intel') - compiler.dump_properties() - - # Using pre-0.2.2 numpy_distutils - import os - from numpy_distutils.command.build_flib import find_fortran_compiler - def main(): - fcompiler = os.environ.get('FC_VENDOR') - fcompiler_exec = os.environ.get('F77') - f90compiler_exec = os.environ.get('F90') - fc = find_fortran_compiler(fcompiler, - fcompiler_exec, - f90compiler_exec, - verbose = 0) - print 'FC=',fc.f77_compiler - print 'FFLAGS=',fc.f77_switches - print 'FOPT=',fc.f77_opt - if __name__ == "__main__": - main() - -Users feedback -============== - -Q: Where to find additional information on using F2PY? ------------------------------------------------------- - -There are several F2PY related tutorials, slides, papers, etc -available: - -+ `Fortran to Python Interface Generator with an Application to - Aerospace Engineering`__ by P. Peterson, J. R. R. A. Martins, and - J. J. Alonso in `In Proceedings of the 9th International Python - Conference`__, Long Beach, California, 2001. - -__ http://www.python9.org/p9-cdrom/07/index.htm -__ http://www.python9.org/ - -+ Section `Adding Fortran90 code`__ in the UG of `The Bolometer Data - Analysis Project`__. - -__ http://www.astro.rub.de/laboca/download/boa_master_doc/7_4Adding_Fortran90_code.html -__ http://www.openboa.de/ - -+ Powerpoint presentation `Python for Scientific Computing`__ by Eric - Jones in `The Ninth International Python Conference`__. - -__ http://www.python9.org/p9-jones.ppt -__ http://www.python9.org/ - -+ Paper `Scripting a Large Fortran Code with Python`__ by Alvaro Caceres - Calleja in `International Workshop on Software Engineering for High - Performance Computing System Applications`__. 
- -__ http://csdl.ics.hawaii.edu/se-hpcs/pdf/calleja.pdf -__ http://csdl.ics.hawaii.edu/se-hpcs/ - -+ Section `Automatic building of C/Fortran extension for Python`__ by - Simon Lacoste-Julien in `Summer 2002 Report about Hybrid Systems - Modelling`__. - -__ http://moncs.cs.mcgill.ca/people/slacoste/research/report/SummerReport.html#tth_sEc3.4 -__ http://moncs.cs.mcgill.ca/people/slacoste/research/report/SummerReport.html - -+ `Scripting for Computational Science`__ by Hans Petter Langtangen - (see the `Mixed language programming`__ and `NumPy array programming`__ - sections for examples on using F2PY). - -__ http://www.ifi.uio.no/~inf3330/lecsplit/ -__ http://www.ifi.uio.no/~inf3330/lecsplit/slide662.html -__ http://www.ifi.uio.no/~inf3330/lecsplit/slide718.html - -+ Chapters 5 and 9 of `Python Scripting for Computational Science`__ - by H. P. Langtangen for case studies on using F2PY. - -__ http://www.springeronline.com/3-540-43508-5 - -+ Section `Fortran Wrapping`__ in `Continuity`__, a computational tool - for continuum problems in bioengineering and physiology. - -__ http://www.continuity.ucsd.edu/cont6_html/docs_fram.html -__ http://www.continuity.ucsd.edu/ - -+ Presentation `PYFORT and F2PY: 2 ways to bind C and Fortran with Python`__ - by Reiner Vogelsang. - -__ http://www.prism.enes.org/WPs/WP4a/Slides/pyfort/pyfort.html - -+ Lecture slides of `Extending Python: speed it up`__. - -__ http://www.astro.uni-bonn.de/~heith/lecture_pdf/friedrich5.pdf - -+ Wiki topics on `Wrapping Tools`__ and `Wrapping Benchmarks`__ for Climate - System Center at the University of Chicago. - -__ https://geodoc.uchicago.edu/climatewiki/DiscussWrappingTools -__ https://geodoc.uchicago.edu/climatewiki/WrappingBenchmarks - -+ `Performance Python with Weave`__ by Prabhu Ramachandran. 
- -__ http://www.numpy.org/documentation/weave/weaveperformance.html - -+ `How To Install py-f2py on Mac OSX`__ - -__ http://py-f2py.darwinports.com/ - -Please, let me know if there are any other sites that document F2PY -usage in one or another way. - -Q: What projects use F2PY? --------------------------- - -+ `SciPy: Scientific tools for Python`__ - -__ http://www.numpy.org/ - -+ `The Bolometer Data Analysis Project`__ - -__ http://www.openboa.de/ - -+ `pywavelet`__ - -__ http://www.met.wau.nl/index.html?http://www.met.wau.nl/medewerkers/moenea/python/pywavelet.html - -+ `PyARTS: an ARTS related Python package`__. - -__ http://www.met.ed.ac.uk/~cory/PyARTS/ - -+ `Python interface to PSPLINE`__, a collection of Spline and - Hermite interpolation tools for 1D, 2D, and 3D datasets on - rectilinear grids. - -__ http://pypspline.sourceforge.net - -+ `Markovian Analysis Package for Python`__. - -__ http://pymc.sourceforge.net - -+ `Modular toolkit for Data Processing (MDP)`__ - -__ http://mdp-toolkit.sourceforge.net/ - - -Please, send me a note if you are using F2PY in your project. - -Q: What people think about F2PY? --------------------------------- - -*F2PY is GOOD*: - -Here are some comments people have posted to f2py mailing list and c.l.py: - -+ Ryan Krauss: I really appreciate f2py. It seems weird to say, but I - am excited about relearning FORTRAN to compliment my python stuff. - -+ Fabien Wahl: f2py is great, and is used extensively over here... - -+ Fernando Perez: Anyway, many many thanks for this amazing tool. - - I haven't used pyfort, but I can definitely vouch for the amazing quality of - f2py. And since f2py is actively used by numpy, it won't go unmaintained. - It's quite impressive, and very easy to use. - -+ Kevin Mueller: First off, thanks to those responsible for F2PY; - its been an integral tool of my research for years now. - -+ David Linke: Best regards and thanks for the great tool! - -+ Perrin Meyer: F2Py is really useful! 
- -+ Hans Petter Langtangen: First of all, thank you for developing - F2py. This is a very important contribution to the scientific - computing community. We are using F2py a lot and are very happy with - it. - -+ Berthold Höllmann: Thank's alot. It seems it is also working in my - 'real' application :-) - -+ John Hunter: At first I wrapped them with f2py (unbelievably easy!)... - -+ Cameron Laird: Among many other features, Python boasts a mature - f2py, which makes it particularly rewarding to yoke Fortran- and - Python-coded modules into finished applications. - -+ Ryan Gutenkunst: f2py is sweet magic. - -*F2PY is BAD*: - -+ `Is it worth using on a large scale python drivers for Fortran - subroutines, interfaced with f2py?`__ - -__ http://sepwww.stanford.edu/internal/computing/python.html - -Additional comments on F2PY, good or bad, are welcome! - -.. References: -.. _README.txt: index.html -.. _HISTORY.txt: HISTORY.html -.. _HISTORY.txt in CVS: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup -.. 
_TESTING.txt: TESTING.html diff -Nru python-numpy-1.13.3/doc/f2py/fortranobject.tex python-numpy-1.14.5/doc/f2py/fortranobject.tex --- python-numpy-1.13.3/doc/f2py/fortranobject.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/fortranobject.tex 1970-01-01 00:00:00.000000000 +0000 @@ -1,574 +0,0 @@ -\documentclass{article} - -\headsep=0pt -\topmargin=0pt -\headheight=0pt -\oddsidemargin=0pt -\textwidth=6.5in -\textheight=9in - -\usepackage{xspace} -\usepackage{verbatim} -\newcommand{\fpy}{\texttt{f2py}\xspace} -\newcommand{\bs}{\symbol{`\\}} -\newcommand{\email}[1]{\special{html:}\texttt{<#1>}\special{html:}} -\title{\texttt{PyFortranObject} --- example usages} -\author{ -\large Pearu Peterson\\ -\small \email{pearu@cens.ioc.ee} -} - -\begin{document} - -\maketitle - -\special{html: Other formats of this document: -Gzipped PS, -PDF -} - -\tableofcontents - -\section{Introduction} -\label{sec:intro} - -Fortran language defines the following concepts that we would like to -access from Python: functions, subroutines, data in \texttt{COMMON} blocks, -F90 module functions and subroutines, F90 module data (both static and -allocatable arrays). - -In the following we shall assume that we know the signatures (full -specifications of routine arguments and variables) of these concepts -from their Fortran source codes. Now, in order to call or use them -from C, one needs to have pointers to the corresponding objects. The -pointers to Fortran 77 objects (routines, data in \texttt{COMMON} -blocks) are readily available to C codes (there are various sources -available about mixing Fortran 77 and C codes). On the other hand, F90 -module specifications are highly compiler dependent and sometimes it -is not even possible to access F90 module objects from C (at least, -not directly, see remark about MIPSPro 7 Compilers). 
But using some -tricks (described below), the pointers to F90 module objects can be -determined in runtime providing a compiler independent solution. - -To use Fortran objects from Python in unified manner, \fpy introduces -\texttt{PyFortranObject} to hold pointers of the Fortran objects and -the corresponding wrapper functions. In fact, \texttt{PyFortranObject} -does much more: it generates documentation strings in run-time (for -items in \texttt{COMMON} blocks and data in F90 modules), provides -methods for accessing Fortran data and for calling Fortran routines, -etc. - -\section{\texttt{PyFortranObject}} -\label{sec:pyfortobj} - -\texttt{PyFortranObject} is defined as follows -\begin{verbatim} -typedef struct { - PyObject_HEAD - int len; /* Number of attributes */ - FortranDataDef *defs; /* An array of FortranDataDef's */ - PyObject *dict; /* Fortran object attribute dictionary */ -} PyFortranObject; -\end{verbatim} -where \texttt{FortranDataDef} is -\begin{verbatim} -typedef struct { - char *name; /* attribute (array||routine) name */ - int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS, - || rank=-1 for Fortran routine */ - struct {int d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */ - int type; /* NPY_ || not used */ - char *data; /* pointer to array || Fortran routine */ - void (*func)(); /* initialization function for - allocatable arrays: - func(&rank,dims,set_ptr_func,name,len(name)) - || C/API wrapper for Fortran routine */ - char *doc; /* documentation string; only recommended - for routines. */ -} FortranDataDef; -\end{verbatim} -In the following we demonstrate typical usages of -\texttt{PyFortranObject}. Just relevant code fragments will be given. 
- - -\section{Fortran 77 subroutine} -\label{sec:f77subrout} - -Consider Fortran 77 subroutine -\begin{verbatim} -subroutine bar() -end -\end{verbatim} -The corresponding \texttt{PyFortranObject} is defined in C as follows: -\begin{verbatim} -static char doc_bar[] = "bar()"; -static PyObject *c_bar(PyObject *self, PyObject *args, - PyObject *keywds, void (*f2py_func)()) { - static char *capi_kwlist[] = {NULL}; - if (!PyArg_ParseTupleAndKeywords(args,keywds,"|:bar",capi_kwlist)) - return NULL; - (*f2py_func)(); - return Py_BuildValue(""); -} -extern void F_FUNC(bar,BAR)(); -static FortranDataDef f2py_routines_def[] = { - {"bar",-1, {-1}, 0, (char *)F_FUNC(bar,BAR),(void*)c_bar,doc_bar}, - {NULL} -}; -void initfoo() { - - d = PyModule_GetDict(m); - PyDict_SetItemString(d, f2py_routines_def[0].name, - PyFortranObject_NewAsAttr(&f2py_routines_def[0])); -} -\end{verbatim} -where CPP macro \texttt{F\_FUNC} defines how Fortran 77 routines are -seen in C. -In Python, Fortran subroutine \texttt{bar} is called as follows -\begin{verbatim} ->>> import foo ->>> foo.bar() -\end{verbatim} - -\section{Fortran 77 function} -\label{sec:f77func} -Consider Fortran 77 function -\begin{verbatim} -function bar() -complex bar -end -\end{verbatim} -The corresponding \texttt{PyFortranObject} is defined in C as in -previous example but with the following changes: -\begin{verbatim} -static char doc_bar[] = "bar = bar()"; -static PyObject *c_bar(PyObject *self, PyObject *args, - PyObject *keywds, void (*f2py_func)()) { - complex_float bar; - static char *capi_kwlist[] = {NULL}; - if (!PyArg_ParseTupleAndKeywords(args,keywds,"|:bar",capi_kwlist)) - return NULL; - (*f2py_func)(&bar); - return Py_BuildValue("O",pyobj_from_complex_float1(bar)); -} -extern void F_WRAPPEDFUNC(bar,BAR)(); -static FortranDataDef f2py_routines_def[] = { - {"bar",-1,{-1},0,(char *)F_WRAPPEDFUNC(bar,BAR),(void *)c_bar,doc_bar}, - {NULL} -}; -\end{verbatim} -where CPP macro \texttt{F\_WRAPPEDFUNC} gives the pointer to 
the following -Fortran 77 subroutine: -\begin{verbatim} -subroutine f2pywrapbar (barf2pywrap) -external bar -complex bar, barf2pywrap -barf2pywrap = bar() -end -\end{verbatim} -With these hooks, calling Fortran functions returning composed types -becomes platform/compiler independent. - - -\section{\texttt{COMMON} block data} -\label{sec:commondata} - -Consider Fortran 77 \texttt{COMMON} block -\begin{verbatim} -integer i -COMMON /bar/ i -\end{verbatim} -In order to access the variable \texttt{i} from Python, -\texttt{PyFortranObject} is defined as follows: -\begin{verbatim} -static FortranDataDef f2py_bar_def[] = { - {"i",0,{-1},NPY_INT}, - {NULL} -}; -static void f2py_setup_bar(char *i) { - f2py_bar_def[0].data = i; -} -extern void F_FUNC(f2pyinitbar,F2PYINITBAR)(); -static void f2py_init_bar() { - F_FUNC(f2pyinitbar,F2PYINITBAR)(f2py_setup_bar); -} -void initfoo() { - - PyDict_SetItemString(d, "bar", PyFortranObject_New(f2py_bar_def,f2py_init_bar)); -} -\end{verbatim} -where auxiliary Fortran function \texttt{f2pyinitbar} is defined as follows -\begin{verbatim} -subroutine f2pyinitbar(setupfunc) -external setupfunc -integer i -common /bar/ i -call setupfunc(i) -end -\end{verbatim} -and it is called in \texttt{PyFortranObject\_New}. 
- - -\section{Fortran 90 module subroutine} -\label{sec:f90modsubrout} - -Consider -\begin{verbatim} -module fun - subroutine bar() - end subroutine bar -end module fun -\end{verbatim} -\texttt{PyFortranObject} is defined as follows -\begin{verbatim} -static char doc_fun_bar[] = "fun.bar()"; -static PyObject *c_fun_bar(PyObject *self, PyObject *args, - PyObject *keywds, void (*f2py_func)()) { - static char *kwlist[] = {NULL}; - if (!PyArg_ParseTupleAndKeywords(args,keywds,"",kwlist)) - return NULL; - (*f2py_func)(); - return Py_BuildValue(""); -} -static FortranDataDef f2py_fun_def[] = { - {"bar",-1,{-1},0,NULL,(void *)c_fun_bar,doc_fun_bar}, - {NULL} -}; -static void f2py_setup_fun(char *bar) { - f2py_fun_def[0].data = bar; -} -extern void F_FUNC(f2pyinitfun,F2PYINITFUN)(); -static void f2py_init_fun() { - F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun); -} -void initfoo () { - - PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun)); -} -\end{verbatim} -where auxiliary Fortran function \texttt{f2pyinitfun} is defined as -follows -\begin{verbatim} -subroutine f2pyinitfun(f2pysetupfunc) -use fun -external f2pysetupfunc -call f2pysetupfunc(bar) -end subroutine f2pyinitfun -\end{verbatim} -The following Python session demonstrates how to call Fortran 90 -module function \texttt{bar}: -\begin{verbatim} ->>> import foo ->>> foo.fun.bar() -\end{verbatim} - -\section{Fortran 90 module function} -\label{sec:f90modfunc} - -Consider -\begin{verbatim} -module fun - function bar() - complex bar - end subroutine bar -end module fun -\end{verbatim} -\texttt{PyFortranObject} is defined as follows -\begin{verbatim} -static char doc_fun_bar[] = "bar = fun.bar()"; -static PyObject *c_fun_bar(PyObject *self, PyObject *args, - PyObject *keywds, void (*f2py_func)()) { - complex_float bar; - static char *kwlist[] = {NULL}; - if (!PyArg_ParseTupleAndKeywords(args,keywds,"",kwlist)) - return NULL; - (*f2py_func)(&bar); - return 
Py_BuildValue("O",pyobj_from_complex_float1(bar)); -} -static FortranDataDef f2py_fun_def[] = { - {"bar",-1,{-1},0,NULL,(void *)c_fun_bar,doc_fun_bar}, - {NULL} -}; -static void f2py_setup_fun(char *bar) { - f2py_fun_def[0].data = bar; -} -extern void F_FUNC(f2pyinitfun,F2PYINITFUN)(); -static void f2py_init_fun() { - F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun); -} -void initfoo() { - - PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun)); -} -\end{verbatim} -where -\begin{verbatim} -subroutine f2pywrap_fun_bar (barf2pywrap) -use fun -complex barf2pywrap -barf2pywrap = bar() -end - -subroutine f2pyinitfun(f2pysetupfunc) -external f2pysetupfunc,f2pywrap_fun_bar -call f2pysetupfunc(f2pywrap_fun_bar) -end -\end{verbatim} - - -\section{Fortran 90 module data} -\label{sec:f90moddata} - -Consider -\begin{verbatim} -module fun - integer i -end module fun -\end{verbatim} -Then -\begin{verbatim} -static FortranDataDef f2py_fun_def[] = { - {"i",0,{-1},NPY_INT}, - {NULL} -}; -static void f2py_setup_fun(char *i) { - f2py_fun_def[0].data = i; -} -extern void F_FUNC(f2pyinitfun,F2PYINITFUN)(); -static void f2py_init_fun() { - F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun); -} -void initfoo () { - - PyDict_SetItemString(d, "fun", - PyFortranObject_New(f2py_fun_def,f2py_init_fun)); -} -\end{verbatim} -where -\begin{verbatim} -subroutine f2pyinitfun(f2pysetupfunc) -use fun -external f2pysetupfunc -call f2pysetupfunc(i) -end subroutine f2pyinitfun -\end{verbatim} -Example usage in Python: -\begin{verbatim} ->>> import foo ->>> foo.fun.i = 4 -\end{verbatim} - -\section{Fortran 90 module allocatable array} -\label{sec:f90modallocarr} - -Consider -\begin{verbatim} -module fun - real, allocatable :: r(:) -end module fun -\end{verbatim} -Then -\begin{verbatim} -static FortranDataDef f2py_fun_def[] = { - {"r",1,{-1},NPY_FLOAT}, - {NULL} -}; -static void f2py_setup_fun(void (*r)()) { - f2py_fun_def[0].func = r; -} -extern void 
F_FUNC(f2pyinitfun,F2PYINITFUN)(); -static void f2py_init_fun() { - F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun); -} -void initfoo () { - - PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun)); -} -\end{verbatim} -where -\begin{verbatim} -subroutine f2py_fun_getdims_r(r,s,f2pysetdata) -use fun, only: d => r -external f2pysetdata -logical ns -integer s(*),r,i,j -ns = .FALSE. -if (allocated(d)) then - do i=1,r - if ((size(d,r-i+1).ne.s(i)).and.(s(i).ge.0)) then - ns = .TRUE. - end if - end do - if (ns) then - deallocate(d) - end if -end if -if ((.not.allocated(d)).and.(s(1).ge.1)) then - allocate(d(s(1))) -end if -if (allocated(d)) then - do i=1,r - s(i) = size(d,r-i+1) - end do -end if -call f2pysetdata(d,allocated(d)) -end subroutine f2py_fun_getdims_r - -subroutine f2pyinitfun(f2pysetupfunc) -use fun -external f2pysetupfunc,f2py_fun_getdims_r -call f2pysetupfunc(f2py_fun_getdims_r) -end subroutine f2pyinitfun -\end{verbatim} -Usage in Python: -\begin{verbatim} ->>> import foo ->>> foo.fun.r = [1,2,3,4] -\end{verbatim} - -\section{Callback subroutine} -\label{sec:cbsubr} - -Thanks to Travis Oliphant for working out the basic idea of the -following callback mechanism. 
- -Consider -\begin{verbatim} -subroutine fun(bar) -external bar -call bar(1) -end -\end{verbatim} -Then -\begin{verbatim} -static char doc_foo8_fun[] = " -Function signature: - fun(bar,[bar_extra_args]) -Required arguments: - bar : call-back function -Optional arguments: - bar_extra_args := () input tuple -Call-back functions: - def bar(e_1_e): return - Required arguments: - e_1_e : input int"; -static PyObject *foo8_fun(PyObject *capi_self, PyObject *capi_args, - PyObject *capi_keywds, void (*f2py_func)()) { - PyObject *capi_buildvalue = NULL; - PyObject *bar_capi = Py_None; - PyTupleObject *bar_xa_capi = NULL; - PyTupleObject *bar_args_capi = NULL; - jmp_buf bar_jmpbuf; - int bar_jmpbuf_flag = 0; - int bar_nofargs_capi = 0; - static char *capi_kwlist[] = {"bar","bar_extra_args",NULL}; - - if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\ - "O!|O!:foo8.fun",\ - capi_kwlist,&PyFunction_Type,&bar_capi,&PyTuple_Type,&bar_xa_capi)) - goto capi_fail; - - bar_nofargs_capi = cb_bar_in_fun__user__routines_nofargs; - if (create_cb_arglist(bar_capi,bar_xa_capi,1,0, - &cb_bar_in_fun__user__routines_nofargs,&bar_args_capi)) { - if ((PyErr_Occurred())==NULL) - PyErr_SetString(foo8_error,"failed in processing argument list for call-back bar." 
); - goto capi_fail; - } - - SWAP(bar_capi,cb_bar_in_fun__user__routines_capi,PyObject); - SWAP(bar_args_capi,cb_bar_in_fun__user__routines_args_capi,PyTupleObject); - memcpy(&bar_jmpbuf,&cb_bar_in_fun__user__routines_jmpbuf,sizeof(jmp_buf)); - bar_jmpbuf_flag = 1; - - if ((setjmp(cb_bar_in_fun__user__routines_jmpbuf))) { - if ((PyErr_Occurred())==NULL) - PyErr_SetString(foo8_error,"Failure of a callback function"); - goto capi_fail; - } else - (*f2py_func)(cb_bar_in_fun__user__routines); - - capi_buildvalue = Py_BuildValue(""); -capi_fail: - - if (bar_jmpbuf_flag) { - cb_bar_in_fun__user__routines_capi = bar_capi; - Py_DECREF(cb_bar_in_fun__user__routines_args_capi); - cb_bar_in_fun__user__routines_args_capi = bar_args_capi; - cb_bar_in_fun__user__routines_nofargs = bar_nofargs_capi; - memcpy(&cb_bar_in_fun__user__routines_jmpbuf,&bar_jmpbuf,sizeof(jmp_buf)); - bar_jmpbuf_flag = 0; - } - return capi_buildvalue; -} -extern void F_FUNC(fun,FUN)(); -static FortranDataDef f2py_routine_defs[] = { - {"fun",-1,{-1},0,(char *)F_FUNC(fun,FUN),(void *)foo8_fun,doc_foo8_fun}, - {NULL} -}; -void initfoo8 () { - - PyDict_SetItemString(d, f2py_routine_defs[0].name, - PyFortranObject_NewAsAttr(&f2py_routine_defs[0])); -} -\end{verbatim} -where -\begin{verbatim} -PyObject *cb_bar_in_fun__user__routines_capi = Py_None; -PyTupleObject *cb_bar_in_fun__user__routines_args_capi = NULL; -int cb_bar_in_fun__user__routines_nofargs = 0; -jmp_buf cb_bar_in_fun__user__routines_jmpbuf; -static void cb_bar_in_fun__user__routines (int *e_1_e_cb_capi) { - PyTupleObject *capi_arglist = cb_bar_in_fun__user__routines_args_capi; - PyObject *capi_return = NULL; - PyObject *capi_tmp = NULL; - int capi_j,capi_i = 0; - - int e_1_e=(*e_1_e_cb_capi); - if (capi_arglist == NULL) - goto capi_fail; - if (cb_bar_in_fun__user__routines_nofargs>capi_i) - if (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_int1(e_1_e))) - goto capi_fail; - - capi_return = 
PyEval_CallObject(cb_bar_in_fun__user__routines_capi, - (PyObject *)capi_arglist); - - if (capi_return == NULL) - goto capi_fail; - if (capi_return == Py_None) { - Py_DECREF(capi_return); - capi_return = Py_BuildValue("()"); - } - else if (!PyTuple_Check(capi_return)) { - capi_tmp = capi_return; - capi_return = Py_BuildValue("(O)",capi_tmp); - Py_DECREF(capi_tmp); - } - capi_j = PyTuple_Size(capi_return); - capi_i = 0; - goto capi_return_pt; -capi_fail: - fprintf(stderr,"Call-back cb_bar_in_fun__user__routines failed.\n"); - Py_XDECREF(capi_return); - longjmp(cb_bar_in_fun__user__routines_jmpbuf,-1); -capi_return_pt: - ; -} -\end{verbatim} -Usage in Python: -\begin{verbatim} ->>> import foo8 as foo ->>> def bar(i): print 'In bar i=',i -... ->>> foo.fun(bar) -In bar i= 1 -\end{verbatim} - -\end{document} - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: t -%%% End: diff -Nru python-numpy-1.13.3/doc/f2py/hello.f python-numpy-1.14.5/doc/f2py/hello.f --- python-numpy-1.13.3/doc/f2py/hello.f 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/hello.f 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -C File hello.f - subroutine foo (a) - integer a - print*, "Hello from Fortran!" - print*, "a=",a - end - diff -Nru python-numpy-1.13.3/doc/f2py/HISTORY.txt python-numpy-1.14.5/doc/f2py/HISTORY.txt --- python-numpy-1.13.3/doc/f2py/HISTORY.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/HISTORY.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,1043 +0,0 @@ -.. -*- rest -*- - -========================= - F2PY History -========================= - -:Author: Pearu Peterson -:Web-site: http://cens.ioc.ee/projects/f2py2e/ -:Date: $Date: 2005/09/16 08:36:45 $ -:Revision: $Revision: 1.191 $ - -.. Contents:: - -Release 2.46.243 -===================== - -* common_rules.py - - - Fixed compiler warnings. - -* fortranobject.c - - - Fixed another dims calculation bug. - - Fixed dims calculation bug and added the corresponding check. 
- - Accept higher dimensional arrays if their effective rank matches. - Effective rank is multiplication of non-unit dimensions. - -* f2py2e.py - - - Added support for numpy.distutils version 0.4.0. - -* Documentation - - - Added example about ``intent(callback,hide)`` usage. Updates. - - Updated FAQ. - -* cb_rules.py - - - Fixed missing need kw error. - - Fixed getting callback non-existing extra arguments. - - External callback functions and extra_args can be set via - ext.module namespace. - - Avoid crash when external callback function is not set. - -* rules.py - - - Enabled ``intent(out)`` for ``intent(aux)`` non-complex scalars. - - Fixed splitting lines in F90 fixed form mode. - - Fixed FORTRANAME typo, relevant when wrapping scalar functions with - ``--no-wrap-functions``. - - Improved failure handling for callback functions. - - Fixed bug in writing F90 wrapper functions when a line length - is exactly 66. - -* cfuncs.py - - - Fixed dependency issue with typedefs. - - Introduced ``-DUNDERSCORE_G77`` that cause extra underscore to be - used for external names that contain an underscore. - -* capi_maps.py - - - Fixed typos. - - Fixed using complex cb functions. - -* crackfortran.py - - - Introduced parent_block key. Get ``use`` statements recursively - from parent blocks. - - Apply parameter values to kindselectors. - - Fixed bug evaluating ``selected_int_kind`` function. - - Ignore Name and Syntax errors when evaluating scalars. - - Treat ``_intType`` as ```` in get_parameters. - - Added support for F90 line continuation in fix format mode. - - Include optional attribute of external to signature file. - - Add ``entry`` arguments to variable lists. - - Treat \xa0 character as space. - - Fixed bug where __user__ callback subroutine was added to its - argument list. - - In strict 77 mode read only the first 72 columns. - - Fixed parsing ``v(i) = func(r)``. - - Fixed parsing ``integer*4::``. - - Fixed parsing ``1.d-8`` when used as a parameter value. 
- -Release 2.45.241_1926 -===================== - -* diagnose.py - - - Clean up output. - -* cb_rules.py - - - Fixed ``_cpointer`` usage for subroutines. - - Fortran function ``_cpointer`` can be used for callbacks. - -* func2subr.py - - - Use result name when wrapping functions with subroutines. - -* f2py2e.py - - - Fixed ``--help-link`` switch. - - Fixed ``--[no-]lower`` usage with ``-c`` option. - - Added support for ``.pyf.src`` template files. - -* __init__.py - - - Using ``exec_command`` in ``compile()``. - -* setup.py - - - Clean up. - - Disabled ``need_numpy_distutils`` function. From now on it is assumed - that proper version of ``numpy_distutils`` is already installed. - -* capi_maps.py - - - Added support for wrapping unsigned integers. In a .pyf file - ``integer(-1)``, ``integer(-2)``, ``integer(-4)`` correspond to - ``unsigned char``, ``unsigned short``, ``unsigned`` C types, - respectively. - -* tests/c/return_real.py - - - Added tests to wrap C functions returning float/double. - -* fortranobject.c - - - Added ``_cpointer`` attribute to wrapped objects. - -* rules.py - - - ``_cpointer`` feature for wrapped module functions is not - functional at the moment. - - Introduced ``intent(aux)`` attribute. Useful to save a value - of a parameter to auxiliary C variable. Note that ``intent(aux)`` - implies ``intent(c)``. - - Added ``usercode`` section. When ``usercode`` is used in ``python - module`` block twise then the contents of the second multi-line - block is inserted after the definition of external routines. - - Call-back function arguments can be CObjects. - -* cfuncs.py - - - Allow call-back function arguments to be fortran objects. - - Allow call-back function arguments to be built-in functions. - -* crackfortran.py - - - Fixed detection of a function signature from usage example. - - Cleaned up -h output for intent(callback) variables. - - Repair malformed argument list (missing argument name). 
- - Warn on the usage of multiple attributes without type specification. - - Evaluate only scalars ```` (e.g. not of strings). - - Evaluate ```` using parameters name space. - - Fixed resolving `()[result()]` pattern. - - ``usercode`` can be used more than once in the same context. - -Release 2.43.239_1831 -===================== - -* auxfuncs.py - - - Made ``intent(in,inplace)`` to mean ``intent(inplace)``. - -* f2py2e.py - - - Intoduced ``--help-link`` and ``--link-`` - switches to link generated extension module with system - ```` as defined by numpy_distutils/system_info.py. - -* fortranobject.c - - - Patch to make PyArray_CanCastSafely safe on 64-bit machines. - Fixes incorrect results when passing ``array('l')`` to - ``real*8 intent(in,out,overwrite)`` arguments. - -* rules.py - - - Avoid empty continuation lines in Fortran wrappers. - -* cfuncs.py - - - Adding ``\0`` at the end of a space-padded string, fixes tests - on 64-bit Gentoo. - -* crackfortran.py - - - Fixed splitting lines with string parameters. - -Release 2.43.239_1806 -===================== - -* Tests - - - Fixed test site that failed after padding strings with spaces - instead of zeros. - -* Documentation - - - Documented ``intent(inplace)`` attribute. - - Documented ``intent(callback)`` attribute. - - Updated FAQ, added Users Feedback section. - -* cfuncs.py - - - Padding longer (than provided from Python side) strings with spaces - (that is Fortran behavior) instead of nulls (that is C strncpy behavior). - -* f90mod_rules.py - - - Undoing rmbadnames in Python and Fortran layers. - -* common_rules.py - - - Renaming common block items that have names identical to C keywords. - - Fixed wrapping blank common blocks. - -* fortranobject.h - - - Updated numarray (0.9, 1.0, 1.1) support (patch by Todd Miller). - -* fortranobject.c - - - Introduced ``intent(inplace)`` feature. - - Fix numarray reference counts (patch by Todd). - - Updated numarray (0.9, 1.0, 1.1) support (patch by Todd Miller). 
- - Enabled F2PY_REPORT_ON_ARRAY_COPY for Numarray. - -* capi_maps.py - - - Always normalize .f2py_f2cmap keys to lower case. - -* rules.py - - - Disabled ``index`` macro as it conflicts with the one defined - in string.h. - - Moved ``externroutines`` up to make it visible to ``usercode``. - - Fixed bug in f90 code generation: no empty line continuation is - allowed. - - Fixed undefined symbols failure when ``fortranname`` is used - to rename a wrapped function. - - Support for ``entry`` statement. - -* auxfuncs.py - - - Made is* functions more robust with respect to parameters that - have no typespec specified. - - Using ``size_t`` instead of ``int`` as the type of string - length. Fixes issues on 64-bit platforms. - -* setup.py - - - Fixed bug of installing ``f2py`` script as ``.exe`` file. - -* f2py2e.py - - - ``--compiler=`` and ``--fcompiler=`` can be specified at the same time. - -* crackfortran.py - - - Fixed dependency detection for non-intent(in|inout|inplace) arguments. - They must depend on their dimensions, not vice-versa. - - Don't match ``!!f2py`` as a start of f2py directive. - - Only effective intent attributes will be output to ``-h`` target. - - Introduced ``intent(callback)`` to build interface between Python - functions and Fortran external routines. - - Avoid including external arguments to __user__ modules. - - Initial hooks to evaluate ``kind`` and ``selected_int_kind``. - - Evaluating parameters in {char,kind}selectors and applying rmbadname. - - Evaluating parameters using also module parameters. Fixed the order - of parameter evaluation. - - Fixed silly bug: when block name was not lower cased, it was not - recognized correctly. - - Applying mapping '.false.'->'False', '.true.'->'True' to logical - parameters. TODO: Support for logical expressions is needed. - - Added support for multiple statements in one line (separated with semicolon). - - Impl. get_useparameters function for using parameter values from - other f90 modules. 
- - Applied Bertholds patch to fix bug in evaluating expressions - like ``1.d0/dvar``. - - Fixed bug in reading string parameters. - - Evaluating parameters in charselector. Code cleanup. - - Using F90 module parameters to resolve kindselectors. - - Made the evaluation of module data init-expression more robust. - - Support for ``entry`` statement. - - Fixed ``determineexprtype`` that in the case of parameters - returned non-dictionary objects. - - Use ``-*- fix -*-`` to specify that a file is in fixed format. - -Release 2.39.235_1693 -===================== - -* fortranobject.{h,c} - - - Support for allocatable string arrays. - -* cfuncs.py - - - Call-back arguments can now be also instances that have ``__call__`` method - as well as instance methods. - -* f2py2e.py - - - Introduced ``--include_paths ::..`` command line - option. - - Added ``--compiler=`` support to change the C/C++ compiler from - f2py command line. - -* capi_maps.py - - - Handle ``XDY`` parameter constants. - -* crackfortran.py - - - Handle ``XDY`` parameter constants. - - - Introduced formatpattern to workaround a corner case where reserved - keywords are used in format statement. Other than that, format pattern - has no use. - - - Parameters are now fully evaluated. - -* More splitting of documentation strings. - -* func2subr.py - fixed bug for function names that f77 compiler - would set ``integer`` type. - -Release 2.39.235_1660 -===================== - -* f2py2e.py - - - Fixed bug in using --f90flags=.. - -* f90mod_rules.py - - - Split generated documentation strings (to avoid MSVC issue when - string length>2k) - - - Ignore ``private`` module data. - -Release 2.39.235_1644 -===================== - -:Date:24 February 2004 - -* Character arrays: - - - Finished complete support for character arrays and arrays of strings. - - ``character*n a(m)`` is treated like ``character a(m,n)`` with ``intent(c)``. 
- - Character arrays are now considered as ordinary arrays (not as arrays - of strings which actually didn't work). - -* docs - - - Initial f2py manpage file f2py.1. - - Updated usersguide and other docs when using numpy_distutils 0.2.2 - and up. - -* capi_maps.py - - - Try harder to use .f2py_f2cmap mappings when kind is used. - -* crackfortran.py - - - Included files are first search in the current directory and - then from the source file directory. - - Ignoring dimension and character selector changes. - - Fixed bug in Fortran 90 comments of fixed format. - - Warn when .pyf signatures contain undefined symbols. - - Better detection of source code formats. Using ``-*- fortran -*-`` - or ``-*- f90 -*-`` in the first line of a Fortran source file is - recommended to help f2py detect the format, fixed or free, - respectively, correctly. - -* cfuncs.py - - - Fixed intent(inout) scalars when typecode=='l'. - - Fixed intent(inout) scalars when not using numarray. - - Fixed intent(inout) scalars when using numarray. - -* diagnose.py - - - Updated for numpy_distutils 0.2.2 and up. - - Added numarray support to diagnose. - -* fortranobject.c - - - Fixed nasty bug with intent(in,copy) complex slice arrays. - - Applied Todd's patch to support numarray's byteswapped or - misaligned arrays, requires numarray-0.8 or higher. - -* f2py2e.py - - - Applying new hooks for numpy_distutils 0.2.2 and up, keeping - backward compatibility with depreciation messages. - - Using always os.system on non-posix platforms in f2py2e.compile - function. - -* rules.py - - - Changed the order of buildcallback and usercode junks. - -* setup.cfg - - - Added so that docs/ and tests/ directories are included to RPMs. - -* setup.py - - - Installing f2py.py instead of f2py.bat under NT. - - Introduced ``--with-numpy_distutils`` that is useful when making - f2py tar-ball with numpy_distutils included. 
- -Release 2.37.233-1545 -===================== - -:Date: 11 September 2003 - -* rules.py - - - Introduced ``interface_usercode`` replacement. When ``usercode`` - statement is used inside the first interface block, its contents - will be inserted at the end of initialization function of a F2PY - generated extension module (feature request: Berthold Höllmann). - - Introduced auxiliary function ``as_column_major_storage`` that - converts input array to an array with column major storage order - (feature request: Hans Petter Langtangen). - -* crackfortran.py - - - Introduced ``pymethoddef`` statement. - -* cfuncs.py - - - Fixed "#ifdef in #define TRYPYARRAYTEMPLATE" bug (patch thanks - to Bernhard Gschaider) - -* auxfuncs.py - - - Introduced ``getpymethod`` function. - - Enabled multi-line blocks in ``callprotoargument`` statement. - -* f90mod_rules.py - - - Undone "Fixed Warning 43 emitted by Intel Fortran compiler" that - causes (curios) segfaults. - -* fortranobject.c - - - Fixed segfaults (that were introduced with recent memory leak - fixes) when using allocatable arrays. - - Introduced F2PY_REPORT_ON_ARRAY_COPY CPP macro int-variable. If defined - then a message is printed to stderr whenever a copy of an array is - made and arrays size is larger than F2PY_REPORT_ON_ARRAY_COPY. - -Release 2.35.229-1505 -===================== - -:Date: 5 August 2003 - -* General - - - Introduced ``usercode`` statement (dropped ``c_code`` hooks). - -* setup.py - - - Updated the CVS location of numpy_distutils. - -* auxfuncs.py - - - Introduced ``isint1array(var)`` for fixing ``integer*1 intent(out)`` - support. - -* tests/f77/callback.py - - Introduced some basic tests. - -* src/fortranobject.{c,h} - - - Fixed memory leaks when getting/setting allocatable arrays. - (Bug report by Bernhard Gschaider) - - - Initial support for numarray (Todd Miller's patch). Use -DNUMARRAY - on the f2py command line to enable numarray support. 
Note that - there is no character arrays support and these hooks are not - tested with F90 compilers yet. - -* cfuncs.py - - - Fixed reference counting bug that appeared when constructing extra - argument list to callback functions. - - Added ``NPY_LONG != NPY_INT`` test. - -* f2py2e.py - - Undocumented ``--f90compiler``. - -* crackfortran.py - - - Introduced ``usercode`` statement. - - Fixed newlines when outputting multi-line blocks. - - Optimized ``getlincoef`` loop and ``analyzevars`` for cases where - len(vars) is large. - - Fixed callback string argument detection. - - Fixed evaluating expressions: only int|float expressions are - evaluated successfully. - -* docs - - Documented -DF2PY_REPORT_ATEXIT feature. - -* diagnose.py - - Added CPU information and sys.prefix printout. - -* tests/run_all.py - - Added cwd to PYTHONPATH. - -* tests/f??/return_{real,complex}.py - - Pass "infinity" check in SunOS. - -* rules.py - - - Fixed ``integer*1 intent(out)`` support - - Fixed free format continuation of f2py generated F90 files. - -* tests/mixed/ - - Introduced tests for mixing Fortran 77, Fortran 90 fixed and free - format codes in one module. - -* f90mod_rules.py - - - Fixed non-prototype warnings. - - Fixed Warning 43 emitted by Intel Fortran compiler. - - Avoid long lines in Fortran codes to reduce possible problems with - continuations of lines. - -Public Release 2.32.225-1419 -============================ - -:Date: 8 December 2002 - -* docs/usersguide/ - - Complete revision of F2PY Users Guide - -* tests/run_all.py - - - New file. A Python script to run all f2py unit tests. - -* Removed files: buildmakefile.py, buildsetup.py. - -* tests/f77/ - - - Added intent(out) scalar tests. - -* f2py_testing.py - - - Introduced. It contains jiffies, memusage, run, cmdline functions - useful for f2py unit tests site. - -* setup.py - - - Install numpy_distutils only if it is missing or is too old - for f2py. - -* f90modrules.py - - - Fixed wrapping f90 module data. 
- - Fixed wrapping f90 module subroutines. - - Fixed f90 compiler warnings for wrapped functions by using interface - instead of external stmt for functions. - -* tests/f90/ - - - Introduced return_*.py tests. - -* func2subr.py - - - Added optional signature argument to createfuncwrapper. - - In f2pywrappers routines, declare external, scalar, remaining - arguments in that order. Fixes compiler error 'Invalid declaration' - for:: - - real function foo(a,b) - integer b - real a(b) - end - -* crackfortran.py - - - Removed first-line comment information support. - - Introduced multiline block. Currently usable only for - ``callstatement`` statement. - - Improved array length calculation in getarrlen(..). - - "From sky" program group is created only if ``groupcounter<1``. - See TODO.txt. - - Added support for ``dimension(n:*)``, ``dimension(*:n)``. They are - treated as ``dimension(*)`` by f2py. - - Fixed parameter substitution (this fixes TODO item by Patrick - LeGresley, 22 Aug 2001). - -* f2py2e.py - - - Disabled all makefile, setup, manifest file generation hooks. - - Disabled --[no]-external-modroutines option. All F90 module - subroutines will have Fortran/C interface hooks. - - --build-dir can be used with -c option. - - only/skip modes can be used with -c option. - - Fixed and documented `-h stdout` feature. - - Documented extra options. - - Introduced --quiet and --verbose flags. - -* cb_rules.py - - - Fixed debugcapi hooks for intent(c) scalar call-back arguments - (bug report: Pierre Schnizer). - - Fixed intent(c) for scalar call-back arguments. - - Improved failure reports. - -* capi_maps.py - - - Fixed complex(kind=..) to C type mapping bug. The following hold - complex==complex(kind=4)==complex*8, complex(kind=8)==complex*16 - - Using signed_char for integer*1 (bug report: Steve M. Robbins). - - Fixed logical*8 function bug: changed its C correspondence to - long_long. - - Fixed memory leak when returning complex scalar. 
- -* __init__.py - - - Introduced a new function (for f2py test site, but could be useful - in general) ``compile(source[,modulename,extra_args])`` for - compiling fortran source codes directly from Python. - -* src/fortranobject.c - - - Multi-dimensional common block members and allocatable arrays - are returned as Fortran-contiguous arrays. - - Fixed NULL return to Python without exception. - - Fixed memory leak in getattr(,'__doc__'). - - .__doc__ is saved to .__dict__ (previously - it was generated each time when requested). - - Fixed a nasty typo from the previous item that caused data - corruption and occasional SEGFAULTs. - - array_from_pyobj accepts arbitrary rank arrays if the last dimension - is undefined. E.g. dimension(3,*) accepts a(3,4,5) and the result is - array with dimension(3,20). - - Fixed (void*) casts to make g++ happy (bug report: eric). - - Changed the interface of ARR_IS_NULL macro to avoid "``NULL used in - arithmetics``" warnings from g++. - -* src/fortranobject.h - - - Undone previous item. Defining NO_IMPORT_ARRAY for - src/fortranobject.c (bug report: travis) - - Ensured that PY_ARRAY_UNIQUE_SYMBOL is defined only for - src/fortranobject.c (bug report: eric). - -* rules.py - - - Introduced dummy routine feature. - - F77 and F90 wrapper subroutines (if any) as saved to different - files, -f2pywrappers.f and -f2pywrappers2.f90, - respectively. Therefore, wrapping F90 requires numpy_distutils >= - 0.2.0_alpha_2.229. - - Fixed compiler warnings about meaningless ``const void (*f2py_func)(..)``. - - Improved error messages for ``*_from_pyobj``. - - Changed __CPLUSPLUS__ macros to __cplusplus (bug report: eric). - - Changed (void*) casts to (f2py_init_func) (bug report: eric). - - Removed unnecessary (void*) cast for f2py_has_column_major_storage - in f2py_module_methods definition (bug report: eric). - - Changed the interface of f2py_has_column_major_storage function: - removed const from the 1st argument. 
- -* cfuncs.py - - - Introduced -DPREPEND_FORTRAN. - - Fixed bus error on SGI by using PyFloat_AsDouble when ``__sgi`` is defined. - This seems to be `know bug`__ with Python 2.1 and SGI. - - string_from_pyobj accepts only arrays whos elements size==sizeof(char). - - logical scalars (intent(in),function) are normalized to 0 or 1. - - Removed NUMFROMARROBJ macro. - - (char|short)_from_pyobj now use int_from_pyobj. - - (float|long_double)_from_pyobj now use double_from_pyobj. - - complex_(float|long_double)_from_pyobj now use complex_double_from_pyobj. - - Rewrote ``*_from_pyobj`` to be more robust. This fixes segfaults if - getting * from a string. Note that int_from_pyobj differs - from PyNumber_Int in that it accepts also complex arguments - (takes the real part) and sequences (takes the 1st element). - - Removed unnecessary void* casts in NUMFROMARROBJ. - - Fixed casts in ``*_from_pyobj`` functions. - - Replaced CNUMFROMARROBJ with NUMFROMARROBJ. - -.. __: http://sourceforge.net/tracker/index.php?func=detail&aid=435026&group_id=5470&atid=105470 - -* auxfuncs.py - - - Introduced isdummyroutine(). - - Fixed islong_* functions. - - Fixed isintent_in for intent(c) arguments (bug report: Pierre Schnizer). - - Introduced F2PYError and throw_error. Using throw_error, f2py - rejects illegal .pyf file constructs that otherwise would cause - compilation failures or python crashes. - - Fixed islong_long(logical*8)->True. - - Introduced islogical() and islogicalfunction(). - - Fixed prototype string argument (bug report: eric). - -* Updated README.txt and doc strings. Starting to use docutils. - -* Speed up for ``*_from_pyobj`` functions if obj is a sequence. - -* Fixed SegFault (reported by M.Braun) due to invalid ``Py_DECREF`` - in ``GETSCALARFROMPYTUPLE``. - -Older Releases -============== - -:: - - *** Fixed missing includes when wrapping F90 module data. - *** Fixed typos in docs of build_flib options. 
- *** Implemented prototype calculator if no callstatement or - callprotoargument statements are used. A warning is issued if - callstatement is used without callprotoargument. - *** Fixed transposing issue with array arguments in callback functions. - *** Removed -pyinc command line option. - *** Complete tests for Fortran 77 functions returning scalars. - *** Fixed returning character bug if --no-wrap-functions. - *** Described how to wrap F compiled Fortran F90 module procedures - with F2PY. See doc/using_F_compiler.txt. - *** Fixed the order of build_flib options when using --fcompiler=... - *** Recognize .f95 and .F95 files as Fortran sources with free format. - *** Cleaned up the output of 'f2py -h': removed obsolete items, - added build_flib options section. - *** Added --help-compiler option: it lists available Fortran compilers - as detected by numpy_distutils/command/build_flib.py. This option - is available only with -c option. - - -:Release: 2.13.175-1250 -:Date: 4 April 2002 - -:: - - *** Fixed copying of non-contiguous 1-dimensional arrays bug. - (Thanks to Travis O.). - - -:Release: 2.13.175-1242 -:Date: 26 March 2002 - -:: - - *** Fixed ignoring type declarations. - *** Turned F2PY_REPORT_ATEXIT off by default. - *** Made MAX,MIN macros available by default so that they can be - always used in signature files. - *** Disabled F2PY_REPORT_ATEXIT for FreeBSD. - - -:Release: 2.13.175-1233 -:Date: 13 March 2002 - -:: - - *** Fixed Win32 port when using f2py.bat. (Thanks to Erik Wilsher). - *** F2PY_REPORT_ATEXIT is disabled for MACs. - *** Fixed incomplete dependency calculator. - - -:Release: 2.13.175-1222 -:Date: 3 March 2002 - -:: - - *** Plugged a memory leak for intent(out) arrays with overwrite=0. - *** Introduced CDOUBLE_to_CDOUBLE,.. functions for copy_ND_array. - These cast functions probably work incorrectly in Numeric. - - -:Release: 2.13.175-1212 -:Date: 23 February 2002 - -:: - - *** Updated f2py for the latest numpy_distutils. 
- *** A nasty bug with multi-dimensional Fortran arrays is fixed - (intent(out) arrays had wrong shapes). (Thanks to Eric for - pointing out this bug). - *** F2PY_REPORT_ATEXIT is disabled by default for __WIN32__. - - -:Release: 2.11.174-1161 -:Date: 14 February 2002 - -:: - - *** Updated f2py for the latest numpy_distutils. - *** Fixed raise error when f2py missed -m flag. - *** Script name `f2py' now depends on the name of python executable. - For example, `python2.2 setup.py install' will create a f2py - script with a name `f2py2.2'. - *** Introduced 'callprotoargument' statement so that proper prototypes - can be declared. This is crucial when wrapping C functions as it - will fix segmentation faults when these wrappers use non-pointer - arguments (thanks to R. Clint Whaley for explaining this to me). - Note that in f2py generated wrapper, the prototypes have - the following forms: - extern #rtype# #fortranname#(#callprotoargument#); - or - extern #rtype# F_FUNC(#fortranname#,#FORTRANNAME#)(#callprotoargument#); - *** Cosmetic fixes to F2PY_REPORT_ATEXIT feature. - - -:Release: 2.11.174-1146 -:Date: 3 February 2002 - -:: - - *** Reviewed reference counting in call-back mechanism. Fixed few bugs. - *** Enabled callstatement for complex functions. - *** Fixed bug with initializing capi_overwrite_ - *** Introduced intent(overwrite) that is similar to intent(copy) but - has opposite effect. Renamed copy_=1 to overwrite_=0. - intent(overwrite) will make default overwrite_=1. - *** Introduced intent(in|inout,out,out=) attribute that renames - arguments name when returned. This renaming has effect only in - documentation strings. - *** Introduced 'callstatement' statement to pyf file syntax. With this - one can specify explicitly how wrapped function should be called - from the f2py generated module. WARNING: this is a dangerous feature - and should be used with care. 
It is introduced to provide a hack - to construct wrappers that may have very different signature - pattern from the wrapped function. Currently 'callstatement' can - be used only inside a subroutine or function block (it should be enough - though) and must be only in one continuous line. The syntax of the - statement is: callstatement ; - - -:Release: 2.11.174 -:Date: 18 January 2002 - -:: - - *** Fixed memory-leak for PyFortranObject. - *** Introduced extra keyword argument copy_ for intent(copy) - variables. It defaults to 1 and forces to make a copy for - intent(in) variables when passing on to wrapped functions (in case - they undesirably change the variable in-situ). - *** Introduced has_column_major_storage member function for all f2py - generated extension modules. It is equivalent to Python call - 'transpose(obj).iscontiguous()' but very efficient. - *** Introduced -DF2PY_REPORT_ATEXIT. If this is used when compiling, - a report is printed to stderr as python exits. The report includes - the following timings: - 1) time spent in all wrapped function calls; - 2) time spent in f2py generated interface around the wrapped - functions. This gives a hint whether one should worry - about storing data in proper order (C or Fortran). - 3) time spent in Python functions called by wrapped functions - through call-back interface. - 4) time spent in f2py generated call-back interface. - For now, -DF2PY_REPORT_ATEXIT is enabled by default. Use - -DF2PY_REPORT_ATEXIT_DISABLE to disable it (I am not sure if - Windows has needed tools, let me know). - Also, I appreciate if you could send me the output of 'F2PY - performance report' (with CPU and platform information) so that I - could optimize f2py generated interfaces for future releases. - *** Extension modules can be linked with dmalloc library. Use - -DDMALLOC when compiling. - *** Moved array_from_pyobj to fortranobject.c. 
- *** Usage of intent(inout) arguments is made more strict -- only - with proper type contiguous arrays are accepted. In general, - you should avoid using intent(inout) attribute as it makes - wrappers of C and Fortran functions asymmetric. I recommend using - intent(in,out) instead. - *** intent(..) has new keywords: copy,cache. - intent(copy,in) - forces a copy of an input argument; this - may be useful for cases where the wrapped function changes - the argument in situ and this may not be desired side effect. - Otherwise, it is safe to not use intent(copy) for the sake - of a better performance. - intent(cache,hide|optional) - just creates a junk of memory. - It does not care about proper storage order. Can be also - intent(in) but then the corresponding argument must be a - contiguous array with a proper elsize. - *** intent(c) can be used also for subroutine names so that - -DNO_APPEND_FORTRAN can be avoided for C functions. - - *** IMPORTANT BREAKING GOOD ... NEWS!!!: - - From now on you don't have to worry about the proper storage order - in multi-dimensional arrays that was earlier a real headache when - wrapping Fortran functions. Now f2py generated modules take care - of the proper conversations when needed. I have carefully designed - and optimized this interface to avoid any unnecessary memory usage - or copying of data. However, it is wise to use input arrays that - has proper storage order: for C arguments it is row-major and for - Fortran arguments it is column-major. But you don't need to worry - about that when developing your programs. The optimization of - initializing the program with proper data for possibly better - memory usage can be safely postponed until the program is working. - - This change also affects the signatures in .pyf files. If you have - created wrappers that take multi-dimensional arrays in arguments, - it is better to let f2py re-generate these files. 
Or you have to - manually do the following changes: reverse the axes indices in all - 'shape' macros. For example, if you have defined an array A(n,m) - and n=shape(A,1), m=shape(A,0) then you must change the last - statements to n=shape(A,0), m=shape(A,1). - - -:Release: 2.8.172 -:Date: 13 January 2002 - -:: - - *** Fixed -c process. Removed pyf_extensions function and pyf_file class. - *** Reorganized setup.py. It generates f2py or f2py.bat scripts - depending on the OS and the location of the python executable. - *** Started to use update_version from numpy_distutils that makes - f2py startup faster. As a side effect, the version number system - changed. - *** Introduced test-site/test_f2py2e.py script that runs all - tests. - *** Fixed global variables initialization problem in crackfortran - when run_main is called several times. - *** Added 'import Numeric' to C/API init function. - *** Fixed f2py.bat in setup.py. - *** Switched over to numpy_distutils and dropped fortran_support. - *** On Windows create f2py.bat file. - *** Introduced -c option: read fortran or pyf files, construct extension - modules, build, and save them to current directory. - In one word: do-it-all-in-one-call. - *** Introduced pyf_extensions(sources,f2py_opts) function. It simplifies - the extension building process considerably. Only for internal use. - *** Converted tests to use numpy_distutils in order to improve portability: - a,b,c - *** f2py2e.run_main() returns a pyf_file class instance containing - information about f2py generated files. - *** Introduced `--build-dir ' command line option. - *** Fixed setup.py for bdist_rpm command. - *** Added --numpy-setup command line option. - *** Fixed crackfortran that did not recognized capitalized type - specification with --no-lower flag. - *** `-h stdout' writes signature to stdout. - *** Fixed incorrect message for check() with empty name list. 
- - -:Release: 2.4.366 -:Date: 17 December 2001 - -:: - - *** Added command line option --[no-]manifest. - *** `make test' should run on Windows, but the results are not truthful. - *** Reorganized f2py2e.py a bit. Introduced run_main(comline_list) function - that can be useful when running f2py from another Python module. - *** Removed command line options -f77,-fix,-f90 as the file format - is determined from the extension of the fortran file - or from its header (first line starting with `!%' and containing keywords - free, fix, or f77). The later overrides the former one. - *** Introduced command line options --[no-]makefile,--[no-]latex-doc. - Users must explicitly use --makefile,--latex-doc if Makefile-, - module.tex is desired. --setup is default. Use --no-setup - to disable setup_.py generation. --overwrite-makefile - will set --makefile. - *** Added `f2py_rout_' to #capiname# in rules.py. - *** intent(...) statement with empty namelist forces intent(...) attribute for - all arguments. - *** Dropped DL_IMPORT and DL_EXPORT in fortranobject.h. - *** Added missing PyFortran_Type.ob_type initialization. - *** Added gcc-3.0 support. - *** Raising non-existing/broken Numeric as a FatalError exception. - *** Fixed Python 2.x specific += construct in fortran_support.py. - *** Fixed copy_ND_array for 1-rank arrays that used to call calloc(0,..) - and caused core dump with a non-gcc compiler (Thanks to Pierre Schnizer - for reporting this bug). - *** Fixed "warning: variable `..' might be clobbered by `longjmp' or `vfork'": - - Reorganized the structure of wrapper functions to get rid of - `goto capi_fail' statements that caused the above warning. - - -:Release: 2.3.343 -:Date: 12 December 2001 - -:: - - *** Issues with the Win32 support (thanks to Eric Jones and Tiffany Kamm): - - Using DL_EXPORT macro for init#modulename#. - - Changed PyObject_HEAD_INIT(&PyType_Type) to PyObject_HEAD_INIT(0). - - Initializing #name#_capi=NULL instead of Py_None in cb hooks. 
- *** Fixed some 'warning: function declaration isn't a prototype', mainly - in fortranobject.{c,h}. - *** Fixed 'warning: missing braces around initializer'. - *** Fixed reading a line containing only a label. - *** Fixed nonportable 'cp -fv' to shutil.copy in f2py2e.py. - *** Replaced PyEval_CallObject with PyObject_CallObject in cb_rules. - *** Replaced Py_DECREF with Py_XDECREF when freeing hidden arguments. - (Reason: Py_DECREF caused segfault when an error was raised) - *** Impl. support for `include "file"' (in addition to `include 'file'') - *** Fixed bugs (buildsetup.py missing in Makefile, in generated MANIFEST.in) - - -:Release: 2.3.327 -:Date: 4 December 2001 - -:: - - *** Sending out the third public release of f2py. - *** Support for Intel(R) Fortran Compiler (thanks to Patrick LeGresley). - *** Introduced `threadsafe' statement to pyf-files (or to be used with - the 'f2py' directive in fortran codes) to force - Py_BEGIN|END_ALLOW_THREADS block around the Fortran subroutine - calling statement in Python C/API. `threadsafe' statement has - an effect only inside a subroutine block. - *** Introduced `fortranname ' statement to be used only within - pyf-files. This is useful when the wrapper (Python C/API) function - has different name from the wrapped (Fortran) function. - *** Introduced `intent(c)' directive and statement. It is useful when - wrapping C functions. Use intent(c) for arguments that are - scalars (not pointers) or arrays (with row-ordering of elements). - - -:Release: 2.3.321 -:Date: 3 December 2001 - -:: - - *** f2py2e can be installed using distutils (run `python setup.py install'). - *** f2py builds setup_.py. Use --[no-]setup to control this - feature. setup_.py uses fortran_support module (from SciPy), - but for your convenience it is included also with f2py as an additional - package. 
Note that it has not as many compilers supported as with - using Makefile-, but new compilers should be added to - fortran_support module, not to f2py2e package. - *** Fixed some compiler warnings about else statements. diff -Nru python-numpy-1.13.3/doc/f2py/index.html python-numpy-1.14.5/doc/f2py/index.html --- python-numpy-1.13.3/doc/f2py/index.html 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/index.html 1970-01-01 00:00:00.000000000 +0000 @@ -1,264 +0,0 @@ - - - - - - -F2PY - Fortran to Python Interface Generator - - - - - -

F2PY ­ Fortran to Python Interface Generator

-by Pearu Peterson - -

What's new?

- -See NEWS.txt for the latest changes in f2py. -
-
July ??, 2002 -
Implemented prototype calculator, complete tests for scalar F77 - functions, --help-compiler option. Fixed number of bugs and - removed obsolete features. -
April 4, 2002 -
Fixed a nasty bug of copying one-dimensional non-contiguous arrays. - (Thanks to Travis O. for pointing this out). -
March 26, 2002 -
Bug fixes, turned off F2PY_REPORT_ATEXIT by default. -
March 13, 2002 -
MAC support, fixed incomplete dependency calculator, minor bug fixes. -
March 3, 2002 -
Fixed memory leak and copying of multi-dimensional complex arrays. -
Old news. -
- -

Introduction

- -Writing Python C/API wrappers for Fortran routines can be a very -tedious task, especially if a Fortran routine takes more than 20 -arguments but only few of them are relevant for the problems that they -solve. So, I have developed a tool that generates the C/API modules -containing wrapper functions of Fortran routines. I call this -tool as F2PY ­ Fortran to Python Interface Generator. -It is completely written in Python -language and can be called from the command line as f2py. -F2PY (in NumPy) is released under the terms of the NumPy License. - - -

f2py, Second Edition

- -The development of f2py started in summer of 1999. -For now (January, 2000) it has reached to stage of being a -complete tool: it scans real Fortran code, creates signature file -that the user can modify, constructs C/API module that can be -complied and imported to Python, and it creates LaTeX documentation -for wrapper functions. Below is a bit longer list of -f2py features: -
    -
  1. f2py scans real Fortran codes and produces the signature files. - The syntax of the signature files is borrowed from the Fortran 90/95 - language specification with some extensions. -
  2. f2py generates a GNU Makefile that can be used - for building shared modules (see below for a list of supported - platforms/compilers). Starting from the third release, - f2py generates setup_modulename.py for - building extension modules using distutils tools. -
  3. f2py uses the signature files to produce the wrappers for - Fortran 77 routines and their COMMON blocks. -
  4. For external arguments f2py constructs a very flexible - call-back mechanism so that Python functions can be called from - Fortran. -
  5. You can pass in almost arbitrary Python objects to wrapper - functions. If needed, f2py takes care of type-casting and - non-contiguous arrays. -
  6. You can modify the signature files so that f2py will generate - wrapper functions with desired signatures. depend() - attribute is introduced to control the initialization order of the - variables. f2py introduces intent(hide) - attribute to remove - the particular argument from the argument list of the wrapper - function and intent(c) that is useful for wrapping C -libraries. In addition, optional and -required - attributes are introduced and employed. -
  7. f2py supports almost all standard Fortran 77/90/95 constructs - and understands all basic Fortran types, including - (multi-dimensional, complex) arrays and character strings with - adjustable and assumed sizes/lengths. -
  8. f2py generates a LaTeX document containing the - documentations of the wrapped functions (argument types, dimensions, - etc). The user can easily add some human readable text to the - documentation by inserting note(<LaTeX text>) attribute to - the definition of routine signatures. -
  9. With f2py one can access also Fortran 90/95 - module subroutines from Python. -
- -For more information, see the User's -Guide of the tool. Windows users should also take a look at -f2py HOWTO for Win32 (its latest version -can be found here). - -

Requirements

-
    -
  1. You'll need Python - (1.5.2 or later, 2.2 is recommended) to run f2py - (because it uses exchanged module re). - To build generated extension modules with distutils setup script, - you'll need Python 2.x. -
  2. You'll need Numerical - Python - (version 13 or later, 20.3 is recommended) to compile - C/API modules (because they use function - PyArray_FromDimsAndDataAndDescr) -
- -

Download

- -
-
User's Guide: -
usersguide.html, - usersguide.pdf, - usersguide.ps.gz, - usersguide.dvi. -
Snapshots of the fifth public release: -
2.x/F2PY-2-latest.tar.gz -
Snapshots of earlier releases: -
rel-5.x, rel-4.x, - rel-3.x, - rel-2.x,rel-1.x, - rel-0.x -
- -

Installation

- -Unpack the source file, change to directory f2py-?-??? -and run python setup.py install. That's it! - -

Platform/Compiler Related Notes

- -f2py has been successfully tested on - -f2py will probably run on other UN*X systems as -well. Additions to the list of platforms/compilers where -f2py has been successfully used are most welcome. -

-Note: -Using Compaq Fortran -compiler on Alpha Linux is successful unless when -wrapping Fortran callback functions returning -COMPLEX. This applies also for IRIX64. -

-Note: -Fortran 90/95 module support is currently tested with Absoft F90, VAST/f90, Intel F90 compilers on Linux (MD7.0,Debian woody). - - -

Mailing list

- -There is a mailing list f2py-users -available for the users of the f2py -program and it is open for discussion, questions, and answers. You can subscribe -the list here. - -

CVS Repository

- -f2py is being developed under CVS and those who are -interested in the really latest version of f2py (possibly -unstable) can get it from the repository as follows: -
    -
  1. First you need to login (the password is guest): -
    -> cvs -d :pserver:anonymous@cens.ioc.ee:/home/cvs login
    -
    -
  2. and then do the checkout: -
    -> cvs -z6 -d :pserver:anonymous@cens.ioc.ee:/home/cvs checkout f2py2e
    -
    -
  3. In the directory f2py2e you can get the updates by hitting -
    -> cvs -z6 update -P -d
    -
    -
-You can browse f2py CVS repository here. - -

Related sites

- -
    -
  1. Numerical Python. -
  2. Pyfort -- The Python-Fortran connection tool. -
  3. Scientific Python. -
  4. SciPy -- Scientific tools for Python (includes Multipack). -
  5. The Fortran Company. -
  6. Fortran Standards. - -
  7. American National Standard Programming Language FORTRAN ANSI(R) X3.9-1978 -
  8. Mathtools.net -- A technical computing portal for all scientific and engineering needs. - -
- - -
-
-Valid HTML 4.0! -Pearu Peterson -<pearu(at)ioc.ee>
- -Last modified: Fri Jan 20 14:55:12 MST 2006 - -
- - - - -

-

-This Python -ring site owned by Pearu Peterson. -
-[ - Previous 5 Sites -| - Previous -| - Next -| - Next 5 Sites -| - Random Site -| - List Sites -] -
-

- - - - - - - - - - diff -Nru python-numpy-1.13.3/doc/f2py/intro.tex python-numpy-1.14.5/doc/f2py/intro.tex --- python-numpy-1.13.3/doc/f2py/intro.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/intro.tex 1970-01-01 00:00:00.000000000 +0000 @@ -1,158 +0,0 @@ - -\section{Introduction} -\label{sec:intro} - -\fpy is a command line tool that generates Python C/API modules for -interfacing Fortran~77/90/95 codes and Fortran~90/95 modules from -Python. In general, using \fpy an -interface is produced in three steps: -\begin{itemize} -\item[(i)] \fpy scans Fortran sources and creates the so-called - \emph{signature} file; the signature file contains the signatures of - Fortran routines; the signatures are given in the free format of the - Fortran~90/95 language specification. Latest version of \fpy - generates also a make file for building shared module. - About currently supported compilers see the \fpy home page -\item[(ii)] Optionally, the signature files can be modified manually - in order to dictate how the Fortran routines should be called or - seemed from the Python environment. -\item[(iii)] \fpy reads the signature files and generates Python C/API - modules that can be compiled and imported to Python code. In - addition, a LaTeX document is generated that contains the - documentation of wrapped functions. -\end{itemize} -(Note that if you are satisfied with the default signature that \fpy -generates in step (i), all three steps can be covered with just -one call to \fpy --- by not specifying `\texttt{-h}' flag). -Latest versions of \fpy support so-called \fpy directive that allows -inserting various information about wrapping directly to Fortran -source code as comments (\texttt{f2py }). - -The following diagram illustrates the usage of the tool: -\begin{verbatim} -! Fortran file foo.f: - subroutine foo(a) - integer a - a = a + 5 - end -\end{verbatim} -\begin{verbatim} -! 
Fortran file bar.f: - function bar(a,b) - integer a,b,bar - bar = a + b - end -\end{verbatim} -\begin{itemize} -\item[(i)] \shell{\fpy foo.f bar.f -m foobar -h foobar.pyf} -\end{itemize} -\begin{verbatim} -!%f90 -! Signature file: foobar.pyf -python module foobar ! in - interface ! in :foobar - subroutine foo(a) ! in :foobar:foo.f - integer intent(inout) :: a - end subroutine foo - function bar(a,b) ! in :foobar:bar.f - integer :: a - integer :: b - integer :: bar - end function bar - end interface -end python module foobar -\end{verbatim} -\begin{itemize} -\item[(ii)] Edit the signature file (here I made \texttt{foo}s - argument \texttt{a} to be \texttt{intent(inout)}, see - Sec.~\ref{sec:attributes}). -\item[(iii)] \shell{\fpy foobar.pyf} -\end{itemize} -\begin{verbatim} -/* Python C/API module: foobarmodule.c */ -... -\end{verbatim} -\begin{itemize} -\item[(iv)] \shell{make -f Makefile-foobar} -%\shell{gcc -shared -I/usr/include/python1.5/ foobarmodule.c\bs\\ -%foo.f bar.f -o foobarmodule.so} -\end{itemize} -\begin{verbatim} -Python shared module: foobarmodule.so -\end{verbatim} -\begin{itemize} -\item[(v)] Usage in Python: -\end{itemize} -\vspace*{-4ex} -\begin{verbatim} ->>> import foobar ->>> print foobar.__doc__ -This module 'foobar' is auto-generated with f2py (version:1.174). -The following functions are available: - foo(a) - bar = bar(a,b) -. ->>> print foobar.bar(2,3) -5 ->>> from Numeric import * ->>> a = array(3) ->>> print a,foobar.foo(a),a -3 None 8 -\end{verbatim} -Information about how to call \fpy (steps (i) and (iii)) can be -obtained by executing\\ -\shell{\fpy}\\ -This will print the usage instructions. - Step (iv) is system dependent -(compiler and the locations of the header files \texttt{Python.h} and -\texttt{arrayobject.h}), and so you must know how to compile a shared -module for Python in you system. 
- -The next Section describes the step (ii) in more detail in order to -explain how you can influence to the process of interface generation -so that the users can enjoy more writing Python programs using your -wrappers that call Fortran routines. Step (v) is covered in -Sec.~\ref{sec:notes}. - - -\subsection{Features} -\label{sec:features} - -\fpy has the following features: -\begin{enumerate} -\item \fpy scans real Fortran codes and produces the signature files. - The syntax of the signature files is borrowed from the Fortran~90/95 - language specification with some extensions. -\item \fpy uses the signature files to produce the wrappers for - Fortran~77 routines and their \texttt{COMMON} blocks. -\item For \texttt{external} arguments \fpy constructs a very flexible - call-back mechanism so that Python functions can be called from - Fortran. -\item You can pass in almost arbitrary Python objects to wrapper - functions. If needed, \fpy takes care of type-casting and - non-contiguous arrays. -\item You can modify the signature files so that \fpy will generate - wrapper functions with desired signatures. \texttt{depend()} - attribute is introduced to control the initialization order of the - variables. \fpy introduces \texttt{intent(hide)} attribute to remove - the particular argument from the argument list of the wrapper - function. In addition, \texttt{optional} and \texttt{required} - attributes are introduced and employed. -\item \fpy supports almost all standard Fortran~77/90/95 constructs - and understands all basic Fortran types, including - (multi-dimensional, complex) arrays and character strings with - adjustable and assumed sizes/lengths. -\item \fpy generates a LaTeX document containing the - documentations of the wrapped functions (argument types, dimensions, - etc). The user can easily add some human readable text to the - documentation by inserting \texttt{note()} attribute to - the definition of routine signatures. 
-\item \fpy generates a GNU make file that can be used for building - shared modules calling Fortran functions. -\item \fpy supports wrapping Fortran 90/95 module routines. -\end{enumerate} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: diff -Nru python-numpy-1.13.3/doc/f2py/Makefile python-numpy-1.14.5/doc/f2py/Makefile --- python-numpy-1.13.3/doc/f2py/Makefile 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -# Makefile for compiling f2py2e documentation (dvi, ps, html) -# Pearu Peterson - -REL=4 -TOP = usersguide -LATEXSRC = bugs.tex commands.tex f2py2e.tex intro.tex notes.tex signaturefile.tex -MAINLATEX = f2py2e - -LATEX = latex -PDFLATEX = pdflatex - -COLLECTINPUT = ./collectinput.py -INSTALLDATA = install -m 644 -c - -TTH = tth -TTHFILTER = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -L$(MAINLATEX) -i -TTHFILTER2 = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -Lpython9 -i -TTHFILTER3 = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -Lfortranobject -i -TTHMISSING = "\ -***************************************************************\n\ -Warning: Could not find tth (a TeX to HTML translator) \n\ - or an error was arisen by tth\n\ -You can download tth from http://hutchinson.belmont.ma.us/tth/ \n\ -or\n\ -use your favorite LaTeX to HTML translator on file tmp_main.tex\n\ -***************************************************************\ -" - -all: dvi ps html clean -$(MAINLATEX).dvi: $(LATEXSRC) - $(LATEX) $(MAINLATEX).tex - $(LATEX) $(MAINLATEX).tex - $(LATEX) $(MAINLATEX).tex - $(PDFLATEX) $(MAINLATEX).tex -$(TOP).dvi: $(MAINLATEX).dvi - cp -f $(MAINLATEX).dvi $(TOP).dvi - mv -f $(MAINLATEX).pdf $(TOP).pdf -$(TOP).ps: $(TOP).dvi - dvips $(TOP).dvi -o -$(TOP).html: $(LATEXSRC) - $(COLLECTINPUT) < $(MAINLATEX).tex > tmp_$(MAINLATEX).tex - @test `which $(TTH)` && cat tmp_$(MAINLATEX).tex | 
$(TTHFILTER) > $(TOP).html\ - || echo -e $(TTHMISSING) -dvi: $(TOP).dvi -ps: $(TOP).ps - gzip -f $(TOP).ps -html: $(TOP).html - -python9: - cp -f python9.tex f2python9-final/src/ - cd f2python9-final && mk_html.sh - cd f2python9-final && mk_ps.sh - cd f2python9-final && mk_pdf.sh -pyfobj: - $(LATEX) fortranobject.tex - $(LATEX) fortranobject.tex - $(LATEX) fortranobject.tex - @test `which $(TTH)` && cat fortranobject.tex | $(TTHFILTER3) > pyfobj.html\ - || echo -e $(TTHMISSING) - dvips fortranobject.dvi -o pyfobj.ps - gzip -f pyfobj.ps - pdflatex fortranobject.tex - mv fortranobject.pdf pyfobj.pdf - -WWWDIR=/net/cens/home/www/unsecure/projects/f2py2e/ -wwwpage: all - $(INSTALLDATA) index.html $(TOP).html $(TOP).ps.gz $(TOP).dvi $(TOP).pdf \ - Release-$(REL).x.txt ../NEWS.txt win32_notes.txt $(WWWDIR) - $(INSTALLDATA) pyfobj.{ps.gz,pdf,html} $(WWWDIR) - $(INSTALLDATA) f2python9-final/f2python9.{ps.gz,pdf,html} f2python9-final/{flow,structure,aerostructure}.jpg $(WWWDIR) -clean: - rm -f tmp_$(MAINLATEX).* $(MAINLATEX).{aux,dvi,log,toc} -distclean: - rm -f tmp_$(MAINLATEX).* $(MAINLATEX).{aux,dvi,log,toc} - rm -f $(TOP).{ps,dvi,html,pdf,ps.gz} - rm -f *~ diff -Nru python-numpy-1.13.3/doc/f2py/multiarray/array_from_pyobj.c python-numpy-1.14.5/doc/f2py/multiarray/array_from_pyobj.c --- python-numpy-1.13.3/doc/f2py/multiarray/array_from_pyobj.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/multiarray/array_from_pyobj.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,323 +0,0 @@ -/* - * File: array_from_pyobj.c - * - * Description: - * ------------ - * Provides array_from_pyobj function that returns a contiguous array - * object with the given dimensions and required storage order, either - * in row-major (C) or column-major (Fortran) order. The function - * array_from_pyobj is very flexible about its Python object argument - * that can be any number, list, tuple, or array. - * - * array_from_pyobj is used in f2py generated Python extension - * modules. 
- * - * Author: Pearu Peterson - * Created: 13-16 January 2002 - * $Id: array_from_pyobj.c,v 1.1 2002/01/16 18:57:33 pearu Exp $ - */ - - -#define ARR_IS_NULL(arr,mess) \ -if (arr==NULL) { \ - fprintf(stderr,"array_from_pyobj:" mess); \ - return NULL; \ -} - -#define CHECK_DIMS_DEFINED(rank,dims,mess) \ -if (count_nonpos(rank,dims)) { \ - fprintf(stderr,"array_from_pyobj:" mess); \ - return NULL; \ -} - -#define HAS_PROPER_ELSIZE(arr,type_num) \ - ((PyArray_DescrFromType(type_num)->elsize) == (arr)->descr->elsize) - -/* static */ -/* void f2py_show_args(const int type_num, */ -/* const int *dims, */ -/* const int rank, */ -/* const int intent) { */ -/* int i; */ -/* fprintf(stderr,"array_from_pyobj:\n\ttype_num=%d\n\trank=%d\n\tintent=%d\n",\ */ -/* type_num,rank,intent); */ -/* for (i=0;i1)) { - lazy_transpose(arr); - arr->flags &= ~NPY_CONTIGUOUS; - } - Py_INCREF(arr); - } - return arr; - } - - if (PyArray_Check(obj)) { /* here we have always intent(in) or - intent(inout) */ - - PyArrayObject *arr = (PyArrayObject *)obj; - int is_cont = (intent & F2PY_INTENT_C) ? - (ISCONTIGUOUS(arr)) : (array_has_column_major_storage(arr)); - - if (check_and_fix_dimensions(arr,rank,dims)) - return NULL; /*XXX: set exception */ - - if ((intent & F2PY_INTENT_COPY) - || (! (is_cont - && HAS_PROPER_ELSIZE(arr,type_num) - && PyArray_CanCastSafely(arr->descr->type_num,type_num)))) { - PyArrayObject *tmp_arr = NULL; - if (intent & F2PY_INTENT_INOUT) { - ARR_IS_NULL(NULL,"intent(inout) array must be contiguous and" - " with a proper type and size.\n") - } - if ((rank>1) && (! 
(intent & F2PY_INTENT_C))) - lazy_transpose(arr); - if (PyArray_CanCastSafely(arr->descr->type_num,type_num)) { - tmp_arr = (PyArrayObject *)PyArray_CopyFromObject(obj,type_num,0,0); - ARR_IS_NULL(arr,"CopyFromObject failed: array.\n"); - } else { - tmp_arr = (PyArrayObject *)PyArray_FromDims(arr->nd, - arr->dimensions, - type_num); - ARR_IS_NULL(tmp_arr,"FromDims failed: array with unsafe cast.\n"); - if (copy_ND_array(arr,tmp_arr)) - ARR_IS_NULL(NULL,"copy_ND_array failed: array with unsafe cast.\n"); - } - if ((rank>1) && (! (intent & F2PY_INTENT_C))) { - lazy_transpose(arr); - lazy_transpose(tmp_arr); - tmp_arr->flags &= ~NPY_CONTIGUOUS; - } - arr = tmp_arr; - } - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - return arr; - } - - if ((obj==Py_None) && (intent & F2PY_OPTIONAL)) { - PyArrayObject *arr = NULL; - CHECK_DIMS_DEFINED(rank,dims,"optional must have defined dimensions.\n"); - arr = (PyArrayObject *)PyArray_FromDims(rank,dims,type_num); - ARR_IS_NULL(arr,"FromDims failed: optional.\n"); - if (intent & F2PY_INTENT_OUT) { - if ((!(intent & F2PY_INTENT_C)) && (rank>1)) { - lazy_transpose(arr); - arr->flags &= ~NPY_CONTIGUOUS; - } - Py_INCREF(arr); - } - return arr; - } - - if (intent & F2PY_INTENT_INOUT) { - ARR_IS_NULL(NULL,"intent(inout) argument must be an array.\n"); - } - - { - PyArrayObject *arr = (PyArrayObject *) \ - PyArray_ContiguousFromObject(obj,type_num,0,0); - ARR_IS_NULL(arr,"ContiguousFromObject failed: not a sequence.\n"); - if (check_and_fix_dimensions(arr,rank,dims)) - return NULL; /*XXX: set exception */ - if ((rank>1) && (! 
(intent & F2PY_INTENT_C))) { - PyArrayObject *tmp_arr = NULL; - lazy_transpose(arr); - arr->flags &= ~NPY_CONTIGUOUS; - tmp_arr = (PyArrayObject *) PyArray_CopyFromObject((PyObject *)arr,type_num,0,0); - Py_DECREF(arr); - arr = tmp_arr; - ARR_IS_NULL(arr,"CopyFromObject(Array) failed: intent(fortran)\n"); - lazy_transpose(arr); - arr->flags &= ~NPY_CONTIGUOUS; - } - if (intent & F2PY_INTENT_OUT) - Py_INCREF(arr); - return arr; - } - -} - - /*****************************************/ - /* Helper functions for array_from_pyobj */ - /*****************************************/ - -static -int array_has_column_major_storage(const PyArrayObject *ap) { - /* array_has_column_major_storage(a) is equivalent to - transpose(a).iscontiguous() but more efficient. - - This function can be used in order to decide whether to use a - Fortran or C version of a wrapped function. This is relevant, for - example, in choosing a clapack or flapack function depending on - the storage order of array arguments. - */ - int sd; - int i; - sd = ap->descr->elsize; - for (i=0;ind;++i) { - if (ap->dimensions[i] == 0) return 1; - if (ap->strides[i] != sd) return 0; - sd *= ap->dimensions[i]; - } - return 1; -} - -static -void lazy_transpose(PyArrayObject* arr) { - /* - Changes the order of array strides and dimensions. This - corresponds to the lazy transpose of a Numeric array in-situ. - Note that this function is assumed to be used even times for a - given array. Otherwise, the caller should set flags &= ~NPY_CONTIGUOUS. - */ - int rank,i,s,j; - rank = arr->nd; - if (rank < 2) return; - - for(i=0,j=rank-1;istrides[i]; - arr->strides[i] = arr->strides[j]; - arr->strides[j] = s; - s = arr->dimensions[i]; - arr->dimensions[i] = arr->dimensions[j]; - arr->dimensions[j] = s; - } -} - -static -int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,int *dims) { - /* - This function fills in blanks (that are -1's) in dims list using - the dimensions from arr. 
It also checks that non-blank dims will - match with the corresponding values in arr dimensions. - */ - const int arr_size = (arr->nd)?PyArray_Size((PyObject *)arr):1; - - if (rank > arr->nd) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ - int new_size = 1; - int free_axe = -1; - int i; - /* Fill dims where -1 or 0; check dimensions; calc new_size; */ - for(i=0;ind;++i) { - if (dims[i] >= 0) { - if (dims[i]!=arr->dimensions[i]) { - fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n", - i,dims[i],arr->dimensions[i]); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else { - dims[i] = arr->dimensions[i] ? arr->dimensions[i] : 1; - } - new_size *= dims[i]; - } - for(i=arr->nd;i1) { - fprintf(stderr,"%d-th dimension must be %d but got 0 (not defined).\n", - i,dims[i]); - return 1; - } else if (free_axe<0) - free_axe = i; - else - dims[i] = 1; - if (free_axe>=0) { - dims[free_axe] = arr_size/new_size; - new_size *= dims[free_axe]; - } - if (new_size != arr_size) { - fprintf(stderr,"confused: new_size=%d, arr_size=%d (maybe too many free" - " indices)\n",new_size,arr_size); - return 1; - } - } else { - int i; - for (i=rank;ind;++i) - if (arr->dimensions[i]>1) { - fprintf(stderr,"too many axes: %d, expected rank=%d\n",arr->nd,rank); - return 1; - } - for (i=0;i=0) { - if (arr->dimensions[i]!=dims[i]) { - fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n", - i,dims[i],arr->dimensions[i]); - return 1; - } - if (!dims[i]) dims[i] = 1; - } else - dims[i] = arr->dimensions[i]; - } - return 0; -} - -/* End of file: array_from_pyobj.c */ diff -Nru python-numpy-1.13.3/doc/f2py/multiarray/bar.c python-numpy-1.14.5/doc/f2py/multiarray/bar.c --- python-numpy-1.13.3/doc/f2py/multiarray/bar.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/multiarray/bar.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ - -#include - -void bar(int *a,int m,int n) { - int i,j; - printf("C:"); - printf("m=%d, n=%d\n",m,n); - for (i=0;id ==> generate tiling 
loop for index i with step size of - * e ==> generate tiling loop for index j with step size of - * i ==> generate loop for index i with unrolling factor of - * j ==> generate loop for index j with unrolling factor of - * ; ==> input terminator (required) - * rules are: - * i,j tokens must appear - * if d appears, it must appear before i - * if e appears, it must appear before j - * ; must appear - * matrix size is controlled by #define N in this program. - * - * this code was adapted from mmgen.c v1.2 and extended to generate pre- - * condition loops for unrolling factors that do not evenly divide the - * matrix size (or the tiling step size for loop nests with a tiling loop). - * note that this program only provides a preconditioning loop for the - * innermost loop. unrolling factors for non-innermost loops that do not - * evenly divide the matrix size (or step size) are not supported. - * - * my interest in this program generator is to hook it to a sentence - * generator and a minimum execution time finder, that is - * while((sentence=sgen())!=NULL){ - * genprogram=tpgen(sentence); - * system("cc -O4 genprogram.c"); - * system("a.out >> tpresults"); - * } - * findmintime(tpresults); - * this will find the optimum algorithm for the host system via an - * exhaustive search. - * - * please report bugs and suggestions for enhancements to me. 
- */ - -#include -#include -#include -#define N 500 - -#define ALLOC1 temp1=(struct line *)malloc(sizeof(struct line));\ -temp1->indentcnt=indentcnt; - -#define LINK1 temp1->next=insertbefore;\ -insertafter->next=temp1;\ -insertafter=temp1; - -#define INSERT1 temp1->next=start;\ -start=temp1; - -#define ALLOC2 temp1=(struct line *)malloc(sizeof(struct line));\ -temp2=(struct line *)malloc(sizeof(struct line));\ -temp1->indentcnt=indentcnt;\ -temp2->indentcnt=indentcnt++; - -#define LINK2 temp1->next=temp2;\ -temp2->next=insertbefore;\ -insertafter->next=temp1;\ -insertafter=temp1;\ -insertbefore=temp2; - -struct line{ int indentcnt; char line[256]; struct line *next; }; - -int indentcnt; -int iflag,jflag; -int ijflag,jiflag; -int dflag,eflag; -int counter; -int iistep,jjstep; -int iunroll,junroll; -int precond; - -char c; -int i,ttp,nt; -char *p0; -char tptype[80]; -char number[10]; - -struct line *start,*head,*insertafter,*insertbefore,*temp1,*temp2; - -void processloop(); -void processstmt(); - -main(){ - - indentcnt=0; - iflag=jflag=0; - ijflag=jiflag=0; - dflag=eflag=0; - iunroll=junroll=0; - counter=1; - precond=0; - ttp=0; - - start=NULL; - ALLOC2 - sprintf(temp1->line,"/* begin */\nt_start=second();\n"); - sprintf(temp2->line,"/* end */\nt_end = second();\n"); - head=temp1; temp1->next=temp2; temp2->next=NULL; - insertafter=temp1; insertbefore=temp2; - - while((c=getchar())!=';'){ - tptype[ttp++]=c; - if(isdigit(c)){ - nt=0; - while(isdigit(c)){ - number[nt++]=c; - c=getchar(); - if(c==';'){ fprintf(stderr,"unexpected ;!\n"); exit(1); } - tptype[ttp++]=c; - } - number[nt]='\0'; - sscanf(number,"%d",&counter); - } - switch(c){ - case 'd': - if(iflag){ fprintf(stderr,"d cannot appear after i!\n"); exit(1); } - dflag++; - ALLOC1 - sprintf(temp1->line,"#define IISTEP %d\n",counter); - INSERT1 - iistep=counter; - counter=1; - ALLOC2 - sprintf(temp1->line,"for(ii=0;ii<%d;ii+=IISTEP){\n",N); - sprintf(temp2->line,"}\n",N); - LINK2 - ALLOC1 - 
sprintf(temp1->line,"it=min(ii+IISTEP,%d);\n",N); - LINK1 - break; - case 'e': - if(jflag){ fprintf(stderr,"e cannot appear after j!\n"); exit(1); } - eflag++; - ALLOC1 - sprintf(temp1->line,"#define JJSTEP %d\n",counter); - INSERT1 - jjstep=counter; - counter=1; - ALLOC2 - sprintf(temp1->line,"for(jj=0;jj<%d;jj+=JJSTEP){\n",N); - sprintf(temp2->line,"}\n",N); - LINK2 - ALLOC1 - sprintf(temp1->line,"jt=min(jj+JJSTEP,%d);\n",N); - LINK1 - break; - case 'i': - iunroll=counter; - counter=1; - iflag++; if(jflag) jiflag++; - if(dflag) precond=iistep%iunroll; else precond=N%iunroll; - if(precond&&(jiflag==0)){ - fprintf(stderr,"unrolling factor for outer loop i\n"); - fprintf(stderr," does not evenly divide matrix/step size!\n"); - exit(1); - } - if(dflag&&(iunroll>1)&&(N%iistep)){ - fprintf(stderr,"with unrolling of i, step size for tiled loop ii\n"); - fprintf(stderr," does not evenly divide matrix size!\n"); - exit(1); - } - processloop('i',dflag,iunroll,precond,junroll); - break; - case 'j': - junroll=counter; - counter=1; - jflag++; if(iflag) ijflag++; - if(eflag) precond=jjstep%junroll; else precond=N%junroll; - if(precond&&(ijflag==0)){ - fprintf(stderr,"unrolling factor for outer loop j\n"); - fprintf(stderr," does not evenly divide matrix/step size!\n"); - exit(1); - } - if(eflag&&(junroll>1)&&(N%jjstep)){ - fprintf(stderr,"with unrolling of j, step size for tiled loop jj\n"); - fprintf(stderr," does not evenly divide matrix size!\n"); - exit(1); - } - processloop('j',eflag,junroll,precond,iunroll); - break; - default: break; - } - } - processstmt(); - - tptype[ttp++]=c; - - if((iflag==0)||(jflag==0)){ - fprintf(stderr, - "one of the loops (i,j) was not specified!\n"); - exit(1); - } - - temp1=start; - while(temp1!=NULL){ - printf("%s",temp1->line); - temp1=temp1->next; - } - printf("#include \n"); - printf("#include \n"); - printf("#include \n"); - if(dflag|eflag) printf("#define min(a,b) ((a)<=(b)?(a):(b))\n"); - printf("double second();\n"); - printf("double 
t_start,t_end,t_total;\n"); - printf("int times;\n"); - printf("\ndouble b[%d][%d],dummy[10000],bt[%d][%d];\n\nmain(){\n" - ,N,N,N,N); - if(precond) printf(" int i,j,n;\n"); else printf(" int i,j;\n"); - if(dflag) printf(" int ii,it;\n"); - if(eflag) printf(" int jj,jt;\n"); - printf("/* set coefficients so that result matrix should have \n"); - printf(" * column entries equal to column index\n"); - printf(" */\n"); - printf(" for (i=0;i<%d;i++){\n",N); - printf(" for (j=0;j<%d;j++){\n",N); - printf(" b[i][j] = (double) i;\n"); - printf(" }\n"); - printf(" }\n"); - printf("\n t_total=0.0;\n for(times=0;times<10;times++){\n\n",N); - printf("/* try to flush cache */\n"); - printf(" for(i=0;i<10000;i++){\n",N); - printf(" dummy[i] = 0.0;\n"); - printf(" }\n"); - printf("%s",head->line); - temp1=head->next; - while(temp1!=NULL){ - for(i=0;iindentcnt;i++) printf(" "); - while((p0=strstr(temp1->line,"+0"))!=NULL){ - *p0++=' '; *p0=' '; - } - printf("%s",temp1->line); - temp1=temp1->next; - } - printf("\n t_total+=t_end-t_start;\n }\n"); - printf("/* check result */\n"); - printf(" for (j=0;j<%d;j++){\n",N); - printf(" for (i=0;i<%d;i++){\n",N); - printf(" if (bt[i][j]!=((double)j)){\n"); - printf(" fprintf(stderr,\"error in bt[%cd][%cd]",'%','%'); - printf("\\n\",i,j);\n"); - printf(" fprintf(stderr,\" for %s\\n\");\n",tptype); - printf(" exit(1);\n"); - printf(" }\n"); - printf(" }\n"); - printf(" }\n"); - tptype[ttp]='\0'; - printf(" printf(\"%c10.2f secs\",t_total);\n",'%'); - printf(" printf(\" for 10 runs of %s\\n\");\n",tptype); - printf("}\n"); - printf("double second(){\n"); - printf(" void getrusage();\n"); - printf(" struct rusage ru;\n"); - printf(" double t;\n"); - printf(" getrusage(RUSAGE_SELF,&ru);\n"); - printf(" t = ((double)ru.ru_utime.tv_sec) +\n"); - printf(" ((double)ru.ru_utime.tv_usec)/1.0e6;\n"); - printf(" return t;\n"); - printf("}\n"); - -} - -void processloop(index,flag,unroll,precond,unroll2) -char index; -int flag,unroll,precond,unroll2; -{ 
- char build[80],temp[40]; - int n; - if(precond){ - ALLOC1 - sprintf(temp1->line,"/* preconditioning loop for unrolling factor */\n"); - LINK1 - if(unroll2==1){ - build[0]='\0'; - if(flag){ - if(index='i') - sprintf(temp,"n=IISTEP%c%d; ",'%',unroll); - else - sprintf(temp,"n=JJSTEP%c%d; ",'%',unroll); - strcat(build,temp); - sprintf(temp,"for(%c=%c%c;%c<%c%c+n;%c++) ",index,index,index, - index,index,index,index); - strcat(build,temp); - }else{ - sprintf(temp,"n=%d%c%d; ",N,'%',unroll); - strcat(build,temp); - sprintf(temp,"for(%c=0;%cline,"%s\n",build); - LINK1 - }else{ - if(flag){ - ALLOC1 - if(index=='i') - sprintf(temp1->line,"n=IISTEP%c%d;\n",'%',unroll); - else - sprintf(temp1->line,"n=JJSTEP%c%d;\n",'%',unroll); - LINK1 - ALLOC1 - sprintf(temp1->line,"for(%c=%c%c;%c<%c%c+n;%c++){\n",index,index,index, - index,index,index,index); - LINK1 - }else{ - ALLOC1 - sprintf(temp1->line,"n=%d%c%d;\n",N,'%',unroll); - LINK1 - ALLOC1 - sprintf(temp1->line,"for(%c=0;%cline," bt[i][j+%d]=b[j+%d][i];\n",n,n); - LINK1 - } - }else{ - for(n=0;nline," bt[i+%d][j]=b[j][i+%d];\n",n,n); - LINK1 - } - } - ALLOC1 - sprintf(temp1->line,"}\n"); - LINK1 - } - ALLOC2 - if(flag){ - sprintf(temp1->line,"for(%c=%c%c+n;%c<%ct;%c+=%d){\n",index,index,index, - index,index,index,unroll); - }else{ - sprintf(temp1->line,"for(%c=n;%c<%d;%c+=%d){\n",index,index,N,index, - unroll); - } - sprintf(temp2->line,"}\n",N); - LINK2 - }else{ - ALLOC2 - if(unroll==1){ - if(flag){ - sprintf(temp1->line,"for(%c=%c%c;%c<%ct;%c++){\n",index,index,index, - index,index,index); - }else{ - sprintf(temp1->line,"for(%c=0;%c<%d;%c++){\n",index,index,N,index); - } - }else{ - if(flag){ - sprintf(temp1->line,"for(%c=%c%c;%c<%ct;%c+=%d){\n",index,index,index, - index,index,index,unroll); - }else{ - sprintf(temp1->line,"for(%c=0;%c<%d;%c+=%d){\n",index,index,N,index, - unroll); - } - } - sprintf(temp2->line,"}\n",N); - LINK2 - } -} - -void processstmt() -{ - int i,j; - 
for(i=0;iline,"bt[i+%d][j+%d]=b[j+%d][i+%d];\n",i,j,j,i); - LINK1 - } - } -} --- -Mark Smotherman, Computer Science Dept., Clemson University, Clemson, SC - -======================================================================= -From: has (h.genceli@bre.com) - Subject: transpose of a nxm matrix stored in a vector !!! - Newsgroups: sci.math.num-analysis - Date: 2000/07/25 - - -If I have a matrix nrows x ncols, I can store it in a vector. -so A(i,j) is really a[i*ncols+j]. So really TRANS of A -(say B) is really is also a vector B where - -0<=i b[j*nrows+i] wrote: -> If I have a matrix nrows x ncols, I can store it in a vector. -> so A(i,j) is really a[i*ncols+j]. So really TRANS of A -> (say B) is really is also a vector B where - -[snip] - -Hey, if you just want to do a transpose-matrix vector multiply, there is -no need to explicitly store the transpose matrix in another array and -doubling the storage! - -W.C. --- - - From: Robin Becker (robin@jessikat.fsnet.co.uk) - Subject: Re: transpose of a nxm matrix stored in a vector !!! - Newsgroups: sci.math.num-analysis - Date: 2000/07/25 - - -In article , has -writes ->If I have a matrix nrows x ncols, I can store it in a vector. ->so A(i,j) is really a[i*ncols+j]. So really TRANS of A ->(say B) is really is also a vector B where -> ->0<=i b[j*nrows+i] b[j*nrows+i] = a[i*ncols+j]. -> ->Fine but I want to use only one array a to do this transformation. -> ->i.e a[j*nrows+i] = a[i*ncols+j]. this will itself ->erase some elements so each time a swap is necessary in a loop. -> ->temp = a[j*nrows+i] ->a[j*nrows+i] = a[i*ncols+j] ->a[i*ncols+j] = temp -> ->but still this will lose some info as it is, so indexing ->should have more intelligence in it ???? anybody ->can give me a lead here, thanks. 
-> ->Has -> -> -> - -void dmx_transpose(unsigned n, unsigned m, double* a, double* b) -{ - unsigned size = m*n; - if(b!=a){ - real *bmn, *aij, *anm; - bmn = b + size; /*b+n*m*/ - anm = a + size; - while(b3){ - unsigned i,row,column,current; - for(i=1, size -= 2;ii) { - real temp = a[i]; - a[i] = a[current]; - a[current] = temp; - } - } - } -} --- -Robin Becker - - From: E. Robert Tisdale (edwin@netwood.net) - Subject: Re: transpose of a nxm matrix stored in a vector !!! - Newsgroups: sci.math.num-analysis - Date: 2000/07/25 - - -Take a look at -The C++ Scalar, Vector, Matrix and Tensor class library - - http://www.netwood.net/~edwin/svmt/ - -SubVector& - SubVector::transpose(Extent p, Extent q) { - SubVector& - v = *this; - if (1 < p && 1 < q) { - // A vector v of extent n = qp is viewed as a q by p matrix U and - // a p by q matrix V where U_{ij} = v_{p*i+j} and V_{ij} = v_{q*i+j}. - // The vector v is modified in-place so that V is the transpose of U. - // The algorithm searches for every sequence k_s of S indices - // such that a circular shift of elements v_{k_s} <-- v_{k_{s+1}} - // and v_{k_{S-1}} <-- v_{k_0} effects an in-place transpose. - Extent n = q*p; - Extent m = 0; // count up to n-2 - Offset l = 0; // 1 <= l <= n-2 - while (++l < n-1 && m < n-2) { - Offset k = l; - Offset j = k; - while (l < (k = (j%p)*q + j/p)) { // Search backward for k < l. - j = k; - } - // If a sequence of indices beginning with l has any index k < l, - // it has already been transposed. The sequence length S = 1 - // and diagonal element v_k is its own transpose if k = j. - // Skip every index sequence that has already been transposed. - if (k == l) { // a new sequence - if (k < j) { // with 1 < S - TYPE x = v[k]; // save v_{k_0} - do { - v[k] = v[j]; // v_{k_{s}} <-- v_{k_{s+1}} - k = j; - ++m; - } while (l < (j = (k%q)*p + k/q)); - v[k] = x; // v_{k_{S-1}} <-- v_{k_0} - } - ++m; - } - } - } return v; - } - - - -SubVector& - -Read the rest of this message... 
(50 more lines) - - From: Victor Eijkhout (eijkhout@disco.cs.utk.edu) - Subject: Re: transpose of a nxm matrix stored in a vector !!! - Newsgroups: sci.math.num-analysis - Date: 2000/07/25 - - -"Alan Miller" writes: - -> The attached routine does an in situ transpose. -> begin 666 Dtip.f90 -> M4U5"4D]55$E.12!D=&EP("AA+"!N,2P@;C(L(&YD:6TI#0HA("TM+2TM+2TM - -Hm. F90? You're not silently allocating a temporary I hope? - -(Why did you have to encode this? Now I have to save, this decode, ... -and all for plain ascii?) - --- -Victor Eijkhout -"When I was coming up, [..] we knew exactly who the they were. It was us -versus them, and it was clear who the them was were. Today, we are not -so sure who the they are, but we know they're there." [G.W. Bush] - - From: Alan Miller (amiller_@_vic.bigpond.net.au) - Subject: Re: transpose of a nxm matrix stored in a vector !!! - Newsgroups: sci.math.num-analysis - Date: 2000/07/25 - - -Victor Eijkhout wrote in message ... ->"Alan Miller" writes: -> ->> The attached routine does an in situ transpose. ->> begin 666 Dtip.f90 ->> M4U5"4D]55$E.12!D=&EP("AA+"!N,2P@;C(L(&YD:6TI#0HA("TM+2TM+2TM -> ->Hm. F90? You're not silently allocating a temporary I hope? -> ->(Why did you have to encode this? Now I have to save, this decode, ... ->and all for plain ascii?) -> - -I know the problem. -I sometimes use a Unix system, and have to use decode64 to read -attachments. On the other hand, Windows wraps lines around, -formats then and generally makes the code unreadable. - -The straight code for dtip (double transpose in place) is attached -this time. - ->-- ->Victor Eijkhout - - --- -Alan Miller, Retired Scientist (Statistician) -CSIRO Mathematical & Information Sciences -Alan.Miller -at- vic.cmis.csiro.au -http://www.ozemail.com.au/~milleraj -http://users.bigpond.net.au/amiller/ - - -================================================================= - -From: Darran Edmundson (dedmunds@sfu.ca) - Subject: array reordering algorithm? 
- Newsgroups: sci.math.num-analysis - Date: 1995/04/30 - - -A code I've written refers to a complex array as two separate real arrays. -However, I have a canned subroutine which expects a single array where the -real and imaginary values alternate. Essentially I have a case of mismatched -data structures, yet for reasons that I'd rather not go into, I'm stuck with them. - -Assuming that the two real arrays A and B are sequential in memory, and -that the single array of alternating real/imaginary values C shares the same -space, what I need is a porting subroutine that remaps the data from one format -to the other - using as little space as possible. - -I think of the problem as follows. Imagine an array of dimension 10 containing -the values 1,3,5,7,9,2,4,6,8,10 in this order. - - A(1) / 1 \ C(1) - A(2) | 3 | C(2) - A(3) | 5 | C(3) - A(4) | 7 | C(4) - A(5) \ 9 | C(5) - | - B(1) / 2 | C(6) - B(2) | 4 | C(7) - B(3) | 6 | C(8) - B(4) | 8 | C(9) - B(5) \ 10 / C(10) - -Given that I know this initial pattern, I want to sort the array C in-place *without -making comparisons*. That is, the algorithm can only depend on the initial -knowledge of the pattern. Do you see what a sort is going to do? It will -make the A and B arrays alternate, i.e. C(1)=A(1), C(2)=B(1), C(3)=A(2), -C(4)=B(2), etc. It's not a real sort though because I can't actually refer to the -values above (i.e. no comparisons) because A and B will be holding real data, -not this contrived pattern. The pattern above exists though - it's the -natural ordering in memory of A and B. - -Either pair swapping only or a small amount of workspace can be used. The -in-place is important - imagine scaling this problem up to an -array of 32 or 64 million double precision values and you can easily see how -duplicating the array is not a feasible solution. - -Any ideas? I've been stumped on this for a day and a half now. 
- -Darran Edmundson -dedmunds@sfu.ca - - From: Roger Critchlow (rec@elf115.elf.org) - Subject: Re: array reordering algorithm? - Newsgroups: sci.math.num-analysis - Date: 1995/04/30 - - - Any ideas? I've been stumped on this for a day and a half now. - -Here's some code for in situ permutations of arrays that I wrote -a few years ago. It all started from the in situ transposition -algorithms in the Collected Algorithms of the ACM, the references -for which always get lost during the decryption from fortran. - -This is the minimum space algorithm. All you need to supply is -a function which computes the new order array index from the old -order array index. - -If you can spare n*m bits to record the indexes of elements which -have been permuted, then you can speed things up. - --- rec -- - ------------------------------------------------------------------------- -/* -** Arbitrary in situ permutations of an m by n array of base type TYPE. -** Copyright 1995 by Roger E Critchlow Jr, rec@elf.org, San Francisco, CA. -** Fair use permitted, caveat emptor. 
-*/ -typedef int TYPE; - -int transposition(int ij, int m, int n) /* transposition about diagonal from upper left to lower right */ -{ return ((ij%m)*n+ (ij/m)); } - -int countertrans(int ij, int m, int n) /* transposition about diagonal from upper right to lower left */ -{ return ((m-1-(ij%m))*n+ (n-1-(ij/m))); } - -int rotate90cw(int ij, int m, int n) /* 90 degree clockwise rotation */ -{ return ((m-1-(ij%m))*n+ (ij/m)); } - -int rotate90ccw(int ij, int m, int n) /* 90 degree counter clockwise rotation */ -{ return ((ij%m)*n+ (n-1-(ij/m))); } - -int rotate180(int ij, int m, int n) /* 180 degree rotation */ -{ return ((m-1-(ij/n))*n+ (n-1-(ij%n))); } - -int reflecth(int ij, int m, int n) /* reflection across horizontal plane */ -{ return ((m-1-(ij/n))*n+ (ij%n)); } - -int reflectv(int ij, int m, int n) /* reflection across vertical plane */ -{ return ((ij/n)*n+ (n-1-(ij%n))); } - -int in_situ_permutation(TYPE a[], int m, int n, int (*origination)(int ij, int m, int n)) -{ - int ij, oij, dij, n_to_do; - TYPE b; - n_to_do = m*n; - for (ij = 0; ij < m*n && n_to_do > 0; ij += 1) { - /* Test for previously permuted */ - for (oij = origination(ij,m,n); oij > ij; oij = origination(oij,m,n)) - ; - if (oij < ij) - continue; - /* Chase the cycle */ - dij = ij; - b = a[ij]; - for (oij = origination(dij,m,n); oij != ij; oij = origination(dij,m,n)) { - a[dij] = a[oij]; - dij = oij; - n_to_do -= 1; - } - a[dij] = b; - n_to_do -= 1; - } return 0; -} - -#define TESTING 1 -#if TESTING - -/* fill a matrix with sequential numbers, row major ordering */ -void fill_matrix_rows(a, m, n) TYPE *a; int m, n; -{ - int i, j; - for (i = 0; i < m; i += 1) - for (j = 0; j < n; j += 1) - a[i*n+j] = i*n+j; -} - -/* fill a matrix with sequential numbers, column major ordering */ -void fill_matrix_cols(a, m, n) TYPE *a; int m, n; -{ - int i, j; - for (i = 0; i < m; i += 1) - for (j = 0; j < n; j += 1) - a[i*n+j] = j*m+i; -} - -/* test a matrix for sequential numbers, row major ordering */ -int 
test_matrix_rows(a, m, n) TYPE *a; int m, n; -{ - int i, j, o; - for (o = i = 0; i < m; i += 1) - for (j = 0; j < n; j += 1) - o += a[i*n+j] != i*n+j; - return o; -} - -/* test a matrix for sequential numbers, column major ordering */ -int test_matrix_cols(a, m, n) TYPE *a; int m, n; -{ - int i, j, o; - for (o = i = 0; i < m; i += 1) - for (j = 0; j < n; j += 1) - o += a[i*n+j] != j*m+i; - return o; -} - -/* print a matrix */ -void print_matrix(a, m, n) TYPE *a; int m, n; -{ - char *format; - int i, j; - if (m*n < 10) format = "%2d"; - if (m*n < 100) format = "%3d"; - if (m*n < 1000) format = "%4d"; - if (m*n < 10000) format = "%5d"; - for (i = 0; i < m; i += 1) { - for (j = 0; j < n; j += 1) - printf(format, a[i*n+j]); - printf("\n"); - } -} - -#if TEST_TRANSPOSE -#define MAXSIZE 1000 - -main() -{ - int i, j, m, n, o; - TYPE a[MAXSIZE]; - for (m = 1; m < sizeof(a)/sizeof(a[0]); m += 1) - for (n = 1; m*n < sizeof(a)/sizeof(a[0]); n += 1) { - fill_matrix_rows(a, m, n); /* {0 1} {2 3} */ - if (o = transpose(a, m, n)) - printf(">> transpose returned %d for a[%d][%d], row major\n", o, m, n); - if ((o = test_matrix_cols(a, n, m)) != 0) /* {0 2} {1 3} */ - printf(">> transpose made %d mistakes for a[%d][%d], row major\n", o, m, n); - /* column major */ - fill_matrix_rows(a, m, n); - if (o = transpose(a, m, n)) - printf(">> transpose returned %d for a[%d][%d], column major\n", o, m, n); - if ((o = test_matrix_cols(a, n, m)) != 0) - printf(">> transpose made %d mistakes for a[%d][%d], column major\n", o, m, n); - } return 0; -} -#endif /* TEST_TRANSPOSE */ - - -#define TEST_DISPLAY 1 -#if TEST_DISPLAY -main(argc, argv) int argc; char *argv[]; -{ - TYPE *a; - int m = 5, n = 5; - extern void *malloc(); - if (argc > 1) { - m = atoi(argv[1]); - if (argc > 2) - n = atoi(argv[2]); - } - a = malloc(m*n*sizeof(TYPE)); - - printf("matrix\n"); - fill_matrix_rows(a, m, n); - print_matrix(a, m, n); - printf("transposition\n"); - in_situ_permutation(a, m, n, transposition); - 
print_matrix(a, n, m); - - printf("counter transposition\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, countertrans); - print_matrix(a, n, m); - - printf("rotate 90 degrees clockwise\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, rotate90cw); - print_matrix(a, n, m); - - printf("rotate 90 degrees counterclockwise\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, rotate90ccw); - print_matrix(a, n, m); - - printf("rotate 180 degrees\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, rotate180); - print_matrix(a, m, n); - - printf("reflect across horizontal\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, reflecth); - print_matrix(a, m, n); - - printf("reflect across vertical\n"); - fill_matrix_rows(a, m, n); - in_situ_permutation(a, m, n, reflectv); - print_matrix(a, m, n); - - return 0; -} - -#endif -#endif diff -Nru python-numpy-1.13.3/doc/f2py/multiarrays.txt python-numpy-1.14.5/doc/f2py/multiarrays.txt --- python-numpy-1.13.3/doc/f2py/multiarrays.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/multiarrays.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -From pearu@ioc.ee Thu Dec 30 09:58:01 1999 -Date: Fri, 26 Nov 1999 12:02:42 +0200 (EET) -From: Pearu Peterson -To: Users of f2py2e -- Curtis Jensen , - Vladimir Janku , - Travis Oliphant -Subject: Multidimensional arrays in f2py2e - - -Hi! - -Below I will describe how f2py2e wraps Fortran multidimensional arrays as -it constantly causes confusion. As for example, consider Fortran code - - subroutine foo(l,m,n,a) - integer l,m,n - real*8 a(l,m,n) - .. 
- end -Running f2py2e with -h flag, it generates the following signature - -subroutine foo(l,m,n,a) - integer optional,check(shape(a,2)==l),depend(a) :: l=shape(a,2) - integer optional,check(shape(a,1)==m),depend(a) :: m=shape(a,1) - integer optional,check(shape(a,0)==n),depend(a) :: n=shape(a,0) - real*8 dimension(l,m,n),check(rank(a)==3) :: a -end subroutine foo - -where parameters l,m,n are considered optional and they are initialized in -Python C/API code using the array a. Note that a can be also a proper -list, that is, asarray(a) should result in a rank-3 array. But then there -is an automatic restriction that elements of a (in Python) are not -changeable (in place) even if Fortran subroutine changes the array a (in -C,Fortran). - -Hint: you can attribute the array a with 'intent(out)' which causes foo to -return the array a (in Python) if you are to lazy to define a=asarray(a) -before the call to foo (in Python). - -Calling f2py2e without the switch -h, a Python C/API module will be -generated. After compiling it and importing it to Python ->>> print foo.__doc__ -shows -None = foo(a,l=shape(a,2),m=shape(a,1),n=shape(a,0)) - -You will notice that f2py2e has changed the order of arguments putting the -optional ones at the end of the argument list. -Now, you have to be careful when specifying the parameters l,m,n (though -situations where you need this should be rare). A proper definition -of the array a should be, say - - a = zeros(n,m,l) - -Note that the dimensions l,m,n are in reverse, that is, the array a should -be transposed when feeding it to the wrapper. 
- -Hint (and a performance hit): To be always consistent with fortran -arrays, you can define, for example - a = zeros(l,m,n) -and call from Python - foo(transpose(a),l,m,n) -which is equivalent with the given Fortran call - call foo(l,m,n,a) - -Another hint (not recommended, though): If you don't like optional -arguments feature at all and want to be strictly consistent with Fortran -signature, that is, you want to call foo from Python as - foo(l,m,n,a) -then you should edit the signature to -subroutine foo(l,m,n,a) - integer :: l - integer :: m - integer :: n - real*8 dimension(l,m,n),check(rank(a)==3),depend(l,m,n), & - check(shape(a,2)==l,shape(a,1)==m,shape(a,0)==n):: a -end -Important! Note that now the array a should depend on l,m,n -so that the checks can be performed in the proper order. -(you cannot check, say, shape(a,2)==l before initializing a or l) -(There are other ways to edit the signature in order to get the same -effect but they are not so safe and I will not discuss about them here). - -Hint: If the array a should be a work array (as used frequently in -Fortran) and you a too lazy (its good laziness;) to provide it (in Python) -then you can define it as optional by ediding the signature: -subroutine foo(l,m,n,a) - integer :: l - integer :: m - integer :: n - real*8 dimension(l,m,n),check(rank(a)==3),depend(l,m,n), & - check(shape(a,2)==l,shape(a,1)==m,shape(a,0)==n):: a - optional a -end -Note again that the array a must depend on l,m,n. Then the array a will be -allocated in the Python C/API module. Not also that ->>> print foo.__doc__ -shows then -None = foo(l,m,n,a=) -Performance hint: If you call the given foo lots of times from Python then -you don't want to allocate/deallocate the memory in each call. So, it is -then recommended to define a temporary array in Python, for instance ->>> tmp = zeros(n,m,l) ->>> for i in ...: ->>> foo(l,m,n,a=tmp) - -Important! 
It is not good at all to define - >>> tmp = transpose(zeros(l,m,n)) -because tmp will be then a noncontiguous array and there will be a -huge performance hit as in Python C/API a new array will be allocated and -also a copying of arrays will be performed elementwise! -But - >>> tmp = asarray(transpose(zeros(l,m,n))) -is still ok. - -I hope that the above answers lots of your (possible) questions about -wrapping Fortran multidimensional arrays with f2py2e. - -Regards, - Pearu diff -Nru python-numpy-1.13.3/doc/f2py/notes.tex python-numpy-1.14.5/doc/f2py/notes.tex --- python-numpy-1.13.3/doc/f2py/notes.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/notes.tex 1970-01-01 00:00:00.000000000 +0000 @@ -1,310 +0,0 @@ - -\section{Calling wrapper functions from Python} -\label{sec:notes} - -\subsection{Scalar arguments} -\label{sec:scalars} - -In general, for scalar argument you can pass in in -addition to ordinary Python scalars (like integers, floats, complex -values) also arbitrary sequence objects (lists, arrays, strings) --- -then the first element of a sequence is passed in to the Fortran routine. - -It is recommended that you always pass in scalars of required type. This -ensures the correctness as no type-casting is needed. -However, no exception is raised if type-casting would produce -inaccurate or incorrect results! For example, in place of an expected -complex value you can give an integer, or vice-versa (in the latter case only -a rounded real part of the complex value will be used). - -If the argument is \texttt{intent(inout)} then Fortran routine can change the -value ``in place'' only if you pass in a sequence object, for -instance, rank-0 array. Also make sure that the type of an array is of -correct type. Otherwise type-casting will be performed and you may -get inaccurate or incorrect results. 
The following example illustrates this -\begin{verbatim} ->>> a = array(0) ->>> calculate_pi(a) ->>> print a -3 -\end{verbatim} - -If you pass in an ordinary Python scalar in place of -\texttt{intent(inout)} variable, it will be used as an input argument -since -Python -scalars cannot not be changed ``in place'' (all Python scalars -are immutable objects). - -\subsection{String arguments} -\label{sec:strings} - -You can pass in strings of arbitrary length. If the length is greater than -required, only a required part of the string is used. If the length -is smaller than required, additional memory is allocated and fulfilled -with `\texttt{\bs0}'s. - -Because Python strings are immutable, \texttt{intent(inout)} argument -expects an array version of a string --- an array of chars: -\texttt{array("")}. -Otherwise, the change ``in place'' has no effect. - - -\subsection{Array arguments} -\label{sec:arrays} - -If the size of an array is relatively large, it is \emph{highly - recommended} that you pass in arrays of required type. Otherwise, -type-casting will be performed which includes the creation of new -arrays and their copying. If the argument is also -\texttt{intent(inout)}, the wasted time is doubled. So, pass in arrays -of required type! - -On the other hand, there are situations where it is perfectly all -right to ignore this recommendation: if the size of an array is -relatively small or the actual time spent in Fortran routine takes -much longer than copying an array. Anyway, if you want to optimize -your Python code, start using arrays of required types. - -Another source of performance hit is when you use non-contiguous -arrays. The performance hit will be exactly the same as when using -incorrect array types. This is because a contiguous copy is created -to be passed in to the Fortran routine. - -\fpy provides a feature such that the ranks of array arguments need -not to match --- only the correct total size matters. 
For example, if -the wrapper function expects a rank-1 array \texttt{array([...])}, -then it is correct to pass in rank-2 (or higher) arrays -\texttt{array([[...],...,[...]])} assuming that the sizes will match. -This is especially useful when the arrays should contain only one -element (size is 1). Then you can pass in arrays \texttt{array(0)}, -\texttt{array([0])}, \texttt{array([[0]])}, etc and all cases are -handled correctly. In this case it is correct to pass in a Python -scalar in place of an array (but then ``change in place'' is ignored, -of course). - -\subsubsection{Multidimensional arrays} - -If you are using rank-2 or higher rank arrays, you must always -remember that indexing in Fortran starts from the lowest dimension -while in Python (and in C) the indexing starts from the highest -dimension (though some compilers have switches to change this). As a -result, if you pass in a 2-dimensional array then the Fortran routine -sees it as the transposed version of the array (in multi-dimensional -case the indexes are reversed). - -You must take this matter into account also when modifying the -signature file and interpreting the generated Python signatures: - -\begin{itemize} -\item First, when initializing an array using \texttt{init\_expr}, the index -vector \texttt{\_i[]} changes accordingly to Fortran convention. -\item Second, the result of CPP-macro \texttt{shape(,0)} - corresponds to the last dimension of the Fortran array, etc. -\end{itemize} -Let me illustrate this with the following example:\\ -\begin{verbatim} -! Fortran file: arr.f - subroutine arr(l,m,n,a) - integer l,m,n - real*8 a(l,m,n) - ... - end -\end{verbatim} -\fpy will generate the following signature file:\\ -\begin{verbatim} -!%f90 -! Signature file: arr.f90 -python module arr ! in - interface ! in :arr - subroutine arr(l,m,n,a) ! 
in :arr:arr.f - integer optional,check(shape(a,2)==l),depend(a) :: l=shape(a,2) - integer optional,check(shape(a,1)==m),depend(a) :: m=shape(a,1) - integer optional,check(shape(a,0)==n),depend(a) :: n=shape(a,0) - real*8 dimension(l,m,n) :: a - end subroutine arr - end interface -end python module arr -\end{verbatim} -and the following wrapper function will be produced -\begin{verbatim} -None = arr(a,l=shape(a,2),m=shape(a,1),n=shape(a,0)) -\end{verbatim} - -In general, I would suggest not to specify the given optional -variables \texttt{l,m,n} when calling the wrapper function --- let the -interface find the values of the variables \texttt{l,m,n}. But there -are occasions when you need to specify the dimensions in Python. - -So, in Python a proper way to create an array from the given -dimensions is -\begin{verbatim} ->>> a = zeros(n,m,l,'d') -\end{verbatim} -(note that the dimensions are reversed and correct type is specified), -and then a complete call to \texttt{arr} is -\begin{verbatim} ->>> arr(a,l,m,n) -\end{verbatim} - -From the performance point of view, always be consistent with Fortran -indexing convention, that is, use transposed arrays. But if you do the -following -\begin{verbatim} ->>> a = transpose(zeros(l,m,n,'d')) ->>> arr(a) -\end{verbatim} -then you will get a performance hit! The reason is that here the -transposition is not actually performed. Instead, the array \texttt{a} -will be non-contiguous which means that before calling a Fortran -routine, internally a contiguous array is created which -includes memory allocation and copying. In addition, if -the argument array is also \texttt{intent(inout)}, the results are -copied back to the initial array which doubles the -performance hit! - -So, to improve the performance: always pass in -arrays that are contiguous. - -\subsubsection{Work arrays} - -Often Fortran routines use the so-called work arrays. 
The -corresponding arguments can be declared as optional arguments, but be -sure that all dimensions are specified (bounded) and defined before -the initialization (dependence relations). - -On the other hand, if you call the Fortran routine many times then you -don't want to allocate/deallocate the memory of the work arrays on -every call. In this case it is recommended that you create temporary -arrays with proper sizes in Python and use them as work arrays. But be -careful when specifying the required type and be sure that the -temporary arrays are contiguous. Otherwise the performance hit would -be even harder than the hit when not using the temporary arrays from -Python! - - - -\subsection{Call-back arguments} -\label{sec:cbargs} - -\fpy builds a very flexible call-back mechanisms for call-back -arguments. If the wrapper function expects a call-back function \texttt{fun} -with the following Python signature to be passed in -\begin{verbatim} -def fun(a_1,...,a_n): - ... - return x_1,...,x_k -\end{verbatim} -but the user passes in a function \texttt{gun} with the signature -\begin{verbatim} -def gun(b_1,...,b_m): - ... - return y_1,...,y_l -\end{verbatim} -and the following extra arguments (specified as additional optional -argument for the wrapper function): -\begin{verbatim} -fun_extra_args = (e_1,...,e_p) -\end{verbatim} -then the actual call-back is constructed accordingly to the following rules: -\begin{itemize} -\item if \texttt{p==0} then \texttt{gun(a\_1,...,a\_q)}, where - \texttt{q=min(m,n)}; -\item if \texttt{n+p<=m} then \texttt{gun(a\_1,...,a\_n,e\_1,...,e\_p)}; -\item if \texttt{p<=mm} then \texttt{gun(e\_1,...,e\_m)}; -\item if \texttt{n+p} is less than the number of required arguments - of the function \texttt{gun}, an exception is raised. -\end{itemize} - -A call-back function \texttt{gun} may return any number of objects as a tuple: -if \texttt{kl}, then only objects \texttt{x\_1,...,x\_l} are set. 
- - -\subsection{Obtaining information on wrapper functions} -\label{sec:info} - -From the previous sections we learned that it is useful for the -performance to pass in arguments of expected type, if possible. To -know what are the expected types, \fpy generates a complete -documentation strings for all wrapper functions. You can read them -from Python by printing out \texttt{\_\_doc\_\_} attributes of the -wrapper functions. For the example in Sec.~\ref{sec:intro}: -\begin{verbatim} ->>> print foobar.foo.__doc__ -Function signature: - foo(a) -Required arguments: - a : in/output rank-0 array(int,'i') ->>> print foobar.bar.__doc__ -Function signature: - bar = bar(a,b) -Required arguments: - a : input int - b : input int -Return objects: - bar : int -\end{verbatim} - -In addition, \fpy generates a LaTeX document -(\texttt{module.tex}) containing a bit more information on -the wrapper functions. See for example Appendix that contains a result -of the documentation generation for the example module -\texttt{foobar}. Here the file \texttt{foobar-smart.f90} (modified -version of \texttt{foobar.f90}) is used --- it contains -\texttt{note()} attributes for specifying some additional -information. - -\subsection{Wrappers for common blocks} -\label{sec:wrapcomblock} - -[See examples \texttt{test-site/e/runme*}] - -What follows is obsolute for \fpy version higher that 2.264. - -\fpy generates wrapper functions for common blocks. For every common -block with a name \texttt{} a function -\texttt{get\_()} is constructed that takes no arguments -and returns a dictionary. The dictionary represents maps between the -names of common block fields and the arrays containing the common -block fields (multi-dimensional arrays are transposed). So, in order -to access to the common block fields, you must first obtain the -references -\begin{verbatim} -commonblock = get_() -\end{verbatim} -and then the fields are available through the arrays -\texttt{commonblock[""]}. 
-To change the values of common block fields, you can use for scalars -\begin{verbatim} -commonblock[""][0] = -\end{verbatim} -and for arrays -\begin{verbatim} -commonblock[""][:] = -\end{verbatim} -for example. - -For more information on the particular common block wrapping, see -\texttt{get\_.\_\_doc\_\_}. - -\subsection{Wrappers for F90/95 module data and routines} -\label{sec:wrapf90modules} - -[See example \texttt{test-site/mod/runme\_mod}] - -\subsection{Examples} -\label{sec:examples} - -Examples on various aspects of wrapping Fortran routines to Python can -be found in directories \texttt{test-site/d/} and -\texttt{test-site/e/}: study the shell scripts \texttt{runme\_*}. See -also files in \texttt{doc/ex1/}. - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: diff -Nru python-numpy-1.13.3/doc/f2py/oldnews.html python-numpy-1.14.5/doc/f2py/oldnews.html --- python-numpy-1.13.3/doc/f2py/oldnews.html 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/oldnews.html 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ - - - - - - -F2PY - Fortran to Python Interface Generator - - - - -

F2PY old news.

- -
-
February 23, 2002 -
Fixed a bug of incorrect shapes of multi-dimensional arrays - when returning from Fortran routine (thanks to Eric for pointing - this out). - F2PY_REPORT_ATEXIT is disabled by default under Win32. -
February 14, 2002 -
Introduced callprotoargument statement so that - proper prototypes can be specified (this fixes SEGFAULTs when - wrapping C functions with f2py, see NEWS.txt for more details). Updated for the - latest numpy_distutils. Fixed few bugs. -
February 3, 2002 -
Introduced intent(overwrite),intent(out=name) - attributes, callstatement C-expr; statement, and - reviewed reference counting in callback mechanism. Fixed bugs. -
January 18, 2002 -
Introduced extra keyword argument copy_#varname#=1 - for intent(copy) variables, - -DF2PY_REPORT_ATEXIT for reporting f2py - performance, - has_column_major_storage member function for generated - modules, and dmalloc support. -
January 16, 2002 -
BREAKING NEWS! Solved long lasted dilemma of wrapping - multi-dimensional arrays where different - storage orders in C and Fortran come into account. From now on - this difference is dealt automatically by the f2py generated - module and in a very efficient way. For example, the corresponding - element A(i,j) of a Fortran array can be accessed in Python as - A[i,j]. -
January 13, 2002 -
Fifth Public Release is coming soon..., a snapshot is available - for download, now with updates. -
December 17, 2001 -
Fourth Public Release: Win32 support. -
Making f2py2e a module. Currently it has only one - member function run_main(comline_list). -
Removed command line arguments -fix,-f90,-f77 - and introduced many new ones. See NEWS.txt. -
intent(..) statement with empty name list defines - default intent(..) attribute for all routine arguments. -
Refinements in Win32 support. Eric Jones has provided a f2py - HOWTO for Windows users. See win32_notes.txt. -
Major rewrote of the code generator to achieve - a higher quality of generated C/API modules (-Wall messages are - considerably reduced, especially for callback functions). -
Many bugs were fixed. -
December 12, 2001 -
Win32 support (thanks to Eric Jones and Tiffany Kamm). Minor - cleanups and fixes. -
December 4, 2001 -
Third Public Release: f2py supports distutils. It can be - installed with one and it generates setup_modulename.py - to be used for building Python extension modules. -
Introduced threadsafe, fortranname, - and intent(c) statements. -
August 13, 2001 -
Changed the name FPIG to F2PY for avoiding confusion with project names. -
Updated f2py for use with Numeric version 20.x. -
January 12, 2001 -
Example usages of PyFortranObject. - Fixed bugs. Updated the - Python 9 Conference paper (F2PY paper). -
December 9, 2000 -
Implemented support for PARAMETER statement. -
November 6, 2000 -
Submitted a paper for 9th Python Conference (accepted). It is available in html, PDF, - and Gzipped PS formats. -
September 17, 2000 -
Support for F90/95 module data and routines. COMMON block - wrapping is rewritten. New signature file syntax: - pythonmodule. Signature files generated with - f2py-2.264 or earlier, are incompatible (need replacement - module with - pythonmodule). -
September 12, 2000 -
The second public release of f2py is out. See Release notes. -
September 11, 2000 -
Now f2py supports wrapping Fortran 90/95 module routines - (support for F90/95 module data coming soon) -
June 12, 2000 -
Now f2py has a mailing list f2py-users open for discussion. - -
- - - -
-
-Valid HTML 4.0! -Pearu Peterson -<pearu(at)ioc.ee>
- -Last modified: Mon Dec 3 19:40:26 EET 2001 - -
- - - - - - - - diff -Nru python-numpy-1.13.3/doc/f2py/OLDNEWS.txt python-numpy-1.14.5/doc/f2py/OLDNEWS.txt --- python-numpy-1.13.3/doc/f2py/OLDNEWS.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/OLDNEWS.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ - -.. topic:: Old F2PY NEWS - - January 30, 2005 - - Latest F2PY release (version 2.45.241_1926). - New features: wrapping unsigned integers, support for ``.pyf.src`` template files, - callback arguments can now be CObjects, fortran objects, built-in functions. - Introduced ``intent(aux)`` attribute. Wrapped objects have ``_cpointer`` - attribute holding C pointer to wrapped functions or variables. - Many bug fixes and improvements, updated documentation. - `Differences with the previous release (version 2.43.239_1831)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.163&r2=1.137&f=h - - October 4, 2004 - F2PY bug fix release (version 2.43.239_1831). - Better support for 64-bit platforms. - Introduced ``--help-link`` and ``--link-`` options. - Bug fixes. - `Differences with the previous release (version 2.43.239_1806)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.137&r2=1.131&f=h - - September 25, 2004 - Latest F2PY release (version 2.43.239_1806). - Support for ``ENTRY`` statement. New attributes: - ``intent(inplace)``, ``intent(callback)``. Supports Numarray 1.1. - Introduced ``-*- fix -*-`` header content. Improved ``PARAMETER`` support. - Documentation updates. `Differences with the previous release - (version 2.39.235-1693)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.131&r2=1.98&f=h - - March 30, 2004 - F2PY bug fix release (version 2.39.235-1693). Two new command line switches: - ``--compiler`` and ``--include_paths``. Support for allocatable string arrays. - Callback arguments may now be arbitrary callable objects. 
Win32 installers - for F2PY and Scipy_core are provided. - `Differences with the previous release (version 2.37.235-1660)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.98&r2=1.87&f=h - - March 9, 2004 - F2PY bug fix release (version 2.39.235-1660). - `Differences with the previous release (version 2.37.235-1644)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.87&r2=1.83&f=h - - February 24, 2004 - Latest F2PY release (version 2.39.235-1644). - Support for numpy_distutils 0.2.2 and up (e.g. compiler flags can be - changed via f2py command line options). Implemented support for - character arrays and arrays of strings (e.g. ``character*(*) a(m,..)``). - *Important bug fixes regarding complex arguments, upgrading is - highly recommended*. Documentation updates. - `Differences with the previous release (version 2.37.233-1545)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.83&r2=1.58&f=h - - September 11, 2003 - Latest F2PY release (version 2.37.233-1545). - New statements: ``pymethoddef`` and ``usercode`` in interface blocks. - New function: ``as_column_major_storage``. - New CPP macro: ``F2PY_REPORT_ON_ARRAY_COPY``. - Bug fixes. - `Differences with the previous release (version 2.35.229-1505)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.58&r2=1.49&f=h - - August 2, 2003 - Latest F2PY release (version 2.35.229-1505). - `Differences with the previous release (version 2.32.225-1419)`__. - - __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.49&r2=1.28&f=h - - April 2, 2003 - Initial support for Numarray_ (thanks to Todd Miller). - - December 8, 2002 - Sixth public release of F2PY (version 2.32.225-1419). Comes with - revised `F2PY Users Guide`__, `new testing site`__, lots of fixes - and other improvements, see `HISTORY.txt`_ for details. 
- - __ usersguide/index.html - __ TESTING.txt_ - -.. References - ========== - -.. _HISTORY.txt: HISTORY.html -.. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray -.. _TESTING.txt: TESTING.html diff -Nru python-numpy-1.13.3/doc/f2py/options.tex python-numpy-1.14.5/doc/f2py/options.tex --- python-numpy-1.13.3/doc/f2py/options.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/options.tex 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ - -\section{\fpy command line options} -\label{sec:opts} - -\fpy has the following command line syntax (run \fpy without arguments -to get up to date options!!!): -\begin{verbatim} -f2py [] [[[only:]||[skip:]] ]\ - [: ...] -\end{verbatim} -where -\begin{description} -\item[\texttt{}] --- the following options are available: - \begin{description} - \item[\texttt{-f77}] --- \texttt{} are in Fortran~77 - fixed format (default). - \item[\texttt{-f90}] --- \texttt{} are in - Fortran~90/95 free format (default for signature files). - \item[\texttt{-fix}] --- \texttt{} are in - Fortran~90/95 fixed format. - \item[\texttt{-h }] --- after scanning the - \texttt{} write the signatures of Fortran routines - to file \texttt{} and exit. If \texttt{} - exists, \fpy quits without overwriting the file. Use - \texttt{-{}-overwrite-signature} to overwrite. - \item[\texttt{-m }] --- specify the name of the module - when scanning Fortran~77 codes for the first time. \fpy will - generate Python C/API module source \texttt{module.c}. - \item[\texttt{-{}-lower/-{}-no-lower}] --- lower/do not lower the cases - when scanning the \texttt{}. Default when - \texttt{-h} flag is specified/unspecified (that is for Fortran~77 - codes/signature files). - \item[\texttt{-{}-short-latex}] --- use this flag when you want to - include the generated LaTeX document to another LaTeX document. - \item[\texttt{-{}-debug-capi}] --- create a very verbose C/API - code. Useful for debbuging. 
-% \item[\texttt{-{}-h-force}] --- if \texttt{-h } is used then -% overwrite the file \texttt{} (if it exists) and continue -% with constructing the C/API module source. - \item[\texttt{-makefile }] --- run \fpy without arguments - for more information. - \item[\texttt{-{}-use-libs}] --- see \texttt{-makefile}. - \item[\texttt{-{}-overwrite-makefile}] --- overwrite existing - \texttt{Makefile-}. - \item[\texttt{-v}] --- print \fpy version number and exit. - \item[\texttt{-pyinc}] --- print Python include path and exit. - \end{description} -\item[\texttt{}] --- are the paths to Fortran files or - to signature files that will be scanned for \texttt{} in order to determine their signatures. -\item[\texttt{}] --- are the names of Fortran - routines for which Python C/API wrapper functions will be generated. - Default is all that are found in \texttt{}. -\item[\texttt{only:}/\texttt{skip:}] --- are flags for filtering - in/out the names of fortran routines to be wrapped. Run \fpy without - arguments for more information about the usage of these flags. 
-\end{description} - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: diff -Nru python-numpy-1.13.3/doc/f2py/pyforttest.pyf python-numpy-1.14.5/doc/f2py/pyforttest.pyf --- python-numpy-1.13.3/doc/f2py/pyforttest.pyf 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/pyforttest.pyf 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -subroutine foo(a,m,n) -integer m = size(a,1) -integer n = size(a,2) -real, intent(inout) :: a(m,n) -end subroutine foo diff -Nru python-numpy-1.13.3/doc/f2py/pytest.py python-numpy-1.14.5/doc/f2py/pytest.py --- python-numpy-1.13.3/doc/f2py/pytest.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/pytest.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -from __future__ import division, absolute_import, print_function - -#File: pytest.py -import Numeric -def foo(a): - a = Numeric.array(a) - m, n = a.shape - for i in range(m): - for j in range(n): - a[i, j] = a[i, j] + 10*(i+1) + (j+1) - return a -#eof diff -Nru python-numpy-1.13.3/doc/f2py/python9.tex python-numpy-1.14.5/doc/f2py/python9.tex --- python-numpy-1.13.3/doc/f2py/python9.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/python9.tex 1970-01-01 00:00:00.000000000 +0000 @@ -1,1044 +0,0 @@ -\documentclass[twocolumn]{article} -\usepackage{epsfig} -\usepackage{xspace} -\usepackage{verbatim} - - -\headsep=0pt -\topmargin=0pt -\headheight=0pt -\oddsidemargin=0pt -\textwidth=6.5in -\textheight=9in -%%tth:\newcommand{\xspace}{ } -\newcommand{\fpy}{\texttt{f2py}\xspace} -\newcommand{\bs}{\symbol{`\\}} -% need bs here: -%%tth:\newcommand{\bs}{\texttt{}} - -\newcommand{\tthhide}[1]{#1} -\newcommand{\latexhide}[1]{} -%%tth:\newcommand{\tthhide}[1]{} -%%tth:\newcommand{\latexhide}[1]{#1} - -\newcommand{\shell}[1]{ -\latexhide{ - \special{html: -
-
-sh> #1
-
-
} -} -\tthhide{ - \\[1ex] - \hspace*{1em} - \texttt{sh> \begin{minipage}[t]{0.8\textwidth}#1\end{minipage}}\\[1ex] -} -} - -\newcommand{\email}[1]{\special{html:}\texttt{<#1>}\special{html:}} -\newcommand{\wwwsite}[1]{\special{html:}{#1}\special{html:}} -\title{Fortran to Python Interface Generator with -an Application to Aerospace Engineering} -\author{ -\large Pearu Peterson\\ -\small \email{pearu@cens.ioc.ee}\\ -\small Center of Nonlinear Studies\\ -\small Institute of Cybernetics at TTU\\ -\small Akadeemia Rd 21, 12618 Tallinn, ESTONIA\\[2ex] -\large Joaquim R. R. A. Martins and Juan J. Alonso\\ -\small \email{joaquim.martins@stanford.edu}, \email{jjalonso@stanford.edu}\\ -\small Department of Aeronautics and Astronautics\\ -\small Stanford University, CA -} -\date{$Revision: 1.17 $\\\today} -\begin{document} - -\maketitle - -\special{html: Other formats of this document: -Gzipped PS, -PDF -} - -\begin{abstract} - FPIG --- Fortran to Python Interface Generator --- is a tool for - generating Python C/API extension modules that interface - Fortran~77/90/95 codes with Python. This tool automates the process - of interface generation by scanning the Fortran source code to - determine the signatures of Fortran routines and creating a - Python C/API module that contains the corresponding interface - functions. FPIG also attempts to find dependence relations between - the arguments of a Fortran routine call (e.g. an array and its - dimensions) and constructs interface functions with potentially - fewer arguments. The tool is extremely flexible since the user has - control over the generation process of the interface by specifying the - desired function signatures. The home page for FPIG can be found at - \wwwsite{http://cens.ioc.ee/projects/f2py2e/}. - - FPIG has been used successfully to wrap a large number of Fortran - programs and libraries. 
Advances in computational science have led - to large improvements in the modeling of physical systems which are - often a result of the coupling of a variety of physical models that - were typically run in isolation. Since a majority of the available - physical models have been previously written in Fortran, the - importance of FPIG in accomplishing these couplings cannot be - understated. In this paper, we present an application of FPIG to - create an object-oriented framework for aero-structural analysis and - design of aircraft. -\end{abstract} - -%%tth: -\tableofcontents - -\section{Preface} -\label{sec:preface} - -The use of high-performance computing has made it possible to tackle -many important problems and discover new physical phenomena in science -and engineering. These accomplishments would not have been achieved -without the computer's ability to process large amounts of data in a -reasonably short time. It can safely be said that the computer has -become an essential tool for scientists and engineers. However, the -diversity of problems in science and engineering has left its mark as -computer programs have been developed in different programming -languages, including languages developed to describe certain specific -classes of problems. - -In interdisciplinary fields it is not uncommon for scientists and -engineers to face problems that have already been solved in a -different programming environment from the one they are familiar with. -Unfortunately, researchers may not have the time or willingness to -learn a new programming language and typically end up developing the -corresponding tools in the language that they normally use. 
This -approach to the development of new software can substantially impact -the time to develop and the quality of the resulting product: firstly, -it usually takes longer to develop and test a new tool than to learn a -new programming environment, and secondly it is very unlikely that a -non-specialist in a given field can produce a program that is more -efficient than more established tools. - -To avoid situations such as the one described above, one alternative -would be to provide automatic or semi-automatic interfaces between programming -languages. Another possibility would be to provide language -translators, but these obviously require more work than interface -generators --- a translator must understand all language constructs -while an interface generator only needs to understand a subset of these -constructs. With an automatic interface between two languages, scientists or -engineers can effectively use programs written in other programming -languages without ever having to learn them. - -Although it is clear that it is impossible to interface arbitrary programming -languages with each other, there is no reason for doing so. Low-level languages such as C and Fortran are well known for -their speed and are therefore suitable for applications where -performance is critical. High-level scripting languages, on the other -hand, are generally slower but much easier to learn and use, -especially when performing interactive analysis. Therefore, it makes -sense to create interfaces only in one direction: from lower-level -languages to higher-level languages. - -In an ideal world, scientists and engineers would use higher-level -languages for the manipulation of the mathematical formulas in a problem -rather than having to struggle with tedious programming details. For tasks -that are computationally demanding, they would use interfaces to -high-performance routines that are written in a lower-level language -optimized for execution speed. 
- - -\section{Introduction} -\label{sec:intro} - -This paper presents a tool that has been developed for the creation of -interfaces between Fortran and Python. - - -The Fortran language is popular in -scientific computing, and is used mostly in applications that use -extensive matrix manipulations (e.g. linear algebra). Since Fortran - has been the standard language among scientists and engineers for - at least three decades, there is a large number of legacy codes available that - perform a variety of tasks using very sophisticated algorithms (see -e.g. \cite{netlib}). - -The Python language \cite{python}, on the other hand, is a relatively -new programming language. It is a very high-level scripting language -that supports object-oriented programming. What makes Python -especially appealing is its very clear and natural syntax, which makes it -easy to learn and use. With Python one can implement relatively -complicated algorithms and tasks in a short time with very compact -source code. - -Although there are ongoing projects for extending Python's usage in -scientific computation, it lacks reliable tools that are common in -scientific and engineering such as ODE integrators, equation solvers, -tools for FEM, etc. The implementation of all of these tools in Python -would be not only too time-consuming but also inefficient. On the -other hand, these tools are already developed in other, -computationally more efficient languages such as Fortran or C. -Therefore, the perfect role for Python in the context of scientific -computing would be that of a ``gluing'' language. That is, the role -of providing high-level interfaces to C, C++ and Fortran libraries. - -There are a number of widely-used tools that can be used for interfacing -software libraries to Python. For binding C libraries with various -scripting languages, including Python, the tool most often used is -SWIG \cite{swig}. 
Wrapping Fortran routines with Python is less -popular, mainly because there are many platform and compiler-specific -issues that need to be addressed. Nevertheless, there is great -interest in interfacing Fortran libraries because they provide -invaluable tools for scientific computing. At LLNL, for example, a tool -called PyFort has been developed for connecting Fortran and -Python~\cite{pyfort}. - -The tools mentioned above require an input file describing signatures -of functions to be interfaced. To create these input files, one needs -to have a good knowledge of either C or Fortran. In addition, -binding libraries that have thousands of routines can certainly constitute a -very tedious task, even with these tools. - -The tool that is introduced in this paper, FPIG (Fortran to Python -Interface Generator)~\cite{fpig}, automatically generates interfaces -between Fortran and Python. It is different from the tools mentioned -above in that FPIG can create signature files automatically by -scanning the source code of the libraries and then construct Python -C/API extension modules. Note that the user need not be experienced -in C or even Fortran. In addition, FPIG is designed to wrap large -Fortran libraries containing many routines with only one or two -commands. This process is very flexible since one can always modify -the generated signature files to insert additional attributes in order -to achieve more sophisticated interface functions such as taking care -of optional arguments, predicting the sizes of array arguments and -performing various checks on the correctness of the input arguments. - -The organization of this paper is as follows. First, a simple example -of FPIG usage is given. Then FPIG's basic features are described and -solutions to platform and compiler specific issues are discussed. -Unsolved problems and future work on FPIG's development are also -addressed. 
Finally, an application to a large aero-structural solver -is presented as real-world example of FPIG's usage. - -\section{Getting Started} -\label{sec:getstart} - -To get acquainted with FPIG, let us consider the simple Fortran~77 -subroutine shown in Fig. \ref{fig:exp1.f}. -\begin{figure}[htb] - \latexhide{\label{fig:exp1.f}} - \special{html:
} - \verbatiminput{examples/exp1.f} - \special{html:
} - \caption{Example Fortran code \texttt{exp1.f}. This routine calculates - the simplest rational lower and upper approximations to $e$ (for - details of - the algorithm see \cite{graham-etal}, p.122)} - \tthhide{\label{fig:exp1.f}} -\end{figure} -In the sections that follow, two ways of creating interfaces to this -Fortran subroutine are described. The first and simplest way is -suitable for Fortran codes that are developed in connection with \fpy. -The second and not much more difficult method, is suitable for -interfacing existing Fortran libraries which might have been developed -by other programmers. - -Numerical Python~\cite{numpy} is needed in order to compile extension -modules generated by FPIG. - -\subsection{Interfacing Simple Routines} -\label{sec:example1} - -In order to call the Fortran routine \texttt{exp1} from Python, let us -create an interface to it by using \fpy (FPIG's front-end program). In -order to do this, we issue the following command, \shell{f2py -m foo -exp1.f} where the option \texttt{-m foo} sets the name of the Python -C/API extension module that \fpy will create to -\texttt{foo}. To learn more about the \fpy command line options, run \fpy -without arguments. - -The output messages in Fig. \ref{fig:f2pyoutmess} -illustrate the procedure followed by \fpy: - (i) it scans the Fortran source code specified in the command line, - (ii) it analyses and determines the routine signatures, - (iii) it constructs the corresponding Python C/API extension modules, - (iv) it writes documentation to a LaTeX file, and - (v) it creates a GNU Makefile for building the shared modules. -\begin{figure}[htb] - \latexhide{\label{fig:f2pyoutmess}} - \special{html:
} - {\tthhide{\small} - \verbatiminput{examples/exp1mess.txt} - } - \special{html:
} - \caption{Output messages of \texttt{f2py -m foo exp1.f}.} - \tthhide{\label{fig:f2pyoutmess}} -\end{figure} - -Now we can build the \texttt{foo} module: -\shell{make -f Makefile-foo} - -Figure \ref{fig:exp1session} illustrates a sample session for - calling the Fortran routine \texttt{exp1} from Python. -\begin{figure}[htb] - \latexhide{\label{fig:exp1session}} - \special{html:
} - \verbatiminput{examples/exp1session.txt} - \special{html:
} - \caption{Calling Fortran routine \texttt{exp1} from Python. Here - \texttt{l[0]/l[1]} gives an estimate to $e$ with absolute error - less than \texttt{u[0]/u[1]-l[0]/l[1]} (this value may depend on - the platform and compiler used).} - \tthhide{\label{fig:exp1session}} -\end{figure} - -Note the difference between the signatures of the Fortran routine -\texttt{exp1(l,u,n)} and the corresponding wrapper function -\texttt{l,u=exp1([n])}. Clearly, the later is more informative to -the user: \texttt{exp1} takes one optional argument \texttt{n} and it -returns \texttt{l}, \texttt{u}. This exchange of signatures is -achieved by special comment lines (starting with \texttt{Cf2py}) in -the Fortran source code --- these lines are interpreted by \fpy as -normal Fortran code. Therefore, in the given example the line \texttt{Cf2py - integer*4 :: n = 1} informs \fpy that the variable \texttt{n} is -optional with a default value equal to one. The line \texttt{Cf2py - intent(out) l,u} informs \fpy that the variables \texttt{l,u} are to be -returned to Python after calling Fortran function \texttt{exp1}. - -\subsection{Interfacing Libraries} -\label{sec:example2} - -In our example the Fortran source \texttt{exp1.f} contains \fpy -specific information, though only as comments. When interfacing -libraries from other parties, it is not recommended to modify their -source. Instead, one should use a special auxiliary file to collect -the signatures of all Fortran routines and insert \fpy specific -declaration and attribute statements in that file. This auxiliary file -is called a \emph{signature file} and is identified by the extension -\texttt{.pyf}. - -We can use \fpy to generate these signature files by using the -\texttt{-h .pyf} option. -In our example, \fpy could have been called as follows, -\shell{f2py -m foo -h foo.pyf exp1.f} -where the option \texttt{-h foo.pyf} requests \fpy to read the -routine signatures, save them to the file \texttt{foo.pyf}, and then -exit. 
-If \texttt{exp1.f} in Fig.~\ref{fig:exp1.f} were to -contain no lines starting with \texttt{Cf2py}, the corresponding -signature file \texttt{foo.pyf} would be as shown in Fig.~\ref{fig:foo.pyf}. -In order to obtain the exchanged and more convenient signature -\texttt{l,u=foo.exp1([n])}, we would edit \texttt{foo.pyf} as shown in -Fig.~\ref{fig:foom.pyf}. -The Python C/API extension module \texttt{foo} can be constructed by -applying \fpy to the signature file with the following command: -\shell{f2py foo.pyf} -The procedure for building the corresponding shared module and using -it in Python is identical to the one described in the previous section. - -\begin{figure}[htb] - \latexhide{\label{fig:foo.pyf}} - \special{html:
} - \verbatiminput{examples/foo.pyf} - \special{html:
} - \caption{Raw signature file \texttt{foo.pyf} generated with - \texttt{f2py -m foo -h foo.pyf exp1.f}} - \tthhide{\label{fig:foo.pyf}} -\end{figure} -\begin{figure}[htb] - \latexhide{\label{fig:foom.pyf}} - \special{html:
} - \verbatiminput{examples/foom.pyf} - \special{html:
} - \caption{Modified signature file \texttt{foo.pyf}} - \tthhide{\label{fig:foom.pyf}} -\end{figure} - -As we can see, the syntax of the signature file is an -extension of the Fortran~90/95 syntax. This means that only a few new -constructs are introduced for \fpy in addition to all standard Fortran -constructs; signature files can even be written in fixed form. A -complete set of constructs that are used when creating interfaces, is -described in the \fpy User's Guide \cite{f2py-ug}. - - -\section{Basic Features} -\label{sec:features} - -In this section a short overview of \fpy features is given. -\begin{enumerate} -\item All basic Fortran types are supported. They include -the following type specifications: -\begin{verbatim} -integer[ | *1 | *2 | *4 | *8 ] -logical[ | *1 | *2 | *4 | *8 ] -real[ | *4 | *8 | *16 ] -complex[ | *8 | *16 | *32 ] -double precision, double complex -character[ |*(*)|*1|*2|*3|...] -\end{verbatim} -In addition, they can all be in the kind-selector form -(e.g. \texttt{real(kind=8)}) or char-selector form -(e.g. \texttt{character(len=5)}). -\item Arrays of all basic types are supported. Dimension - specifications can be of form \texttt{} or - \texttt{:}. In addition, \texttt{*} and \texttt{:} - dimension specifications can be used for input arrays. - Dimension specifications may contain also \texttt{PARAMETER}'s. -\item The following attributes are supported: - \begin{itemize} - \item - \texttt{intent(in)}: used for input-only arguments. - \item - \texttt{intent(inout)}: used for arguments that are changed in - place. - \item - \texttt{intent(out)}: used for return arguments. - \item - \texttt{intent(hide)}: used for arguments to be removed from - the signature of the Python function. - \item - \texttt{intent(in,out)}, \texttt{intent(inout,out)}: used for - arguments with combined behavior. - \item - \texttt{dimension()} - \item - \texttt{depend([])}: used - for arguments that depend on other arguments in \texttt{}. 
- \item - \texttt{check([])}: used for checking the - correctness of input arguments. - \item - \texttt{note()}: used for - adding notes to the module documentation. - \item - \texttt{optional}, \texttt{required} - \item - \texttt{external}: used for call-back arguments. - \item - \texttt{allocatable}: used for Fortran 90/95 allocatable arrays. - \end{itemize} -\item Using \fpy one can call arbitrary Fortran~77/90/95 subroutines - and functions from Python, including Fortran 90/95 module routines. -\item Using \fpy one can access data in Fortran~77 COMMON blocks and - variables in Fortran 90/95 modules, including allocatable arrays. -\item Using \fpy one can call Python functions from Fortran (call-back - functions). \fpy supports very flexible hooks for call-back functions. -\item Wrapper functions perform the necessary type conversations for their - arguments resulting in contiguous Numeric arrays that are suitable for - passing to Fortran routines. -\item \fpy generates documentation strings -for \texttt{\_\_doc\_\_} attributes of the wrapper functions automatically. -\item \fpy scans Fortran codes and creates the signature - files. It automatically detects the signatures of call-back functions, - solves argument dependencies, decides the order of initialization of - optional arguments, etc. -\item \fpy automatically generates GNU Makefiles for compiling Fortran - and C codes, and linking them to a shared module. - \fpy detects available Fortran and C compilers. The - supported compilers include the GNU project C Compiler (gcc), Compaq - Fortran, VAST/f90 Fortran, Absoft F77/F90, and MIPSpro 7 Compilers, etc. - \fpy has been tested to work on the following platforms: Intel/Alpha - Linux, HP-UX, IRIX64. -\item Finally, the complete \fpy User's Guide is available in various - formats (ps, pdf, html, dvi). A mailing list, - \email{f2py-users@cens.ioc.ee}, is open for support and feedback. See - the FPIG's home page for more information \cite{fpig}. 
-\end{enumerate} - - -\section{Implementation Issues} -\label{sec:impl} - -The Fortran to Python interface can be thought of as a three layer -``sandwich'' of different languages: Python, C, and Fortran. This -arrangement has two interfaces: Python-C and C-Fortran. Since Python -itself is written in C, there are no basic difficulties in -implementing the Python-C interface~\cite{python-doc:ext}. The C-Fortran -interface, on the other hand, results in many platform and compiler specific -issues that have to be dealt with. We will now discuss these issues -in some detail and describe how they are solved in FPIG. - -\subsection{Mapping Fortran Types to C Types} -\label{sec:mapF2Ctypes} - -Table \ref{tab:mapf2c} defines how Fortran types are mapped to C types -in \fpy. -\begin{table}[htb] - \begin{center} - \begin{tabular}[c]{l|l} - Fortran type & C type \\\hline - \texttt{integer *1} & \texttt{char}\\ - \texttt{byte} & \texttt{char}\\ - \texttt{integer *2} & \texttt{short}\\ - \texttt{integer[ | *4]} & \texttt{int}\\ - \texttt{integer *8} & \texttt{long long}\\ - \texttt{logical *1} & \texttt{char}\\ - \texttt{logical *2} & \texttt{short}\\ - \texttt{logical[ | *4]} & \texttt{int}\\ - \texttt{logical *8} & \texttt{int}\\ - \texttt{real[ | *4]} & \texttt{float}\\ - \texttt{real *8} & \texttt{double}\\ - \texttt{real *16} & \texttt{long double}\\ - \texttt{complex[ | *8]} & \texttt{struct \{float r,i;\}}\\ - \texttt{complex *16} & \texttt{struct \{double r,i;\}}\\ - \texttt{complex *32} & \texttt{struct \{long double r,i;\}}\\ - \texttt{character[*...]} & \texttt{char *}\\ - \end{tabular} - \caption{Mapping Fortran types to C types.} - \label{tab:mapf2c} - \end{center} -\end{table} -Users may redefine these mappings by creating a \texttt{.f2py\_f2cmap} -file in the working directory. This file should contain a Python -dictionary of dictionaries, e.g. 
\texttt{\{'real':\{'low':'float'\}\}}, -that informs \fpy to map Fortran type \texttt{real(low)} -to C type \texttt{float} (here \texttt{PARAMETER low = ...}). - - -\subsection{Calling Fortran (Module) Routines} -\label{sec:callrout} - -When mixing Fortran and C codes, one has to know how function names -are mapped to low-level symbols in their object files. Different -compilers may use different conventions for this purpose. For example, gcc -appends the underscore \texttt{\_} to a Fortran routine name. Other -compilers may use upper case names, prepend or append different -symbols to Fortran routine names or both. In any case, if the -low-level symbols corresponding to Fortran routines are valid for the -C language specification, compiler specific issues can be solved by -using CPP macro features. - -Unfortunately, there are Fortran compilers that use symbols in -constructing low-level routine names that are not valid for C. For -example, the (IRIX64) MIPSpro 7 Compilers use `\$' character in the -low-level names of module routines which makes it impossible (at -least directly) to call such routines from C when using the MIPSpro 7 -C Compiler. - -In order to overcome this difficulty, FPIG introduces a unique -solution: instead of using low-level symbols for calling Fortran -module routines from C, the references to such routines are determined -at run-time by using special wrappers. These wrappers are called once -during the initialization of an extension module. They are simple -Fortran subroutines that use a Fortran module and call another C -function with Fortran module routines as arguments in order to save -their references to C global variables that are later used for calling -the corresponding Fortran module routines. This arrangement is -set up as follows. Consider the following Fortran 90 module with the -subroutine \texttt{bar}: -\special{html:
} -\begin{verbatim} -module fun - subroutine bar() - end -end -\end{verbatim} -\special{html:
} -Figure \ref{fig:capi-sketch} illustrates a Python C/API extension -module for accessing the F90 module subroutine \texttt{bar} from Python. -When the Python module \texttt{foo} is loaded, \texttt{finitbar} is -called. \texttt{finitbar} calls \texttt{init\_bar} by passing the -reference of the Fortran 90 module subroutine \texttt{bar} to C where it is -saved to the variable \texttt{bar\_ptr}. Now, when one executes \texttt{foo.bar()} -from Python, \texttt{bar\_ptr} is used in \texttt{bar\_capi} to call -the F90 module subroutine \texttt{bar}. -\begin{figure}[htb] - \latexhide{\label{fig:capi-sketch}} - \special{html:
} -\begin{verbatim} -#include "Python.h" -... -char *bar_ptr; -void init_bar(char *bar) { - bar_ptr = bar; -} -static PyObject * -bar_capi(PyObject *self,PyObject *args) { - ... - (*((void *)bar_ptr))(); - ... -} -static PyMethodDef -foo_module_methods[] = { - {"bar",bar_capi,METH_VARARGS}, - {NULL,NULL} -}; -extern void finitbar_; /* GCC convention */ -void initfoo() { - ... - finitbar_(init_bar); - Py_InitModule("foo",foo_module_methods); - ... -} -\end{verbatim} - \special{html:
} - \caption{Sketch of Python C/API for accessing F90 module subroutine - \texttt{bar}. The Fortran function \texttt{finitbar} is defined in - Fig.~\ref{fig:wrapbar}.} - \tthhide{\label{fig:capi-sketch}} -\end{figure} -\begin{figure}[ht] - \latexhide{\label{fig:wrapbar}} -\special{html:
} -\begin{verbatim} - subroutine finitbar(cinit) - use fun - extern cinit - call cinit(bar) - end -\end{verbatim} -\special{html:
} - \caption{Wrapper for passing the reference of \texttt{bar} to C code.} - \tthhide{\label{fig:wrapbar}} -\end{figure} - -Surprisingly, mixing C code and Fortran modules in this way is as -portable and compiler independent as mixing C and ordinary Fortran~77 -code. - -Note that extension modules generated by \fpy actually use -\texttt{PyFortranObject} that implements above described scheme with -exchanged functionalities (see Section \ref{sec:PFO}). - - -\subsection{Wrapping Fortran Functions} -\label{sec:wrapfunc} - -The Fortran language has two types of routines: subroutines and -functions. When a Fortran function returns a composed type such as -\texttt{COMPLEX} or \texttt{CHARACTER}-array then calling this -function directly from C may not work for all compilers, as C -functions are not supposed to return such references. In order to -avoid this, FPIG constructs an additional Fortran wrapper subroutine -for each such Fortran function. These wrappers call just the -corresponding functions in the Fortran layer and return the result to -C through its first argument. - - -\subsection{Accessing Fortran Data} -\label{sec:accsdata} - -In Fortran one can use \texttt{COMMON} blocks and Fortran module -variables to save data that is accessible from other routines. Using -FPIG, one can also access these data containers from Python. To achieve -this, FPIG uses special wrapper functions (similar to the ones used -for wrapping Fortran module routines) to save the references to these -data containers so that they can later be used from C. - -FPIG can also handle \texttt{allocatable} arrays. For example, if a -Fortran array is not yet allocated, then by assigning it in Python, -the Fortran to Python interface will allocate and initialize the -array. For example, the F90 module allocatable array \texttt{bar} -defined in -\special{html:
} -\begin{verbatim} -module fun - integer, allocatable :: bar(:) -end module -\end{verbatim} -\special{html:
} -can be allocated from Python as follows -\special{html:
} -\begin{verbatim} ->>> import foo ->>> foo.fun.bar = [1,2,3,4] -\end{verbatim} -\special{html:
} - -\subsection{\texttt{PyFortranObject}} -\label{sec:PFO} - -In general, we would like to access from Python the following Fortran -objects: -\begin{itemize} -\item subroutines and functions, -\item F90 module subroutines and functions, -\item items in COMMON blocks, -\item F90 module data. -\end{itemize} -Assuming that the Fortran source is available, we can determine the signatures -of these objects (the full specification of routine arguments, the -layout of Fortran data, etc.). In fact, \fpy gets this information -while scanning the Fortran source. - -In order to access these Fortran objects from C, we need to determine -their references. Note that the direct access of F90 module objects is -extremely compiler dependent and in some cases even impossible. -Therefore, FPIG uses various wrapper functions for obtaining the -references to Fortran objects. These wrapper functions are ordinary -F77 subroutines that can easily access objects from F90 modules and -that pass the references to Fortran objects as C variables. - - -\fpy generated Python C/API extension modules use -\texttt{PyFortranObject} to store the references of Fortran objects. -In addition to the storing functionality, the \texttt{PyFortranObject} -also provides methods for accessing/calling Fortran objects from -Python in a user-friendly manner. For example, the item \texttt{a} in -\texttt{COMMON /bar/ a(2)} can be accessed from Python as -\texttt{foo.bar.a}. - -Detailed examples of \texttt{PyFortranObject} usage can be found in -\cite{PFO}. - -\subsection{Callback Functions} -\label{sec:callback} - -Fortran routines may have arguments specified as \texttt{external}. -These arguments are functions or subroutines names that the receiving Fortran routine -will call from its body. For such arguments FPIG -constructs a call-back mechanism (originally contributed by Travis -Oliphant) that allows Fortran routines to call Python functions. 
This -is actually realized using a C layer between Python and -Fortran. Currently, the call-back mechanism is compiler independent -unless a call-back function needs to return a composed type -(e.g. \texttt{COMPLEX}). - -The signatures of call-back functions are determined when \fpy scans -the Fortran source code. To illustrate this, consider the following -example: -\special{html:
} -\begin{verbatim} - subroutine foo(bar, fun, boo) - integer i - real r - external bar,fun,boo - call bar(i, 1.2) - r = fun() - call sun(boo) - end -\end{verbatim} -\special{html:
} -\fpy recognizes the signatures of the user routines \texttt{bar} and -\texttt{fun} using the information contained in the lines \texttt{call - bar(i, 1.2)} and \texttt{r = fun()}: -\special{html:
} -\begin{verbatim} -subroutine bar(a,b) - integer a - real b -end -function fun() - real fun -end -\end{verbatim} -\special{html:
} -But \fpy cannot determine the signature of the user routine -\texttt{boo} because the source contains no information at all about -the \texttt{boo} specification. Here user needs to provide the -signature of \texttt{boo} manually. - -\section{Future Work} -\label{sec:future} - -FPIG can be used to wrap almost any Fortran code. However, there are -still issues that need to be resolved. Some of them are listed below: -\begin{enumerate} -\item One of the FPIG's goals is to become as platform and compiler - independent as possible. Currently FPIG can be used on - any UN*X platform that has gcc installed in it. In the future, FPIG - should be also tested on Windows systems. -\item Another goal of FPIG is to become as simple to use as - possible. To achieve that, FPIG should start using the facilities of - \texttt{distutils}, the new Python standard to distribute and build - Python modules. Therefore, a contribution to \texttt{distutils} - that can handle Fortran extensions should be developed. -\item Currently users must be aware of - the fact that multi-dimensional arrays are stored differently in C - and Fortran (they must provide transposed multi-dimensional arrays - to wrapper functions). In the future a solution should be found such - that users do not need to worry about this rather - confusing and technical detail. -\item Finally, a repository of signature files for widely-used Fortran - libraries (e.g. BLAS, LAPACK, MINPACK, ODEPACK, EISPACK, LINPACK) should be - provided. -\end{enumerate} - - -\section{Application to a Large Aero-Structural Analysis Framework} -\label{sec:app} - - -\subsection{The Need for Python and FPIG} -\label{sec:appsub1} - -As a demonstration of the power and usefulness of FPIG, we will -present work that has been done at the Aerospace Computing Laboratory -at Stanford University. 
The focus of the research is on aircraft -design optimization using high-fidelity analysis tools such as -Computational Fluid Dynamics (CFD) and Computational Structural -Mechanics (CSM)~\cite{reno99}. - -The group's analysis programs are written mainly in Fortran and are the result -of many years of development. Until now, any researcher that needed -to use these tools would have to learn a less than user-friendly -interface and become relatively familiar with the inner workings of -the codes before starting the research itself. The need to -couple analyses of different disciplines revealed the additional -inconvenience of gluing and scripting the different codes with -Fortran. - -It was therefore decided that the existing tools should be wrapped -using an object-oriented language in order to improve their ease of -use and versatility. The use of several different languages such as -C++, Java and Perl was investigated but Python seemed to provide the -best solution. The fact that it combines scripting capability -with a fully-featured object-oriented programming language, and that -it has a clean syntax were factors that determined our choice. The -introduction of tools that greatly facilitate the task of wrapping -Fortran with Python provided the final piece needed to realize our -objective. - -\subsection{Wrapping the Fortran Programs} - -In theory, it would have been possible to wrap our Fortran programs -with C and then with Python by hand. However, this would have been a -labor intensive task that would detract from our research. The use of -tools that automate the task of wrapping has been extremely useful. - -The first such tool that we used was PyFort. This tool created the C -wrappers and Python modules automatically, based on signature files -(\texttt{.pyf}) provided by the user. 
Although it made the task of -wrapping considerably easier, PyFort was limited by the fact that any -Fortran data that was needed at the Python level had to be passed in -the argument list of the Fortran subroutine. Since the bulk of the -data in our programs is shared by using Fortran~77 common blocks and -Fortran~90 modules, this required adding many more arguments to the -subroutine headers. Furthermore, since Fortran does not allow common -block variables or module data to be specified in a subroutine -argument list, a dummy pointer for each desired variable had to be -created and initialized. - -The search for a better solution to this problem led us to \fpy. -Since \fpy provides a solution for accessing common block and module -variables, there was no need to change the Fortran source anymore, -making the wrapping process even easier. With \fpy we also -experienced an increased level of automation since it produces the -signature files automatically, as well as a Makefile for the joint -compilation of the original Fortran and C wrapper codes. This increased -automation did not detract from its flexibility since it was always -possible to edit the signature files to provide different functionality. - -Once Python interfaces were created for each Fortran application -by running \fpy, it was just a matter of using Python to achieve the -final objective of developing an object-oriented framework for our -multidisciplinary solvers. The Python modules that we designed are -discussed in the following section. - - -\subsection{Module Design} -\label{ssec:module} - -The first objective of this effort was to design the classes for each -type of analysis, each representing an independent Python module. In -our case, we are interested in performing aero-structural analysis and -optimization of aircraft wings. We therefore needed an analysis tool -for the flow (CFD), another for analyzing the structure (CSM), as well -as a geometry database. 
In addition, we needed to interface these two -tools in order to analyze the coupled system. The object design for -each of these modules should be general enough that the underlying -analysis code in Fortran can be changed without changing the Python -interface. Another requirement was that the modules be usable on -their own for single discipline analysis. - -\subsubsection{Geometry} - -The \emph{Geometry} class provides a database for the outer mold -geometry of the aircraft. This database needs to be accessed by both -the flow and structural solvers. It contains a parametric description -of the aircraft's surface as well as methods that extract and update -this information. - - -\subsubsection{Flow} - -The flow solver was wrapped in a class called \emph{Flow}. The class -was designed so that it can wrap any type of CFD solver. It contains -two main objects: the computational mesh and a solver object. A graph -showing the hierarchy of the objects in \emph{Flow} is shown in -Fig.~\ref{fig:flow}. -\tthhide{ -\begin{figure}[h] - \centering - \epsfig{file=./flow.eps, angle=0, width=.7\linewidth} - \caption{The \emph{Flow} container class.} - \label{fig:flow} -\end{figure} -} -\latexhide{ -\begin{figure}[h] - \label{fig:flow} -\special{html: -
- -
-} - \caption{The \emph{Flow} container class.} -\end{figure} -} -Methods in the flow class include those used for the initialization of -all the class components as well as methods that write the current -solution to a file. - - -\subsubsection{Structure} - -The \emph{Structure} class wraps a structural analysis code. The class -stores the information about the structure itself in an object called -\emph{Model} which also provides methods for changing and exporting -its information. A list of the objects contained in this class can be -seen in Fig.~\ref{fig:structure}. -\tthhide{ -\begin{figure}[h] - \centering - \epsfig{file=./structure.eps, angle=0, width=.7\linewidth} - \caption{The \emph{Structure} container class.} - \label{fig:structure} -\end{figure} -} -\latexhide{ -\begin{figure}[h] - \label{fig:structure} -\special{html: -
- -
-} - \caption{The \emph{Structure} container class.} -\end{figure} -} -Since the \emph{Structure} class contains a -dictionary of \emph{LoadCase} objects, it is able to store and solve -multiple load cases, a capability that the original Fortran code -does not have. - - -\subsubsection{Aerostructure} - -The \emph{Aerostructure} class is the main class in the -aero-structural analysis module and contains a \emph{Geometry}, a -\emph{Flow} and a \emph{Structure}. In addition, the class defines -all the functions that are necessary to translate aerodynamic -loads to structural loads and structural displacements to -geometry surface deformations. - -One of the main methods of this class is the one that solves the -aeroelastic system. This method is printed below: -\begin{verbatim} -def Iterate(self, load_case): - """Iterates the aero-structural solution.""" - self.flow.Iterate() - self._UpdateStructuralLoads() - self.structure.CalcDisplacements(load_case) - self.structure.CalcStresses(load_case) - self._UpdateFlowMesh() - return -\end{verbatim} -This is indeed a very readable script, thanks to Python, and any -high-level changes to the solution procedure can be easily -implemented. -The \emph{Aerostructure} class also contains methods that export all -the information on the current solution for visualization, an example -of which is shown in the next section. - - -\subsection{Results} - -In order to visualize results, and because we needed to view results -from multiple disciplines simultaneously, we selected OpenDX. Output -files in DX format are written at the Python level and the result can -be seen in Fig.~\ref{fig:aerostructure} for the case of a transonic -airliner configuration. -\tthhide{ -\begin{figure*}[t] - \centering - \epsfig{file=./aerostructure.eps, angle=-90, width=\linewidth} - \caption{Aero-structural model and results.} - \label{fig:aerostructure} -\end{figure*} -} -\latexhide{ -\begin{figure}[h] - \label{fig:aerostructure} -\special{html: -
- -
-} - \caption{Aero-structural model and results.} -\end{figure} -} - - -The figure illustrates the multidisciplinary nature of the -problem. The grid pictured in the background is the mesh used by the -flow solver and is colored by the pressure values computed at the -cell centers. The wing in the foreground and its outer surface is -clipped to show the internal structural components which are colored -by their stress value. - -In conclusion, \fpy and Python have been extremely useful tools in our -pursuit for increasing the usability and flexibility of existing Fortran -tools. - - -\begin{thebibliography}{99} -\bibitem{netlib} -\newblock Netlib repository at UTK and ORNL. -\newblock \\\wwwsite{http://www.netlib.org/} -\bibitem{python} -Python language. -\newblock \\\wwwsite{http://www.python.org/} -\bibitem{swig} -SWIG --- Simplified Wrapper and Interface Generator. -\newblock \\\wwwsite{http://www.swig.org/} -\bibitem{pyfort} -PyFort --- The Python-Fortran connection tool. -\newblock \\\wwwsite{http://pyfortran.sourceforge.net/} -\bibitem{fpig} -FPIG --- Fortran to Python Interface Generator. -\newblock \\\wwwsite{http://cens.ioc.ee/projects/f2py2e/} -\bibitem{numpy} -Numerical Extension to Python. -\newblock \\\wwwsite{http://numpy.sourceforge.net/} -\bibitem{graham-etal} -R. L. Graham, D. E. Knuth, and O. Patashnik. -\newblock {\em {C}oncrete {M}athematics: a foundation for computer science.} -\newblock Addison-Wesley, 1988 -\bibitem{f2py-ug} -P. Peterson. -\newblock {\em {\tt f2py} - Fortran to Python Interface Generator. Second Edition.} -\newblock 2000 -\newblock -\\\wwwsite{http://cens.ioc.ee/projects/f2py2e/usersguide.html} -\bibitem{python-doc:ext} -Python Documentation: Extending and Embedding. -\newblock \\\wwwsite{http://www.python.org/doc/ext/} -\bibitem{PFO} -P. Peterson. {\em {\tt PyFortranObject} example usages.} -\newblock 2001 -\newblock \\\wwwsite{http://cens.ioc.ee/projects/f2py2e/pyfobj.html} -\bibitem{reno99} -Reuther, J., J. J. Alonso, J. R. 
R. A. Martins, and -S. C. Smith. -\newblock ``A Coupled Aero-Structural Optimization Method for - Complete Aircraft Configurations'', -\newblock {\em Proceedings of the 37th Aerospace Sciences Meeting}, -\newblock AIAA Paper 1999-0187. Reno, NV, January, 1999 -\end{thebibliography} - -%\end{multicols} - -%\begin{figure}[htbp] -% \begin{center} -% \epsfig{file=aerostructure2b.ps,width=0.75\textwidth} -% \end{center} -%\end{figure} - - - -\end{document} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: t -%%% End: diff -Nru python-numpy-1.13.3/doc/f2py/README.txt python-numpy-1.14.5/doc/f2py/README.txt --- python-numpy-1.13.3/doc/f2py/README.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/README.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,415 +0,0 @@ -.. -*- rest -*- - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - F2PY: Fortran to Python interface generator -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:Author: Pearu Peterson -:License: NumPy License -:Web-site: http://cens.ioc.ee/projects/f2py2e/ -:Discussions to: `f2py-users mailing list`_ -:Documentation: `User's Guide`__, FAQ__ -:Platforms: All -:Date: $Date: 2005/01/30 18:54:53 $ - -.. _f2py-users mailing list: http://cens.ioc.ee/mailman/listinfo/f2py-users/ -__ usersguide/index.html -__ FAQ.html - -.. Contents:: - -============== - Introduction -============== - -The purpose of the F2PY --*Fortran to Python interface generator*-- -project is to provide connection between Python_ and Fortran -languages. F2PY is a Python extension tool for creating Python C/API -modules from (handwritten or F2PY generated) signature files (or -directly from Fortran sources). The generated extension modules -facilitate: - -* Calling Fortran 77/90/95, Fortran 90/95 module, and C functions from - Python. - -* Accessing Fortran 77 ``COMMON`` blocks and Fortran 90/95 module - data (including allocatable arrays) from Python. - -* Calling Python functions from Fortran or C (call-backs). 
- -* Automatically handling the difference in the data storage order of - multi-dimensional Fortran and Numerical Python (i.e. C) arrays. - -In addition, F2PY can build the generated extension modules to shared -libraries with one command. F2PY uses the ``numpy_distutils`` module -from SciPy_ that supports number of major Fortran compilers. - -.. - (see `COMPILERS.txt`_ for more information). - -F2PY generated extension modules depend on NumPy_ package that -provides fast multi-dimensional array language facility to Python. - - ---------------- - Main features ---------------- - -Here follows a more detailed list of F2PY features: - -* F2PY scans real Fortran codes to produce the so-called signature - files (.pyf files). The signature files contain all the information - (function names, arguments and their types, etc.) that is needed to - construct Python bindings to Fortran (or C) functions. - - The syntax of signature files is borrowed from the - Fortran 90/95 language specification and has some F2PY specific - extensions. The signature files can be modified to dictate how - Fortran (or C) programs are called from Python: - - + F2PY solves dependencies between arguments (this is relevant for - the order of initializing variables in extension modules). - - + Arguments can be specified to be optional or hidden that - simplifies calling Fortran programs from Python considerably. - - + In principle, one can design any Python signature for a given - Fortran function, e.g. change the order arguments, introduce - auxiliary arguments, hide the arguments, process the arguments - before passing to Fortran, return arguments as output of F2PY - generated functions, etc. - -* F2PY automatically generates __doc__ strings (and optionally LaTeX - documentation) for extension modules. - -* F2PY generated functions accept arbitrary (but sensible) Python - objects as arguments. The F2PY interface automatically takes care of - type-casting and handling of non-contiguous arrays. 
- -* The following Fortran constructs are recognized by F2PY: - - + All basic Fortran types:: - - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ] - integer*([ -1 | -2 | -4 | -8 ]) - character[ | *(*) | *1 | *2 | *3 | ... ] - real[ | *4 | *8 | *16 ], double precision - complex[ | *8 | *16 | *32 ] - - Negative ``integer`` kinds are used to wrap unsigned integers. - - + Multi-dimensional arrays of all basic types with the following - dimension specifications:: - - | : | * | : - - + Attributes and statements:: - - intent([ in | inout | out | hide | in,out | inout,out | c | - copy | cache | callback | inplace | aux ]) - dimension() - common, parameter - allocatable - optional, required, external - depend([]) - check([]) - note() - usercode, callstatement, callprotoargument, threadsafe, fortranname - pymethoddef - entry - -* Because there are only little (and easily handleable) differences - between calling C and Fortran functions from F2PY generated - extension modules, then F2PY is also well suited for wrapping C - libraries to Python. - -* Practice has shown that F2PY generated interfaces (to C or Fortran - functions) are less error prone and even more efficient than - handwritten extension modules. The F2PY generated interfaces are - easy to maintain and any future optimization of F2PY generated - interfaces transparently apply to extension modules by just - regenerating them with the latest version of F2PY. - -* `F2PY Users Guide and Reference Manual`_ - - -=============== - Prerequisites -=============== - -F2PY requires the following software installed: - -* Python_ (versions 1.5.2 or later; 2.1 and up are recommended). - You must have python-dev package installed. -* NumPy_ (versions 13 or later; 20.x, 21.x, 22.x, 23.x are recommended) -* Numarray_ (version 0.9 and up), optional, partial support. -* Scipy_distutils (version 0.2.2 and up are recommended) from SciPy_ - project. Get it from Scipy CVS or download it below. 
- -Python 1.x users also need distutils_. - -Of course, to build extension modules, you'll need also working C -and/or Fortran compilers installed. - -========== - Download -========== - -You can download the sources for the latest F2PY and numpy_distutils -releases as: - -* `2.x`__/`F2PY-2-latest.tar.gz`__ -* `2.x`__/`numpy_distutils-latest.tar.gz`__ - -Windows users might be interested in Win32 installer for F2PY and -Scipy_distutils (these installers are built using Python 2.3): - -* `2.x`__/`F2PY-2-latest.win32.exe`__ -* `2.x`__/`numpy_distutils-latest.win32.exe`__ - -Older releases are also available in the directories -`rel-0.x`__, `rel-1.x`__, `rel-2.x`__, `rel-3.x`__, `rel-4.x`__, `rel-5.x`__, -if you need them. - -.. __: 2.x/ -.. __: 2.x/F2PY-2-latest.tar.gz -.. __: 2.x/ -.. __: 2.x/numpy_distutils-latest.tar.gz -.. __: 2.x/ -.. __: 2.x/F2PY-2-latest.win32.exe -.. __: 2.x/ -.. __: 2.x/numpy_distutils-latest.win32.exe -.. __: rel-0.x -.. __: rel-1.x -.. __: rel-2.x -.. __: rel-3.x -.. __: rel-4.x -.. __: rel-5.x - -Development version of F2PY from CVS is available as `f2py2e.tar.gz`__. - -__ http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/f2py2e.tar.gz?tarball=1 - -Debian Sid users can simply install ``python-f2py`` package. - -============== - Installation -============== - -Unpack the source file, change to directrory ``F2PY-?-???/`` and run -(you may need to become a root):: - - python setup.py install - -The F2PY installation installs a Python package ``f2py2e`` to your -Python ``site-packages`` directory and a script ``f2py`` to your -Python executable path. - -See also Installation__ section in `F2PY FAQ`_. - -.. __: FAQ.html#installation - -Similarly, to install ``numpy_distutils``, unpack its tar-ball and run:: - - python setup.py install - -======= - Usage -======= - -To check if F2PY is installed correctly, run -:: - - f2py - -without any arguments. This should print out the usage information of -the ``f2py`` program. 
- -Next, try out the following three steps: - -1) Create a Fortran file `hello.f`__ that contains:: - - C File hello.f - subroutine foo (a) - integer a - print*, "Hello from Fortran!" - print*, "a=",a - end - -__ hello.f - -2) Run - - :: - - f2py -c -m hello hello.f - - This will build an extension module ``hello.so`` (or ``hello.sl``, - or ``hello.pyd``, etc. depending on your platform) into the current - directory. - -3) Now in Python try:: - - >>> import hello - >>> print hello.__doc__ - >>> print hello.foo.__doc__ - >>> hello.foo(4) - Hello from Fortran! - a= 4 - >>> - -If the above works, then you can try out more thorough -`F2PY unit tests`__ and read the `F2PY Users Guide and Reference Manual`_. - -__ FAQ.html#q-how-to-test-if-f2py-is-working-correctly - -=============== - Documentation -=============== - -The documentation of the F2PY project is collected in ``f2py2e/docs/`` -directory. It contains the following documents: - -`README.txt`_ (on GitHub__) - The first thing to read about F2PY -- this document. - -__ https://github.com/numpy/numpy/blob/master/numpy/f2py/docs/README.txt - -`usersguide/index.txt`_, `usersguide/f2py_usersguide.pdf`_ - F2PY Users Guide and Reference Manual. Contains lots of examples. - -`FAQ.txt`_ (on GitHub__) - F2PY Frequently Asked Questions. - -__ https://github.com/numpy/numpy/blob/master/numpy/f2py/docs/FAQ.txt - -`TESTING.txt`_ (on GitHub__) - About F2PY testing site. What tests are available and how to run them. - -__ https://github.com/numpy/numpy/blob/master/numpy/f2py/docs/TESTING.txt - -`HISTORY.txt`_ (on GitHub__) - A list of latest changes in F2PY. This is the most up-to-date - document on F2PY. - -__ https://github.com/numpy/numpy/blob/master/numpy/f2py/docs/HISTORY.txt - -`THANKS.txt`_ - Acknowledgments. - -.. - `COMPILERS.txt`_ - Compiler and platform specific notes. - -=============== - Mailing list -=============== - -A mailing list f2py-users@cens.ioc.ee is open for F2PY related -discussion/questions/etc. 
- -* `Subscribe..`__ -* `Archives..`__ - -__ http://cens.ioc.ee/mailman/listinfo/f2py-users -__ http://cens.ioc.ee/pipermail/f2py-users - - -===== - CVS -===== - -F2PY is being developed under CVS_. The CVS version of F2PY can be -obtained as follows: - -1) First you need to login (the password is ``guest``):: - - cvs -d :pserver:anonymous@cens.ioc.ee:/home/cvs login - -2) and then do the checkout:: - - cvs -z6 -d :pserver:anonymous@cens.ioc.ee:/home/cvs checkout f2py2e - -3) You can update your local F2PY tree ``f2py2e/`` by executing:: - - cvs -z6 update -P -d - -You can browse the `F2PY CVS`_ repository. - -=============== - Contributions -=============== - -* `A short introduction to F2PY`__ by Pierre Schnizer. - -* `F2PY notes`__ by Fernando Perez. - -* `Debian packages of F2PY`__ by José Fonseca. [OBSOLETE, Debian Sid - ships python-f2py package] - -__ http://fubphpc.tu-graz.ac.at/~pierre/f2py_tutorial.tar.gz -__ http://cens.ioc.ee/pipermail/f2py-users/2003-April/000472.html -__ http://jrfonseca.dyndns.org/debian/ - - -=============== - Related sites -=============== - -* `Numerical Python`_ -- adds a fast array facility to the Python language. -* Pyfort_ -- A Python-Fortran connection tool. -* SciPy_ -- An open source library of scientific tools for Python. -* `Scientific Python`_ -- A collection of Python modules that are - useful for scientific computing. -* `The Fortran Company`_ -- A place to find products, services, and general - information related to the Fortran programming language. -* `American National Standard Programming Language FORTRAN ANSI(R) X3.9-1978`__ -* `J3`_ -- The US Fortran standards committee. -* SWIG_ -- A software development tool that connects programs written - in C and C++ with a variety of high-level programming languages. -* `Mathtools.net`_ -- A technical computing portal for all scientific - and engineering needs. - -.. __: http://www.fortran.com/fortran/F77_std/rjcnf.html - -.. References - ========== - - -.. 
_F2PY Users Guide and Reference Manual: usersguide/index.html -.. _usersguide/index.txt: usersguide/index.html -.. _usersguide/f2py_usersguide.pdf: usersguide/f2py_usersguide.pdf -.. _README.txt: README.html -.. _COMPILERS.txt: COMPILERS.html -.. _F2PY FAQ: -.. _FAQ.txt: FAQ.html -.. _HISTORY.txt: HISTORY.html -.. _HISTORY.txt from CVS: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup -.. _THANKS.txt: THANKS.html -.. _TESTING.txt: TESTING.html -.. _F2PY CVS2: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/ -.. _F2PY CVS: http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/ - -.. _CVS: http://www.cvshome.org/ -.. _Python: http://www.python.org/ -.. _SciPy: http://www.numpy.org/ -.. _NumPy: http://www.numpy.org/ -.. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray -.. _docutils: http://docutils.sourceforge.net/ -.. _distutils: http://www.python.org/sigs/distutils-sig/ -.. _Numerical Python: http://www.numpy.org/ -.. _Pyfort: http://pyfortran.sourceforge.net/ -.. _Scientific Python: - http://starship.python.net/crew/hinsen/scientific.html -.. _The Fortran Company: http://www.fortran.com/fortran/ -.. _J3: http://www.j3-fortran.org/ -.. _Mathtools.net: http://www.mathtools.net/ -.. _SWIG: http://www.swig.org/ - -.. 
- Local Variables: - mode: indented-text - indent-tabs-mode: nil - sentence-end-double-space: t - fill-column: 70 - End: diff -Nru python-numpy-1.13.3/doc/f2py/Release-1.x.txt python-numpy-1.14.5/doc/f2py/Release-1.x.txt --- python-numpy-1.13.3/doc/f2py/Release-1.x.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/Release-1.x.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ - -I am pleased to announce the first public release of f2py 1.116: - -Writing Python C/API wrappers for Fortran routines can be a very -tedious task, especially if a Fortran routine takes more than 20 -arguments but only few of them are relevant for the problems that they -solve. - -The Fortran to Python Interface Generator, or FPIG for short, is a -command line tool (f2py) for generating Python C/API modules for -wrapping Fortran 77 routines, accessing common blocks from Python, and -calling Python functions from Fortran (call-backs). - -The tool can be downloaded from - - http://cens.ioc.ee/projects/f2py2e/ - -where you can find also information about f2py features and its User's -Guide. - -f2py is released under the LGPL license. - -With regards, - Pearu Peterson - -

f2py 1.116 - The -Fortran to Python Interface Generator (25-Jan-00) diff -Nru python-numpy-1.13.3/doc/f2py/Release-2.x.txt python-numpy-1.14.5/doc/f2py/Release-2.x.txt --- python-numpy-1.13.3/doc/f2py/Release-2.x.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/Release-2.x.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,77 +0,0 @@ - -FPIG - Fortran to Python Interface Generator - -I am pleased to announce the second public release of f2py -(version 2.264): - - http://cens.ioc.ee/projects/f2py2e/ - -f2py is a command line tool for binding Python and Fortran codes. It -scans Fortran 77/90/95 codes and generates a Python C/API module that -makes it possible to call Fortran routines from Python. No Fortran or -C expertise is required for using this tool. - -Features include: - - *** All basic Fortran types are supported: - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], - character[ | *(*) | *1 | *2 | *3 | ... ] - real[ | *4 | *8 | *16 ], double precision, - complex[ | *8 | *16 | *32 ] - - *** Multi-dimensional arrays of (almost) all basic types. - Dimension specifications: - | : | * | : - - *** Supported attributes: - intent([ in | inout | out | hide | in,out | inout,out ]) - dimension() - depend([]) - check([]) - note() - optional, required, external - - *** Calling Fortran 77/90/95 subroutines and functions. Also - Fortran 90/95 module routines. Internal initialization of - optional arguments. - - *** Accessing COMMON blocks from Python. Accessing Fortran 90/95 - module data coming soon. - - *** Call-back functions: calling Python functions from Fortran with - very flexible hooks. - - *** In Python, arguments of the interfaced functions may be of - different type - necessary type conversations are done - internally in C level. - - *** Automatically generates documentation (__doc__,LaTeX) for - interface functions. - - *** Automatically generates signature files --- user has full - control over the interface constructions. 
Automatically - detects the signatures of call-back functions, solves argument - dependencies, etc. - - *** Automatically generates Makefile for compiling Fortran and C - codes and linking them to a shared module. Many compilers are - supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft - F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha - Linux, HP-UX, IRIX64. - - *** Complete User's Guide in various formats (html,ps,pdf,dvi). - - *** f2py users list is available for support, feedback, etc. - -More information about f2py, see - - http://cens.ioc.ee/projects/f2py2e/ - -f2py is released under the LGPL license. - -Sincerely, - Pearu Peterson - September 12, 2000 - -

f2py 2.264 - The -Fortran to Python Interface Generator (12-Sep-00) diff -Nru python-numpy-1.13.3/doc/f2py/Release-3.x.txt python-numpy-1.14.5/doc/f2py/Release-3.x.txt --- python-numpy-1.13.3/doc/f2py/Release-3.x.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/Release-3.x.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,87 +0,0 @@ - -F2PY - Fortran to Python Interface Generator - -I am pleased to announce the third public release of f2py -(version 2.3.321): - - http://cens.ioc.ee/projects/f2py2e/ - -f2py is a command line tool for binding Python and Fortran codes. It -scans Fortran 77/90/95 codes and generates a Python C/API module that -makes it possible to call Fortran subroutines from Python. No Fortran or -C expertise is required for using this tool. - -Features include: - - *** All basic Fortran types are supported: - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], - character[ | *(*) | *1 | *2 | *3 | ... ] - real[ | *4 | *8 | *16 ], double precision, - complex[ | *8 | *16 | *32 ] - - *** Multi-dimensional arrays of (almost) all basic types. - Dimension specifications: - | : | * | : - - *** Supported attributes and statements: - intent([ in | inout | out | hide | in,out | inout,out ]) - dimension() - depend([]) - check([]) - note() - optional, required, external -NEW: intent(c), threadsafe, fortranname - - *** Calling Fortran 77/90/95 subroutines and functions. Also - Fortran 90/95 module subroutines are supported. Internal - initialization of optional arguments. - - *** Accessing COMMON blocks from Python. -NEW: Accessing Fortran 90/95 module data. - - *** Call-back functions: calling Python functions from Fortran with - very flexible hooks. - - *** In Python, arguments of the interfaced functions may be of - different type - necessary type conversations are done - internally in C level. - - *** Automatically generates documentation (__doc__,LaTeX) for - interfaced functions. 
- - *** Automatically generates signature files --- user has full - control over the interface constructions. Automatically - detects the signatures of call-back functions, solves argument - dependencies, etc. - -NEW: * Automatically generates setup_.py for building - extension modules using tools from distutils and - fortran_support module (SciPy). - - *** Automatically generates Makefile for compiling Fortran and C - codes and linking them to a shared module. Many compilers are - supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft - F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha - Linux, HP-UX, IRIX64. - - *** Complete User's Guide in various formats (html,ps,pdf,dvi). - - *** f2py users list is available for support, feedback, etc. - -NEW: * Installation with distutils. - - *** And finally, many bugs are fixed. - -More information about f2py, see - - http://cens.ioc.ee/projects/f2py2e/ - -LICENSE: - f2py is released under the LGPL. - -Sincerely, - Pearu Peterson - December 4, 2001 - -

f2py 2.3.321 - The -Fortran to Python Interface Generator (04-Dec-01) diff -Nru python-numpy-1.13.3/doc/f2py/Release-4.x.txt python-numpy-1.14.5/doc/f2py/Release-4.x.txt --- python-numpy-1.13.3/doc/f2py/Release-4.x.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/Release-4.x.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ - -F2PY - Fortran to Python Interface Generator - -I am pleased to announce the fourth public release of f2py -(version 2.4.366): - - http://cens.ioc.ee/projects/f2py2e/ - -f2py is a command line tool for binding Python and Fortran codes. It -scans Fortran 77/90/95 codes and generates a Python C/API module that -makes it possible to call Fortran subroutines from Python. No Fortran or -C expertise is required for using this tool. - -New features: - *** Win32 support. - *** Better Python C/API generated code (-Wall is much less verbose). - -Features include: - - *** All basic Fortran types are supported: - integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ], - character[ | *(*) | *1 | *2 | *3 | ... ] - real[ | *4 | *8 | *16 ], double precision, - complex[ | *8 | *16 | *32 ] - - *** Multi-dimensional arrays of (almost) all basic types. - Dimension specifications: - | : | * | : - - *** Supported attributes and statements: - intent([ in | inout | out | hide | in,out | inout,out ]) - dimension() - depend([]) - check([]) - note() - optional, required, external - intent(c), threadsafe, fortranname - - *** Calling Fortran 77/90/95 subroutines and functions. Also - Fortran 90/95 module subroutines are supported. Internal - initialization of optional arguments. - - *** Accessing COMMON blocks from Python. - Accessing Fortran 90/95 module data. - - *** Call-back functions: calling Python functions from Fortran with - very flexible hooks. - - *** In Python, arguments of the interfaced functions may be of - different type - necessary type conversations are done - internally in C level. 
- - *** Automatically generates documentation (__doc__,LaTeX) for - interfaced functions. - - *** Automatically generates signature files --- user has full - control over the interface constructions. Automatically - detects the signatures of call-back functions, solves argument - dependencies, etc. - - *** Automatically generates setup_.py for building - extension modules using tools from distutils and - fortran_support module (SciPy). - - *** Automatically generates Makefile for compiling Fortran and C - codes and linking them to a shared module. Many compilers are - supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft - F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha - Linux, HP-UX, IRIX64. - - *** Complete User's Guide in various formats (html,ps,pdf,dvi). - - *** f2py users list is available for support, feedback, etc. - - *** Installation with distutils. - - *** And finally, many bugs are fixed. - -More information about f2py, see - - http://cens.ioc.ee/projects/f2py2e/ - -LICENSE: - f2py is released under the LGPL. - -Sincerely, - Pearu Peterson - December 17, 2001 - -

f2py 2.4.366 - The -Fortran to Python Interface Generator (17-Dec-01) diff -Nru python-numpy-1.13.3/doc/f2py/signaturefile.tex python-numpy-1.14.5/doc/f2py/signaturefile.tex --- python-numpy-1.13.3/doc/f2py/signaturefile.tex 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/signaturefile.tex 1970-01-01 00:00:00.000000000 +0000 @@ -1,368 +0,0 @@ - -\section{Signature file} -\label{sec:signaturefile} - -The syntax of a signature file is borrowed from the Fortran~90/95 -language specification. Almost all Fortran~90/95 standard constructs -are understood. Recall that Fortran~77 is a subset of Fortran~90/95. -This tool introduces also some new attributes that are used for -controlling the process of Fortran to Python interface construction. -In the following, a short overview of the constructs -used in signature files will be given. - - -\subsection{Module block} -\label{sec:moduleblock} - -A signature file contains one or more \texttt{pythonmodule} blocks. A -\texttt{pythonmodule} block has the following structure: -\begin{verbatim} -python module - interface - - end [interface] - interface - module - - - end [module []] - end [interface] -end [pythonmodule []] -\end{verbatim} -For each \texttt{pythonmodule} block \fpy will generate a C-file -\texttt{module.c} (see step (iii)). (This is not true if -\texttt{} contains substring \texttt{\_\_user\_\_}, see -Sec.~\ref{sec:cbmodule} and \texttt{external} attribute). - -\subsection{Signatures of Fortran routines and Python functions} -\label{sec:routineblock} - - -The signature of a Fortran routine has the following structure: -\begin{verbatim} -[] function|subroutine [([])] \ - [result ()] - [] - [] - [] - [] - [] -end [function|subroutine []] -\end{verbatim} - -Let us introduce also the signature of the corresponding wrapper -function: -\begin{verbatim} -def ([,]): - ... 
- return -\end{verbatim} - -Before you edit the signature file, you should first decide what is the -desired signature of the corresponding Python function. \fpy offers -many possibilities to control the interface construction process: you -may want to insert/change/remove various attributes in the -declarations of the arguments in order to change the appearance -of the arguments in the Python wrapper function. - -\begin{itemize} -\item -The definition of the \texttt{} is -\begin{verbatim} - [[]::] -\end{verbatim} -where -\begin{verbatim} - := byte | character[] - | complex[] | real[] - | double complex | double precision - | integer[] | logical[] -\end{verbatim} -\begin{verbatim} - := * | ([len=][,[kind]]) - | (kind=[,len=]) - := * | ([kind=]) -\end{verbatim} -(there is no sense to modify \texttt{}s generated by \fpy). -\texttt{} is a comma separated list of attributes (see -Sec.~\ref{sec:attributes}); -\begin{verbatim} - := [[*][()] - | [()]*] - | [// | =] [,] -\end{verbatim} -where \texttt{} is a comma separated list of dimension -bounds; \texttt{} is a C-expression (see -Sec.~\ref{sec:C-expr}). If an argument is not defined with -\texttt{}, its type is determined by -applying \texttt{implicit} rules (if it is not specifyied, then -standard rules are applied). - -\item The definition of the \texttt{} is -a short form of the \texttt{}: -\begin{verbatim} - -\end{verbatim} - -\item \texttt{} is defined as follows -\begin{verbatim} -use [, | ,ONLY:] - := local_name=>use_name [,] -\end{verbatim} - Currently the \texttt{use} statement is used to link call-back - modules (Sec.~\ref{sec:cbmodule}) and the \texttt{external} - arguments (call-back functions). - -\item \texttt{} is defined as follows -\begin{verbatim} -common // -\end{verbatim} -where -\begin{verbatim} - := [()] [,] -\end{verbatim} -One \texttt{module} block should not contain two or more -\texttt{common} blocks with the same name. Otherwise, the later ones -are ignored. 
The types of variables in \texttt{} can -be defined in \texttt{}. Note that there -you can specify also the array specifications; then you don't need to -do that in \texttt{}. -\end{itemize} - -\subsection{Attributes} -\label{sec:attributes} - -The following attributes are used by \fpy: -\begin{description} -\item[\texttt{optional}] --- the variable is moved to the end of - optional argument list of the wrapper function. Default value of an - optional argument can be specified using \texttt{} in - \texttt{entitydecl}. You can use \texttt{optional} attribute also for - \texttt{external} arguments (call-back functions), but it is your - responsibility to ensure that it is given by the user if Fortran - routine wants to call it. -\item[\texttt{required}] --- the variable is considered as a required - argument (that is default). You will need this in order to overwrite - the \texttt{optional} attribute that is automatically set when - \texttt{} is used. However, usage of this attribute - should be rare. -\item[\texttt{dimension()}] --- used when the variable is - an array. For unbounded dimensions symbols `\texttt{*}' or - `\texttt{:}' can be used (then internally the corresponding - dimensions are set to -1; you'll notice this when certain exceptions - are raised). -\item[\texttt{external}] --- the variable is a call-back function. \fpy will - construct a call-back mechanism for this function. Also call-back - functions must be defined by their signatures, and there are several - ways to do that. In most cases, \fpy will be able to determine the signatures - of call-back functions from the Fortran source code; then it - builds an additional \texttt{module} block with a name containing - string `\texttt{\_\_user\_\_}' (see Sec.~\ref{sec:cbmodule}) and - includes \texttt{use} statement to the routines signature. Anyway, - you should check that the generated signature is correct. 
- - Alternatively, you can specify the signature by inserting to the - routines block a ``model'' how the call-back function would be called - from Fortran. For subroutines you should use\\ - \hspace*{2em}\texttt{call ()}\\ - and for functions\\% - \hspace*{2em}\texttt{ = ()}\\ - The variables in \texttt{} and \texttt{} - must be defined as well. You can use the arguments of the main - routine, for instance. -\item[\texttt{intent()}] --- this specifies the - ``intention'' of the variable. \texttt{} is a comma - separated list of the following specifications: - \begin{description} - \item[\texttt{in}] --- the variable is considered to be an input - variable (default). It means that the Fortran function uses only - the value(s) of the variable and is assumed not to change it. - \item[\texttt{inout}] --- the variable is considered to be an - input/output variable which means that Fortran routine may change - the value(s) of the variable. Note that in Python only array - objects can be changed ``in place''. (\texttt{intent(outin)} is - \texttt{intent(inout)}.) - \item[\texttt{out}] --- the value of the (output) variable is - returned by the wrapper function: it is appended to the list of - \texttt{}. If \texttt{out} is specified alone, - also \texttt{hide} is assumed. - \item[\texttt{hide}] --- use this if the variable \emph{should not} - or \emph{need not} to be in the list of wrapper function arguments - (not even in optional ones). For example, this is assumed if - \texttt{intent(out)} is used. You can ``hide'' an argument if it - has always a constant value specified in \texttt{}, - for instance. 
- \end{description} - The following rules apply: - \begin{itemize} - \item if no \texttt{intent} attribute is specified, \texttt{intent(in)} is - assumed; - \item \texttt{intent(in,inout)} is \texttt{intent(in)}; - \item \texttt{intent(in,hide)}, \texttt{intent(inout,hide)} are \texttt{intent(hide)}; - \item \texttt{intent(out)} is \texttt{intent(out,hide)}; -\item \texttt{intent(inout)} is NOT \texttt{intent(in,out)}. - \end{itemize} - In conclusion, the following combinations are ``minimal'': - \texttt{intent(in)}, \texttt{intent(inout)}, \texttt{intent(out)}, - \texttt{intent(hide)}, \texttt{intent(in,out)}, and - \texttt{intent(inout,out)}. -\item[\texttt{check([])}] --- if - \texttt{} evaluates to zero, an exception is raised - about incorrect value or size or any other incorrectness of the - variable. If \texttt{check()} or \texttt{check} is used then \fpy - will not try to guess the checks automatically. -\item[\texttt{depend([])}] --- the variable depends on other - variables listed in \texttt{}. These dependence relations - determine the order of internal initialization of the variables. If - you need to change these relations then be careful not to break the - dependence relations of other relevant variables. If - \texttt{depend()} or \texttt{depend} is used then \fpy will not try - to guess the dependence relations automatically. -\item[\texttt{note()}] --- with this attribute you can - include human readable documentation strings to the LaTeX document - that \fpy generates. Do not insert here information that \fpy can - establish by itself, such as, types, sizes, lengths of the - variables. Here you can insert almost arbitrary LaTeX text. Note - that \texttt{} is mainly used inside the LaTeX - \texttt{description} environment. Hint: you can use - \texttt{\bs{}texttt\{\}} for typesetting variable \texttt{} - in LaTeX. In order to get a new line to the LaTeX document, use - \texttt{\bs{}n} followed by a space. 
For longer text, you may want - to use line continuation feature of Fortran 90/95 language: set - \texttt{\&} (ampersand) - to be the last character in a line. -\item[\texttt{parameter}] --- the variable is parameter and it must - have a value. If the parameter is used in dimension specification, - it is replaced by its value. (Are there any other usages of - parameters except in dimension specifications? Let me know and I'll - add support for it). -\end{description} - - -\subsection{C-expressions} -\label{sec:C-expr} - -The signature of a routine may contain C-expressions in -\begin{itemize} -\item \texttt{} for initializing particular variable, or in -\item \texttt{} of the \texttt{check} attribute, or in -\item \texttt{} of the \texttt{dimension} attribute. -\end{itemize} -A C-expression may contain -\begin{itemize} -\item standard C-statement, -\item functions offered in \texttt{math.h}, -\item previously initialized variables (study -the dependence relations) from the argument list, and -\item the following CPP-macros: - \begin{description} - \item[\texttt{len()}] --- the length of an array \texttt{}; - \item[\texttt{shape(,)}] --- the $n$-th dimension of an array - \texttt{}; - \item[\texttt{rank()}] --- the rank of an array \texttt{}; - \item[\texttt{slen()}] --- the length of a string \texttt{}. - \end{description} -\end{itemize} - - -In addition, when initializing arrays, an index vector \texttt{int - \_i[rank()];} -is available: \texttt{\_i[0]} refers to -the index of the first dimension, \texttt{\_i[1]} to the index of -the second dimension, etc. 
For example, the argument type declaration\\ -\hspace*{2em}\texttt{integer a(10) = \_i[0]}\\ -is equivalent with the following Python statement\\ -\hspace*{2em}\texttt{a = array(range(10))} - - -\subsection{Required/optional arguments} -\label{sec:reqoptargs} - -When \texttt{optional} attribute is used (including the usage of -\texttt{} without the \texttt{required} attribute), the -corresponding variable in the argument list of a Fortran routine is -appended to the optional argument list of the wrapper function. - -For optional array argument all dimensions must be bounded (not -\texttt{(*)} or \texttt{(:)}) and defined at the time of -initialization (dependence relations). - -If the \texttt{None} object is passed in in place of a required array -argument, it will be considered as optional: that is, the memory is -allocated (of course, if it has unbounded dimensions, an exception -will be raised), and if \texttt{} is defined, -initialization is carried out. - - -\subsection{Internal checks} -\label{sec:intchecks} - -All array arguments are checked against the correctness of their rank. -If there is a mismatch, \fpy attempts to fix that by constructing an -array with a correct rank from the given array argument (there will be -no performance hit as no data is copied). The freedom to do so is -given only if some dimensions are unbounded or their value is 1. An -exception is raised when the sizes will not match. - -All bounded dimensions of an array are checked to be larger or equal -to the dimensions specified in the signature. - -So, you don't need to give explicit \texttt{check} attributes to check -these internal checks. - - -\subsection{Call-back modules} -\label{sec:cbmodule} - -A Fortran routine may have \texttt{external} arguments (call-back -functions). 
The signatures of the call-back functions must be defined -in a call-back \texttt{module} block (its name contains -\texttt{\_\_user\_\_}), in general; other possibilities are described -in the \texttt{external} attribute specification (see -Sec.~\ref{sec:attributes}). For the signatures of call-back -functions the following restrictions apply: -\begin{itemize} -\item Attributes \texttt{external}, \texttt{check(...)}, and - initialization statements are ignored. -\item Attribute \texttt{optional} is used only for changing the order - of the arguments. -\item For arrays all dimension bounds must be specified. They may be - C-expressions containing variables from the argument list. - Note that here CPP-macros \texttt{len}, \texttt{shape}, - \texttt{rank}, and \texttt{slen} are not available. -\end{itemize} - - -\subsection{Common blocks} -\label{sec:commonblocks} - -All fields in a common block are mapped to arrays of appropriate sizes -and types. Scalars are mapped to rank-0 arrays. For multi-dimensional -fields the corresponding arrays are transposed. In the type -declarations of the variables representing the common block fields, -only \texttt{dimension()}, \texttt{intent(hide)}, and -\texttt{note()} attributes are used, others are ignored. - -\subsection{Including files} -\label{sec:include} - -You can include files to the signature file using -\begin{verbatim} -include '' -\end{verbatim} -statement. It can be used in any part of the signature file. -If the file \texttt{} does not exists or it is not in the path, -the \texttt{include} line is ignored. - -\subsection{\fpy directives} -\label{sec:directives} - -You can insert signature statements directly to Fortran source codes -as comments. Anything that follows \texttt{f2py} is -regarded as normal statement for \fpy. 
- -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "f2py2e" -%%% End: - diff -Nru python-numpy-1.13.3/doc/f2py/simple.f python-numpy-1.14.5/doc/f2py/simple.f --- python-numpy-1.13.3/doc/f2py/simple.f 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/simple.f 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -cFile: simple.f - subroutine foo(a,m,n) - integer m,n,i,j - real a(m,n) -cf2py intent(in,out) a -cf2py intent(hide) m,n - do i=1,m - do j=1,n - a(i,j) = a(i,j) + 10*i+j - enddo - enddo - end -cEOF diff -Nru python-numpy-1.13.3/doc/f2py/simple_session.dat python-numpy-1.14.5/doc/f2py/simple_session.dat --- python-numpy-1.13.3/doc/f2py/simple_session.dat 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/simple_session.dat 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ ->>> import pytest ->>> import f2pytest ->>> import pyforttest ->>> print f2pytest.foo.__doc__ -foo - Function signature: - a = foo(a) -Required arguments: - a : input rank-2 array('f') with bounds (m,n) -Return objects: - a : rank-2 array('f') with bounds (m,n) - ->>> print pyforttest.foo.__doc__ -foo(a) - ->>> pytest.foo([[1,2],[3,4]]) -array([[12, 14], - [24, 26]]) ->>> f2pytest.foo([[1,2],[3,4]]) # F2PY can handle arbitrary input sequences -array([[ 12., 14.], - [ 24., 26.]],'f') ->>> pyforttest.foo([[1,2],[3,4]]) -Traceback (most recent call last): - File "", line 1, in ? -pyforttest.error: foo, argument A: Argument intent(inout) must be an array. 
- ->>> import Numeric ->>> a=Numeric.array([[1,2],[3,4]],'f') ->>> f2pytest.foo(a) -array([[ 12., 14.], - [ 24., 26.]],'f') ->>> a # F2PY makes a copy when input array is not Fortran contiguous -array([[ 1., 2.], - [ 3., 4.]],'f') ->>> a=Numeric.transpose(Numeric.array([[1,3],[2,4]],'f')) ->>> a -array([[ 1., 2.], - [ 3., 4.]],'f') ->>> f2pytest.foo(a) -array([[ 12., 14.], - [ 24., 26.]],'f') ->>> a # F2PY passes Fortran contiguous input array directly to Fortran -array([[ 12., 14.], - [ 24., 26.]],'f') -# See intent(copy), intent(overwrite), intent(inplace), intent(inout) -# attributes documentation to enhance the above behavior. - ->>> a=Numeric.array([[1,2],[3,4]],'f') ->>> pyforttest.foo(a) ->>> a # Huh? Pyfort 8.5 gives wrong results.. -array([[ 12., 23.], - [ 15., 26.]],'f') diff -Nru python-numpy-1.13.3/doc/f2py/TESTING.txt python-numpy-1.14.5/doc/f2py/TESTING.txt --- python-numpy-1.13.3/doc/f2py/TESTING.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/TESTING.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ - -======================================================= - F2PY unit testing site -======================================================= - -.. Contents:: - -Tests ------ - -* To run all F2PY unit tests in one command:: - - cd tests - python run_all.py [] - - For example:: - - localhost:~/src_cvs/f2py2e/tests$ python2.2 run_all.py 100 --quiet - ********************************************** - Running '/usr/bin/python2.2 f77/return_integer.py 100 --quiet' - run 1000 tests in 1.87 seconds - initial virtual memory size: 3952640 bytes - current virtual memory size: 3952640 bytes - ok - ********************************************** - Running '/usr/bin/python2.2 f77/return_logical.py 100 --quiet' - run 1000 tests in 1.47 seconds - initial virtual memory size: 3952640 bytes - current virtual memory size: 3952640 bytes - ok - ... 
- - If some tests fail, try to run the failing tests separately (without - the ``--quiet`` option) as described below to get more information - about the failure. - -* Test intent(in), intent(out) scalar arguments, - scalars returned by F77 functions - and F90 module functions:: - - tests/f77/return_integer.py - tests/f77/return_real.py - tests/f77/return_logical.py - tests/f77/return_complex.py - tests/f77/return_character.py - tests/f90/return_integer.py - tests/f90/return_real.py - tests/f90/return_logical.py - tests/f90/return_complex.py - tests/f90/return_character.py - - Change to tests/ directory and run:: - - python f77/return_.py [] - python f90/return_.py [] - - where ```` is integer, real, logical, complex, or character. - Test scripts options are described below. - - A test is considered successful if the last printed line is "ok". - - If you get import errors like:: - - ImportError: No module named f77_ext_return_integer - - but ``f77_ext_return_integer.so`` exists in the current directory then - it means that the current directory is not included in to `sys.path` - in your Python installation. As a fix, prepend ``.`` to ``PYTHONPATH`` - environment variable and rerun the tests. For example:: - - PYTHONPATH=. python f77/return_integer.py - -* Test mixing Fortran 77, Fortran 90 fixed and free format codes:: - - tests/mixed/run.py - -* Test basic callback hooks:: - - tests/f77/callback.py - -Options -------- - -You may want to use the following options when running the test -scripts: - -```` - Run tests ```` times. Useful for detecting memory leaks. Under - Linux tests scripts output virtual memory size state of the process - before and after calling the wrapped functions. - -``--quiet`` - Suppress all messages. On success only "ok" should be displayed. - -``--fcompiler=`` - Use:: - - f2py -c --help-fcompiler - - to find out what compilers are available (or more precisely, which - ones are recognized by ``numpy_distutils``). 
- -Reporting failures ------------------- - -XXX: (1) make sure that failures are due to f2py and (2) send full -stdout/stderr messages to me. Also add compiler,python,platform -information. diff -Nru python-numpy-1.13.3/doc/f2py/THANKS.txt python-numpy-1.14.5/doc/f2py/THANKS.txt --- python-numpy-1.13.3/doc/f2py/THANKS.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/THANKS.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ - -================= - Acknowledgments -================= - -F2PY__ is an open source Python package and command line tool developed and -maintained by Pearu Peterson (me__). - -.. __: http://cens.ioc.ee/projects/f2py2e/ -.. __: http://cens.ioc.ee/~pearu/ - -Many people have contributed to the F2PY project in terms of interest, -encouragement, suggestions, criticism, bug reports, code -contributions, and keeping me busy with developing F2PY. For all that -I thank - - James Amundson, John Barnard, David Beazley, Frank Bertoldi, Roman - Bertle, James Boyle, Moritz Braun, Rolv Erlend Bredesen, John - Chaffer, Fred Clare, Adam Collard, Ben Cornett, Jose L Gomez Dans, - Jaime D. Perea Duarte, Paul F Dubois, Thilo Ernst, Bonilla Fabian, - Martin Gelfand, Eduardo A. Gonzalez, Siegfried Gonzi, Bernhard - Gschaider, Charles Doutriaux, Jeff Hagelberg, Janko Hauser, Thomas - Hauser, Heiko Henkelmann, William Henney, Yueqiang Huang, Asim - Hussain, Berthold Höllmann, Vladimir Janku, Henk Jansen, Curtis - Jensen, Eric Jones, Tiffany Kamm, Andrey Khavryuchenko, Greg - Kochanski, Jochen Küpper, Simon Lacoste-Julien, Tim Lahey, Hans - Petter Langtangen, Jeff Layton, Matthew Lewis, Patrick LeGresley, - Joaquim R R A Martins, Paul Magwene Lionel Maziere, Craig McNeile, - Todd Miller, David C. Morrill, Dirk Muders, Kevin Mueller, Andrew - Mullhaupt, Vijayendra Munikoti, Travis Oliphant, Kevin O'Mara, Arno - Paehler, Fernando Perez, Didrik Pinte, Todd Alan Pitts, Prabhu - Ramachandran, Brad Reisfeld, Steve M. 
Robbins, Theresa Robinson, - Pedro Rodrigues, Les Schaffer, Christoph Scheurer, Herb Schilling, - Pierre Schnizer, Kevin Smith, Paulo Teotonio Sobrinho, José Rui - Faustino de Sousa, Andrew Swan, Dustin Tang, Charlie Taylor, Paul le - Texier, Michael Tiller, Semen Trygubenko, Ravi C Venkatesan, Peter - Verveer, Nils Wagner, R. Clint Whaley, Erik Wilsher, Martin - Wiechert, Gilles Zerah, SungPil Yoon. - -(This list may not be complete. Please forgive me if I have left you -out and let me know, I'll add your name.) - -Special thanks are due to ... - -Eric Jones - he and Travis O. are responsible for starting the -numpy_distutils project that allowed to move most of the platform and -compiler specific codes out from F2PY. This simplified maintaining the -F2PY project considerably. - -Joaquim R R A Martins - he made possible for me to test F2PY on IRIX64 -platform. He also presented our paper about F2PY in the 9th Python -Conference that I planned to attend but had to cancel in very last -minutes. - -Travis Oliphant - his knowledge and experience on Numerical Python -C/API has been invaluable in early development of the F2PY program. -His major contributions are call-back mechanism and copying N-D arrays -of arbitrary types. - -Todd Miller - he is responsible for Numarray support in F2PY. - -Thanks! - Pearu diff -Nru python-numpy-1.13.3/doc/f2py/TODO.txt python-numpy-1.14.5/doc/f2py/TODO.txt --- python-numpy-1.13.3/doc/f2py/TODO.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/TODO.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -Determine fixed/free format Fortran 90 dialect from the -contents of Fortran files. See numpy_distutils/command/build_flib.py. - -[DONE] -======================================================================== -Wrapping F90 code as follows: - -subroutine foo -print*,"In foo" -end subroutine foo -subroutine bar(func) - interface aa ! 
bug: this interface block is ignored - subroutine foo - end subroutine foo - end interface - !external foo - external func - call func(foo) -end subroutine bar -subroutine gun(a) - external a - call a() -end subroutine gun -subroutine fun - call bar(gun) -end subroutine fun - -========================================================================= -Users Guide needs major revision. - -[DONE] -========================================================================= -On Thu, 27 Sep 2001, José Luis Gómez Dans wrote: - -> Hi, -> just one question: does f2py supporte derived types in F90 code? -> Stuff like something%or and things like that. - -Not yet. - -========================================================================= -Date: Tue, 28 Aug 2001 22:23:04 -0700 -From: Patrick LeGresley -To: f2py-users@cens.ioc.ee -Subject: [f2py] Strange initialization of allocatable arrays - -I've noticed an odd behavior when setting an allocatable, multidimensional -array in a module. If the rank of the array is odd, the initialization is -fine. However, if the rank is even only the first element of the array is -set properly. See the attached sample code for example. - -========================================================================= -On Wed, 22 Aug 2001, Patrick LeGresley wrote: - -> I've noticed that if a parameter is defined in terms of another parameter, -> that the parameter is replaced not by a number but by another parameter -> (try the attached subroutine for example). Is there any way to have f2py -> automatically recognize the dependencies and generate a signature file -> without parameter variables ? - -It is certainly possible. In fact, f2py has only a basic support for -PARAMETER statements and it fails in your 'advanced' example to produce a -robust signature file. -I am sorry but you have to wait until I'll get back from my travel tour -(somewhere in the middle of September) and get a chance to work on it. 
- -[DONE] diff -Nru python-numpy-1.13.3/doc/f2py/using_F_compiler.txt python-numpy-1.14.5/doc/f2py/using_F_compiler.txt --- python-numpy-1.13.3/doc/f2py/using_F_compiler.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/using_F_compiler.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,147 +0,0 @@ - -Title: Wrapping F compiled Fortran 90 modules with F2PY - ================================================ - -Rationale: The F compiler does not support external procedures which - makes it impossible to use it in F2PY in a normal way. - This document describes a workaround to this problem so - that F compiled codes can be still wrapped with F2PY. - -Author: Pearu Peterson -Date: May 8, 2002 - -Acknowledgement: Thanks to Siegfried Gonzi who hammered me to produce - this document. - -Normally wrapping Fortran 90 modules to Python using F2PY is carried -out with the following command - - f2py -c -m fun foo.f90 - -where file foo.f90 contains, for example, - -module foo - public :: bar - contains - subroutine bar (a) - integer,intent(inout) :: a - print *,"Hello from foo.bar" - print *,"a=",a - a = a + 5 - print *,"a=",a - end subroutine bar -end module foo - -Then with a supported F90 compiler (running `f2py -c --help-compiler' -will display the found compilers) f2py will generate an extension -module fun.so into the current directory and the Fortran module foo -subroutine bar can be called from Python as follows - ->>> import fun ->>> print fun.foo.bar.__doc__ -bar - Function signature: - bar(a) -Required arguments: - a : in/output rank-0 array(int,'i') - ->>> from Numeric import array ->>> a = array(3) ->>> fun.foo.bar(a) - Hello from foo.bar - a= 3 - a= 8 ->>> a -8 ->>> - -This works nicely with all supported Fortran compilers. - -However, the F compiler (http://www.fortran.com/F/compilers.html) is -an exception. 
Namely, the F compiler is designed to recognize only -module procedures (and main programs, of course) but F2PY needs to -compile also the so-called external procedures that it generates to -facilitate accessing Fortran F90 module procedures from C and -subsequently from Python. As a result, wrapping F compiled Fortran -procedures to Python is _not_ possible using the simple procedure as -described above. But, there is a workaround that I'll describe below -in five steps. - -1) Compile foo.f90: - - F -c foo.f90 - -This creates an object file foo.o into the current directory. - -2) Create the signature file: - - f2py foo.f90 -h foo.pyf - -This creates a file foo.pyf containing - -module foo ! in foo.f90 - real public :: bar - subroutine bar(a) ! in foo.f90:foo - integer intent(inout) :: a - end subroutine bar -end module foo - -3) Open the file foo.pyf with your favorite text editor and change the - above signature to - -python module foo - interface - subroutine bar(a) - fortranname foo_MP_bar - intent(c) bar - integer intent(in,out) :: a - end subroutine bar - end interface -end python module foo - -The most important modifications are - - a) adding `python' keyword everywhere before the `module' keyword - - b) including an `interface' block around the all subroutine blocks. - - c) specifying the real symbol name of the subroutine using - `fortranname' statement. F generated symbol names are in the form - _MP_ - - d) specifying that subroutine is `intent(c)'. - -Notice that the `intent(inout)' attribute is changed to -`intent(in,out)' that instructs the wrapper to return the modified -value of `a'. - -4) Build the extension module - - f2py -c foo.pyf foo.o --fcompiler=Gnu /opt/F/lib/quickfit.o \ - /opt/F/lib/libf96.a - -This will create the extension module foo.so into the current -directory. Notice that you must use Gnu compiler (gcc) for linking. -And the paths to F specific object files and libraries may differ for -your F installation. 
- -5) Finally, we can call the module subroutine `bar' from Python - ->>> import foo ->>> print foo.bar.__doc__ -bar - Function signature: - a = bar(a) -Required arguments: - a : input int -Return objects: - a : int - ->>> foo.bar(3) -8 ->>> - -Notice that the F compiled module procedures are called as ordinary -external procedures. Also I/O seems to be lacking for F compiled -Fortran modules. - -Enjoy, - Pearu diff -Nru python-numpy-1.13.3/doc/f2py/win32_notes.txt python-numpy-1.14.5/doc/f2py/win32_notes.txt --- python-numpy-1.13.3/doc/f2py/win32_notes.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/f2py/win32_notes.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -The following notes are from Eric Jones. - -My Setup: - -For Python/Fortran development, I run Windows 2000 and use the mingw32 -(www.mingw.org) set of gcc/g77 compilers and tools (gcc 2.95.2) to build python -extensions. I'll also occasionally use MSVC for extension development, but -rarely on projects that include Fortran code. This short HOWTO describes how -I use f2py in the Windows environment. Pretty much everything is done from -a CMD (DOS) prompt, so you'll need to be familiar with using shell commands. - -Installing f2py: - -Before installing f2py, you'll need to install python. I use python2.1 (maybe -python2.2 will be out by the time you read this). Any version of Python beyond -version 1.52 should be fine. See www.python.org for info on installing Python. - -You'll also need Numeric which is available at -http://sourceforge.net/projects/numpy/. The latest version is 20.3. - -Since Pearu has moved to a setup.py script, installation is pretty easy. You -can download f2py from http://cens.ioc.ee/projects/f2py2e/. The latest public -release is http://cens.ioc.ee/projects/f2py2e/rel-3.x/f2py-3.latest.tgz. 
Even -though this is a .tgz file instead of a .zip file, most standard compression -utilities such as WinZip (www.winzip.com) handle unpacking .tgz files -automatically. Here are the download steps: - - 1. Download the latest version of f2py and save it to disk. - - 2. Use WinZip or some other tool to open the "f2py.xxx.tgz" file. - a. When WinZip says archive contains one file, "f2py.xxx.tar" - and ask if it should open it, respond with "yes". - b. Extract (use the extract button at the top) all the files - in the archive into a file. I'll use c:\f2py2e - - 3. Open a cmd prompt by clicking start->run and typing "cmd.exe". - Now type the following commands. - - C:\WINDOWS\SYSTEM32> cd c:\f2py2e - C:\F2PY2E> python setup.py install - - This will install f2py in the c:\python21\f2py2e directory. It - also copies a few scripts into the c:\python21\Scripts directory. - That's all there is to installing f2py. Now let's set up the - environment so that f2py is easy to use. - - 4. You need to set up a couple of environment variables. The path - "c:\python21\Scripts" needs to be added to your path variables. - To do this, go to the environment variables settings page. This is - where it is on windows 2000: - - Desktop->(right click)My Computer->Properties->Advanced-> - Environment Variables - - a. Add "c:\python21\Scripts" to the end of the Path variable. - b. If it isn't already there, add ".py" to the PATHEXT variable. - This tells the OS to execute f2py.py even when just "f2py" is - typed at a command prompt. - - 5. Well, there actually isn't anything to be done here. The Python - installation should have taken care of associating .py files with - Python for execution, so you shouldn't have to do anything to - registry settings. - -To test your installation, open a new cmd prompt, and type the following: - - C:\WINDOWS\SYSTEM32> f2py - Usage: - f2py [] [[[only:]||[skip:]] \ - ] \ - [: ...] - ... - -This prints out the usage information for f2py. 
If it doesn't, there is -something wrong with the installation. - -Testing: -The f2py test scripts are kinda Unix-centric, so they don't work under windows. - -XXX include test script XXX. - -Compiler and setup.py issues: - -XXX diff -Nru python-numpy-1.13.3/doc/Makefile python-numpy-1.14.5/doc/Makefile --- python-numpy-1.13.3/doc/Makefile 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/Makefile 2018-06-12 18:28:52.000000000 +0000 @@ -90,19 +90,18 @@ # SSH must be correctly configured for this to work. # Assumes that ``make dist`` was already run # Example usage: ``make upload USERNAME=rgommers RELEASE=1.10.1`` - ssh $(USERNAME)@new.scipy.org mkdir $(UPLOAD_DIR) - scp build/dist.tar.gz $(USERNAME)@new.scipy.org:$(UPLOAD_DIR) - ssh $(USERNAME)@new.scipy.org tar xvC $(UPLOAD_DIR) \ + ssh $(USERNAME)@docs.scipy.org mkdir $(UPLOAD_DIR) + scp build/dist.tar.gz $(USERNAME)@docs.scipy.org:$(UPLOAD_DIR) + ssh $(USERNAME)@docs.scipy.org tar xvC $(UPLOAD_DIR) \ -zf $(UPLOAD_DIR)/dist.tar.gz - ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-ref.pdf \ + ssh $(USERNAME)@docs.scipy.org mv $(UPLOAD_DIR)/numpy-ref.pdf \ $(UPLOAD_DIR)/numpy-ref-$(RELEASE).pdf - ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-user.pdf \ + ssh $(USERNAME)@docs.scipy.org mv $(UPLOAD_DIR)/numpy-user.pdf \ $(UPLOAD_DIR)/numpy-user-$(RELEASE).pdf - ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-html.zip \ + ssh $(USERNAME)@docs.scipy.org mv $(UPLOAD_DIR)/numpy-html.zip \ $(UPLOAD_DIR)/numpy-html-$(RELEASE).zip - ssh $(USERNAME)@new.scipy.org rm $(UPLOAD_DIR)/dist.tar.gz - ssh $(USERNAME)@new.scipy.org ln -snf numpy-$(RELEASE) /srv/docs_scipy_org/doc/numpy - ssh $(USERNAME)@new.scipy.org /srv/bin/fixperm-scipy_org.sh + ssh $(USERNAME)@docs.scipy.org rm $(UPLOAD_DIR)/dist.tar.gz + ssh $(USERNAME)@docs.scipy.org ln -snf numpy-$(RELEASE) /srv/docs_scipy_org/doc/numpy #------------------------------------------------------------------------------ # Basic Sphinx generation rules 
for different formats diff -Nru python-numpy-1.13.3/doc/release/1.11.0-notes.rst python-numpy-1.14.5/doc/release/1.11.0-notes.rst --- python-numpy-1.13.3/doc/release/1.11.0-notes.rst 2017-09-24 22:47:22.000000000 +0000 +++ python-numpy-1.14.5/doc/release/1.11.0-notes.rst 2018-06-12 18:28:52.000000000 +0000 @@ -180,7 +180,7 @@ corresponding estimator being used. * A benchmark suite using `Airspeed Velocity - `__ has been added, converting the + `__ has been added, converting the previous vbench-based one. You can run the suite locally via ``python runtests.py --bench``. For more details, see ``benchmarks/README.rst``. diff -Nru python-numpy-1.13.3/doc/release/1.12.0-notes.rst python-numpy-1.14.5/doc/release/1.12.0-notes.rst --- python-numpy-1.13.3/doc/release/1.12.0-notes.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/release/1.12.0-notes.rst 2018-06-12 18:28:52.000000000 +0000 @@ -493,574 +493,3 @@ for every offending user code line or user module instead of only once. On python versions before 3.4, this can cause warnings to appear that were falsely ignored before, which may be surprising especially in test suits. - - -Contributors -============ - -A total of 139 people contributed to this release. People with a "+" by their -names contributed a patch for the first time. 
- -* Aditya Panchal + -* Ales Erjavec + -* Alex Griffing -* Alexandr Shadchin + -* Alistair Muldal -* Allan Haldane -* Amit Aronovitch + -* Andrei Kucharavy + -* Antony Lee -* Antti Kaihola + -* Arne de Laat + -* Auke Wiggers + -* AustereCuriosity + -* Badhri Narayanan Krishnakumar + -* Ben North + -* Ben Rowland + -* Bertrand Lefebvre -* Boxiang Sun -* CJ Carey -* Charles Harris -* Christoph Gohlke -* Daniel Ching + -* Daniel Rasmussen + -* Daniel Smith + -* David Schaich + -* Denis Alevi + -* Devin Jeanpierre + -* Dmitry Odzerikho -* Dongjoon Hyun + -* Edward Richards + -* Ekaterina Tuzova + -* Emilien Kofman + -* Endolith -* Eren Sezener + -* Eric Moore -* Eric Quintero + -* Eric Wieser + -* Erik M. Bray -* Frederic Bastien -* Friedrich Dunne + -* Gerrit Holl -* Golnaz Irannejad + -* Graham Markall + -* Greg Knoll + -* Greg Young -* Gustavo Serra Scalet + -* Ines Wichert + -* Irvin Probst + -* Jaime Fernandez -* James Sanders + -* Jan David Mol + -* Jan Schlüter -* Jeremy Tuloup + -* John Kirkham -* John Zwinck + -* Jonathan Helmus -* Joseph Fox-Rabinovitz -* Josh Wilson + -* Joshua Warner + -* Julian Taylor -* Ka Wo Chen + -* Kamil Rytarowski + -* Kelsey Jordahl + -* Kevin Deldycke + -* Khaled Ben Abdallah Okuda + -* Lion Krischer + -* Loïc Estève + -* Luca Mussi + -* Mads Ohm Larsen + -* Manoj Kumar + -* Mario Emmenlauer + -* Marshall Bockrath-Vandegrift + -* Marshall Ward + -* Marten van Kerkwijk -* Mathieu Lamarre + -* Matthew Brett -* Matthew Harrigan + -* Matthias Geier -* Matti Picus + -* Meet Udeshi + -* Michael Felt + -* Michael Goerz + -* Michael Martin + -* Michael Seifert + -* Mike Nolta + -* Nathaniel Beaver + -* Nathaniel J. Smith -* Naveen Arunachalam + -* Nick Papior -* Nikola Forró + -* Oleksandr Pavlyk + -* Olivier Grisel -* Oren Amsalem + -* Pauli Virtanen -* Pavel Potocek + -* Pedro Lacerda + -* Peter Creasey + -* Phil Elson + -* Philip Gura + -* Phillip J. 
Wolfram + -* Pierre de Buyl + -* Raghav RV + -* Ralf Gommers -* Ray Donnelly + -* Rehas Sachdeva -* Rob Malouf + -* Robert Kern -* Samuel St-Jean -* Sanchez Gonzalez Alvaro + -* Saurabh Mehta + -* Scott Sanderson + -* Sebastian Berg -* Shayan Pooya + -* Shota Kawabuchi + -* Simon Conseil -* Simon Gibbons -* Sorin Sbarnea + -* Stefan van der Walt -* Stephan Hoyer -* Steven J Kern + -* Stuart Archibald -* Tadeu Manoel + -* Takuya Akiba + -* Thomas A Caswell -* Tom Bird + -* Tony Kelman + -* Toshihiro Kamishima + -* Valentin Valls + -* Varun Nayyar -* Victor Stinner + -* Warren Weckesser -* Wendell Smith -* Wojtek Ruszczewski + -* Xavier Abellan Ecija + -* Yaroslav Halchenko -* Yash Shah + -* Yinon Ehrlich + -* Yu Feng + -* nevimov + - -Pull requests merged -==================== - -A total of 418 pull requests were merged for this release. - -* `#4073 `__: BUG: change real output checking to test if all imaginary parts... -* `#4619 `__: BUG : np.sum silently drops keepdims for sub-classes of ndarray -* `#5488 `__: ENH: add `contract`: optimizing numpy's einsum expression -* `#5706 `__: ENH: make some masked array methods behave more like ndarray... -* `#5822 `__: Allow many distributions to have a scale of 0. -* `#6054 `__: WIP: MAINT: Add deprecation warning to views of multi-field indexes -* `#6298 `__: Check lower base limit in base_repr. -* `#6430 `__: Fix issues with zero-width string fields -* `#6656 `__: ENH: usecols now accepts an int when only one column has to be... -* `#6660 `__: Added pathlib support for several functions -* `#6872 `__: ENH: linear interpolation of complex values in lib.interp -* `#6997 `__: MAINT: Simplify mtrand.pyx helpers -* `#7003 `__: BUG: Fix string copying for np.place -* `#7026 `__: DOC: Clarify behavior in np.random.uniform -* `#7055 `__: BUG: One Element Array Inputs Return Scalars in np.random -* `#7063 `__: REL: Update master branch after 1.11.x branch has been made. -* `#7073 `__: DOC: Update the 1.11.0 release notes. 
-* `#7076 `__: MAINT: Update the git .mailmap file. -* `#7082 `__: TST, DOC: Added Broadcasting Tests in test_random.py -* `#7087 `__: BLD: fix compilation on non glibc-Linuxes -* `#7088 `__: BUG: Have `norm` cast non-floating point arrays to 64-bit float... -* `#7090 `__: ENH: Added 'doane' and 'sqrt' estimators to np.histogram in numpy.function_base -* `#7091 `__: Revert "BLD: fix compilation on non glibc-Linuxes" -* `#7092 `__: BLD: fix compilation on non glibc-Linuxes -* `#7099 `__: TST: Suppressed warnings -* `#7102 `__: MAINT: Removed conditionals that are always false in datetime_strings.c -* `#7105 `__: DEP: Deprecate as_strided returning a writable array as default -* `#7109 `__: DOC: update Python versions requirements in the install docs -* `#7114 `__: MAINT: Fix typos in docs -* `#7116 `__: TST: Fixed f2py test for win32 virtualenv -* `#7118 `__: TST: Fixed f2py test for non-versioned python executables -* `#7119 `__: BUG: Fixed mingw.lib error -* `#7125 `__: DOC: Updated documentation wording and examples for np.percentile. -* `#7129 `__: BUG: Fixed 'midpoint' interpolation of np.percentile in odd cases. -* `#7131 `__: Fix setuptools sdist -* `#7133 `__: ENH: savez: temporary file alongside with target file and improve... -* `#7134 `__: MAINT: Fix some typos in a code string and comments -* `#7141 `__: BUG: Unpickled void scalars should be contiguous -* `#7144 `__: MAINT: Change `call_fortran` into `callfortran` in comments. -* `#7145 `__: BUG: Fixed regressions in np.piecewise in ref to #5737 and #5729. -* `#7147 `__: Temporarily disable __numpy_ufunc__ -* `#7148 `__: ENH,TST: Bump stacklevel and add tests for warnings -* `#7149 `__: TST: Add missing suffix to temppath manager -* `#7152 `__: BUG: mode kwargs passed as unicode to np.pad raises an exception -* `#7156 `__: BUG: Reascertain that linspace respects ndarray subclasses in... 
-* `#7167 `__: DOC: Update Wikipedia references for mtrand.pyx -* `#7171 `__: TST: Fixed f2py test for Anaconda non-win32 -* `#7174 `__: DOC: Fix broken pandas link in release notes -* `#7177 `__: ENH: added axis param for np.count_nonzero -* `#7178 `__: BUG: Fix binary_repr for negative numbers -* `#7180 `__: BUG: Fixed previous attempt to fix dimension mismatch in nanpercentile -* `#7181 `__: DOC: Updated minor typos in function_base.py and test_function_base.py -* `#7191 `__: DOC: add vstack, hstack, dstack reference to stack documentation. -* `#7193 `__: MAINT: Removed supurious assert in histogram estimators -* `#7194 `__: BUG: Raise a quieter `MaskedArrayFutureWarning` for mask changes. -* `#7195 `__: STY: Drop some trailing spaces in `numpy.ma.core`. -* `#7196 `__: Revert "DOC: add vstack, hstack, dstack reference to stack documentation." -* `#7197 `__: TST: Pin virtualenv used on Travis CI. -* `#7198 `__: ENH: Unlock the GIL for gufuncs -* `#7199 `__: MAINT: Cleanup for histogram bin estimator selection -* `#7201 `__: Raise IOError on not a file in python2 -* `#7202 `__: MAINT: Made `iterable` return a boolean -* `#7209 `__: TST: Bump `virtualenv` to 14.0.6 -* `#7211 `__: DOC: Fix fmin examples -* `#7215 `__: MAINT: Use PySlice_GetIndicesEx instead of custom reimplementation -* `#7229 `__: ENH: implement __complex__ -* `#7231 `__: MRG: allow distributors to run custom init -* `#7232 `__: BLD: Switch order of test for lapack_mkl and openblas_lapack -* `#7239 `__: DOC: Removed residual merge markup from previous commit -* `#7240 `__: Change 'pubic' to 'public'. -* `#7241 `__: MAINT: update doc/sphinxext to numpydoc 0.6.0, and fix up some... -* `#7243 `__: ENH: Adding support to the range keyword for estimation of the... -* `#7246 `__: DOC: metion writeable keyword in as_strided in release notes -* `#7247 `__: TST: Fail quickly on AppVeyor for superseded PR builds -* `#7248 `__: DOC: remove link to documentation wiki editor from HOWTO_DOCUMENT. 
-* `#7250 `__: DOC,REL: Update 1.11.0 notes. -* `#7251 `__: BUG: only benchmark complex256 if it exists -* `#7252 `__: Forward port a fix and enhancement from 1.11.x -* `#7253 `__: DOC: note in h/v/dstack points users to stack/concatenate -* `#7254 `__: BUG: Enforce dtype for randint singletons -* `#7256 `__: MAINT: Use `is None` or `is not None` instead of `== None` or... -* `#7257 `__: DOC: Fix mismatched variable names in docstrings. -* `#7258 `__: ENH: Make numpy floor_divide and remainder agree with Python... -* `#7260 `__: BUG/TST: Fix #7259, do not "force scalar" for already scalar... -* `#7261 `__: Added self to mailmap -* `#7266 `__: BUG: Segfault for classes with deceptive __len__ -* `#7268 `__: ENH: add geomspace function -* `#7274 `__: BUG: Preserve array order in np.delete -* `#7275 `__: DEP: Warn about assigning 'data' attribute of ndarray -* `#7276 `__: DOC: apply_along_axis missing whitespace inserted (before colon) -* `#7278 `__: BUG: Make returned unravel_index arrays writeable -* `#7279 `__: TST: Fixed elements being shuffled -* `#7280 `__: MAINT: Remove redundant trailing semicolons. -* `#7285 `__: BUG: Make Randint Backwards Compatible with Pandas -* `#7286 `__: MAINT: Fix typos in docs/comments of `ma` and `polynomial` modules. -* `#7292 `__: Clarify error on repr failure in assert_equal. -* `#7294 `__: ENH: add support for BLIS to numpy.distutils -* `#7295 `__: DOC: understanding code and getting started section to dev doc -* `#7296 `__: Revert part of #3907 which incorrectly propogated MaskedArray... -* `#7299 `__: DOC: Fix mismatched variable names in docstrings. -* `#7300 `__: DOC: dev: stop recommending keeping local master updated with... -* `#7301 `__: DOC: Update release notes -* `#7305 `__: BUG: Remove data race in mtrand: two threads could mutate the... -* `#7307 `__: DOC: Missing some characters in link. 
-* `#7308 `__: BUG: Incrementing the wrong reference on return -* `#7310 `__: STY: Fix GitHub rendering of ordered lists >9 -* `#7311 `__: ENH: Make _pointer_type_cache functional -* `#7313 `__: DOC: corrected grammatical error in quickstart doc -* `#7325 `__: BUG, MAINT: Improve fromnumeric.py interface for downstream compatibility -* `#7328 `__: DEP: Deprecated using a float index in linspace -* `#7331 `__: Add comment, TST: fix MemoryError on win32 -* `#7332 `__: Check for no solution in np.irr Fixes #6744 -* `#7338 `__: TST: Install `pytz` in the CI. -* `#7340 `__: DOC: Fixed math rendering in tensordot docs. -* `#7341 `__: TST: Add test for #6469 -* `#7344 `__: DOC: Fix more typos in docs and comments. -* `#7346 `__: Generalized flip -* `#7347 `__: ENH Generalized rot90 -* `#7348 `__: Maint: Removed extra space from `ureduce` -* `#7349 `__: MAINT: Hide nan warnings for masked internal MA computations -* `#7350 `__: BUG: MA ufuncs should set mask to False, not array([False]) -* `#7351 `__: TST: Fix some MA tests to avoid looking at the .data attribute -* `#7358 `__: BUG: pull request related to the issue #7353 -* `#7359 `__: Update 7314, DOC: Clarify valid integer range for random.seed... -* `#7361 `__: MAINT: Fix copy and paste oversight. -* `#7363 `__: ENH: Make no unshare mask future warnings less noisy -* `#7366 `__: TST: fix #6542, add tests to check non-iterable argument raises... 
-* `#7373 `__: ENH: Add bitwise_and identity -* `#7378 `__: added NumPy logo and separator -* `#7382 `__: MAINT: cleanup np.average -* `#7385 `__: DOC: note about wheels / windows wheels for pypi -* `#7386 `__: Added label icon to Travis status -* `#7397 `__: BUG: incorrect type for objects whose __len__ fails -* `#7398 `__: DOC: fix typo -* `#7404 `__: Use PyMem_RawMalloc on Python 3.4 and newer -* `#7406 `__: ENH ufunc called on memmap return a ndarray -* `#7407 `__: BUG: Fix decref before incref for in-place accumulate -* `#7410 `__: DOC: add nanprod to the list of math routines -* `#7414 `__: Tweak corrcoef -* `#7415 `__: DOC: Documention fixes -* `#7416 `__: BUG: Incorrect handling of range in `histogram` with automatic... -* `#7418 `__: DOC: Minor typo fix, hermefik -> hermefit. -* `#7421 `__: ENH: adds np.nancumsum and np.nancumprod -* `#7423 `__: BUG: Ongoing fixes to PR#7416 -* `#7430 `__: DOC: Update 1.11.0-notes. -* `#7433 `__: MAINT: FutureWarning for changes to np.average subclass handling -* `#7437 `__: np.full now defaults to the filling value's dtype. -* `#7438 `__: Allow rolling multiple axes at the same time. -* `#7439 `__: BUG: Do not try sequence repeat unless necessary -* `#7442 `__: MANT: Simplify diagonal length calculation logic -* `#7445 `__: BUG: reference count leak in bincount, fixes #6805 -* `#7446 `__: DOC: ndarray typo fix -* `#7447 `__: BUG: scalar integer negative powers gave wrong results. -* `#7448 `__: DOC: array "See also" link to full and full_like instead of fill -* `#7456 `__: BUG: int overflow in reshape, fixes #7455, fixes #7293 -* `#7463 `__: BUG: fix array too big error for wide dtypes. -* `#7466 `__: BUG: segfault inplace object reduceat, fixes #7465 -* `#7468 `__: BUG: more on inplace reductions, fixes #615 -* `#7469 `__: MAINT: Update git .mailmap -* `#7472 `__: MAINT: Update .mailmap. -* `#7477 `__: MAINT: Yet more .mailmap updates for recent contributors. 
-* `#7481 `__: BUG: Fix segfault in PyArray_OrderConverter -* `#7482 `__: BUG: Memory Leak in _GenericBinaryOutFunction -* `#7489 `__: Faster real_if_close. -* `#7491 `__: DOC: Update subclassing doc regarding downstream compatibility -* `#7496 `__: BUG: don't use pow for integer power ufunc loops. -* `#7504 `__: DOC: remove "arr" from keepdims docstrings -* `#7505 `__: MAIN: fix to #7382, make scl in np.average writeable -* `#7507 `__: MAINT: Remove nose.SkipTest import. -* `#7508 `__: DOC: link frompyfunc and vectorize -* `#7511 `__: numpy.power(0, 0) should return 1 -* `#7515 `__: BUG: MaskedArray.count treats negative axes incorrectly -* `#7518 `__: BUG: Extend glibc complex trig functions blacklist to glibc <... -* `#7521 `__: DOC: rephrase writeup of memmap changes -* `#7522 `__: BUG: Fixed iteration over additional bad commands -* `#7526 `__: DOC: Removed an extra `:const:` -* `#7529 `__: BUG: Floating exception with invalid axis in np.lexsort -* `#7534 `__: MAINT: Update setup.py to reflect supported python versions. -* `#7536 `__: MAINT: Always use PyCapsule instead of PyCObject in mtrand.pyx -* `#7539 `__: MAINT: Cleanup of random stuff -* `#7549 `__: BUG: allow graceful recovery for no Liux compiler -* `#7562 `__: BUG: Fix test_from_object_array_unicode (test_defchararray.TestBasic)… -* `#7565 `__: BUG: Fix test_ctypeslib and test_indexing for debug interpreter -* `#7566 `__: MAINT: use manylinux1 wheel for cython -* `#7568 `__: Fix a false positive OverflowError in Python 3.x when value above... 
-* `#7579 `__: DOC: clarify purpose of Attributes section -* `#7584 `__: BUG: fixes #7572, percent in path -* `#7586 `__: Make np.ma.take works on scalars -* `#7587 `__: BUG: linalg.norm(): Don't convert object arrays to float -* `#7598 `__: Cast array size to int64 when loading from archive -* `#7602 `__: DOC: Remove isreal and iscomplex from ufunc list -* `#7605 `__: DOC: fix incorrect Gamma distribution parameterization comments -* `#7609 `__: BUG: Fix TypeError when raising TypeError -* `#7611 `__: ENH: expose test runner raise_warnings option -* `#7614 `__: BLD: Avoid using os.spawnve in favor of os.spawnv in exec_command -* `#7618 `__: BUG: distance arg of np.gradient must be scalar, fix docstring -* `#7626 `__: DOC: RST definition list fixes -* `#7627 `__: MAINT: unify tup processing, move tup use to after all PyTuple_SetItem... -* `#7630 `__: MAINT: add ifdef around PyDictProxy_Check macro -* `#7631 `__: MAINT: linalg: fix comment, simplify math -* `#7634 `__: BLD: correct C compiler customization in system_info.py Closes... -* `#7635 `__: BUG: ma.median alternate fix for #7592 -* `#7636 `__: MAINT: clean up testing.assert_raises_regexp, 2.6-specific code... -* `#7637 `__: MAINT: clearer exception message when importing multiarray fails. -* `#7639 `__: TST: fix a set of test errors in master. -* `#7643 `__: DOC : minor changes to linspace docstring -* `#7651 `__: BUG: one to any power is still 1. Broken edgecase for int arrays -* `#7655 `__: BLD: Remove Intel compiler flag -xSSE4.2 -* `#7658 `__: BUG: fix incorrect printing of 1D masked arrays -* `#7659 `__: BUG: Temporary fix for str(mvoid) for object field types -* `#7664 `__: BUG: Fix unicode with byte swap transfer and copyswap -* `#7667 `__: Restore histogram consistency -* `#7668 `__: ENH: Do not check the type of module.__dict__ explicit in test. -* `#7669 `__: BUG: boolean assignment no GIL release when transfer needs API -* `#7673 `__: DOC: Create Numpy 1.11.1 release notes. 
-* `#7675 `__: BUG: fix handling of right edge of final bin. -* `#7678 `__: BUG: Fix np.clip bug NaN handling for Visual Studio 2015 -* `#7679 `__: MAINT: Fix up C++ comment in arraytypes.c.src. -* `#7681 `__: DOC: Update 1.11.1 release notes. -* `#7686 `__: ENH: Changing FFT cache to a bounded LRU cache -* `#7688 `__: DOC: fix broken genfromtxt examples in user guide. Closes gh-7662. -* `#7689 `__: BENCH: add correlate/convolve benchmarks. -* `#7696 `__: DOC: update wheel build / upload instructions -* `#7699 `__: BLD: preserve library order -* `#7704 `__: ENH: Add bits attribute to np.finfo -* `#7712 `__: BUG: Fix race condition with new FFT cache -* `#7715 `__: BUG: Remove memory leak in np.place -* `#7719 `__: BUG: Fix segfault in np.random.shuffle for arrays of different... -* `#7723 `__: Change mkl_info.dir_env_var from MKL to MKLROOT -* `#7727 `__: DOC: Corrections in Datetime Units-arrays.datetime.rst -* `#7729 `__: DOC: fix typo in savetxt docstring (closes #7620) -* `#7733 `__: Update 7525, DOC: Fix order='A' docs of np.array. -* `#7734 `__: Update 7542, ENH: Add `polyrootval` to numpy.polynomial -* `#7735 `__: BUG: fix issue on OS X with Python 3.x where npymath.ini was... -* `#7739 `__: DOC: Mention the changes of #6430 in the release notes. -* `#7740 `__: DOC: add reference to poisson rng -* `#7743 `__: Update 7476, DEP: deprecate Numeric-style typecodes, closes #2148 -* `#7744 `__: DOC: Remove "ones_like" from ufuncs list (it is not) -* `#7746 `__: DOC: Clarify the effect of rcond in numpy.linalg.lstsq. -* `#7747 `__: Update 7672, BUG: Make sure we don't divide by zero -* `#7748 `__: DOC: Update float32 mean example in docstring -* `#7754 `__: Update 7612, ENH: Add broadcast.ndim to match code elsewhere. -* `#7757 `__: Update 7175, BUG: Invalid read of size 4 in PyArray_FromFile -* `#7759 `__: BUG: Fix numpy.i support for numpy API < 1.7. -* `#7760 `__: ENH: Make assert_almost_equal & assert_array_almost_equal consistent. 
-* `#7766 `__: fix an English typo -* `#7771 `__: DOC: link geomspace from logspace -* `#7773 `__: DOC: Remove a redundant the -* `#7777 `__: DOC: Update Numpy 1.11.1 release notes. -* `#7785 `__: DOC: update wheel building procedure for release -* `#7789 `__: MRG: add note of 64-bit wheels on Windows -* `#7791 `__: f2py.compile issues (#7683) -* `#7799 `__: "lambda" is not allowed to use as keyword arguments in a sample... -* `#7803 `__: BUG: interpret 'c' PEP3118/struct type as 'S1'. -* `#7807 `__: DOC: Misplaced parens in formula -* `#7817 `__: BUG: Make sure npy_mul_with_overflow_ detects overflow. -* `#7818 `__: numpy/distutils/misc_util.py fix for #7809: check that _tmpdirs... -* `#7820 `__: MAINT: Allocate fewer bytes for empty arrays. -* `#7823 `__: BUG: Fixed masked array behavior for scalar inputs to np.ma.atleast_*d -* `#7834 `__: DOC: Added an example -* `#7839 `__: Pypy fixes -* `#7840 `__: Fix ATLAS version detection -* `#7842 `__: Fix versionadded tags -* `#7848 `__: MAINT: Fix remaining uses of deprecated Python imp module. -* `#7853 `__: BUG: Make sure numpy globals keep identity after reload. -* `#7863 `__: ENH: turn quicksort into introsort -* `#7866 `__: Document runtests extra argv -* `#7871 `__: BUG: handle introsort depth limit properly -* `#7879 `__: DOC: fix typo in documentation of loadtxt (closes #7878) -* `#7885 `__: Handle NetBSD specific -* `#7889 `__: DOC: #7881. Fix link to record arrays -* `#7894 `__: fixup-7790, BUG: construct ma.array from np.array which contains... -* `#7898 `__: Spelling and grammar fix. 
-* `#7903 `__: BUG: fix float16 type not being called due to wrong ordering -* `#7908 `__: BLD: Fixed detection for recent MKL versions -* `#7911 `__: BUG: fix for issue#7835 (ma.median of 1d) -* `#7912 `__: ENH: skip or avoid gc/objectmodel differences btwn pypy and cpython -* `#7918 `__: ENH: allow numpy.apply_along_axis() to work with ndarray subclasses -* `#7922 `__: ENH: Add ma.convolve and ma.correlate for #6458 -* `#7925 `__: Monkey-patch _msvccompile.gen_lib_option like any other compilators -* `#7931 `__: BUG: Check for HAVE_LDOUBLE_DOUBLE_DOUBLE_LE in npy_math_complex. -* `#7936 `__: ENH: improve duck typing inside iscomplexobj -* `#7937 `__: BUG: Guard against buggy comparisons in generic quicksort. -* `#7938 `__: DOC: add cbrt to math summary page -* `#7941 `__: BUG: Make sure numpy globals keep identity after reload. -* `#7943 `__: DOC: #7927. Remove deprecated note for memmap relevant for Python... -* `#7952 `__: BUG: Use keyword arguments to initialize Extension base class. -* `#7956 `__: BLD: remove __NUMPY_SETUP__ from builtins at end of setup.py -* `#7963 `__: BUG: MSVCCompiler grows 'lib' & 'include' env strings exponentially. -* `#7965 `__: BUG: cannot modify tuple after use -* `#7976 `__: DOC: Fixed documented dimension of return value -* `#7977 `__: DOC: Create 1.11.2 release notes. -* `#7979 `__: DOC: Corrected allowed keywords in ``add_installed_library`` -* `#7980 `__: ENH: Add ability to runtime select ufunc loops, add AVX2 integer... -* `#7985 `__: Rebase 7763, ENH: Add new warning suppression/filtering context -* `#7987 `__: DOC: See also np.load and np.memmap in np.lib.format.open_memmap -* `#7988 `__: DOC: Include docstring for cbrt, spacing and fabs in documentation -* `#7999 `__: ENH: add inplace cases to fast ufunc loop macros -* `#8006 `__: DOC: Update 1.11.2 release notes. -* `#8008 `__: MAINT: Remove leftover imp module imports. 
-* `#8009 `__: DOC: Fixed three typos in the c-info.ufunc-tutorial -* `#8011 `__: DOC: Update 1.11.2 release notes. -* `#8014 `__: BUG: Fix fid.close() to use os.close(fid) -* `#8016 `__: BUG: Fix numpy.ma.median. -* `#8018 `__: BUG: Fixes return for np.ma.count if keepdims is True and axis... -* `#8021 `__: DOC: change all non-code instances of Numpy to NumPy -* `#8027 `__: ENH: Add platform indepedent lib dir to PYTHONPATH -* `#8028 `__: DOC: Update 1.11.2 release notes. -* `#8030 `__: BUG: fix np.ma.median with only one non-masked value and an axis... -* `#8038 `__: MAINT: Update error message in rollaxis. -* `#8040 `__: Update add_newdocs.py -* `#8042 `__: BUG: core: fix bug in NpyIter buffering with discontinuous arrays -* `#8045 `__: DOC: Update 1.11.2 release notes. -* `#8050 `__: remove refcount semantics, now a.resize() almost always requires... -* `#8051 `__: Clear signaling NaN exceptions -* `#8054 `__: ENH: add signature argument to vectorize for vectorizing like... -* `#8057 `__: BUG: lib: Simplify (and fix) pad's handling of the pad_width -* `#8061 `__: BUG : financial.pmt modifies input (issue #8055) -* `#8064 `__: MAINT: Add PMIP files to .gitignore -* `#8065 `__: BUG: Assert fromfile ending earlier in pyx_processing -* `#8066 `__: BUG, TST: Fix python3-dbg bug in Travis script -* `#8071 `__: MAINT: Add Tempita to randint helpers -* `#8075 `__: DOC: Fix description of isinf in nan_to_num -* `#8080 `__: BUG: non-integers can end up in dtype offsets -* `#8081 `__: Update outdated Nose URL to nose.readthedocs.io -* `#8083 `__: ENH: Deprecation warnings for `/` integer division when running... -* `#8084 `__: DOC: Fix erroneous return type description for np.roots. -* `#8087 `__: BUG: financial.pmt modifies input #8055 -* `#8088 `__: MAINT: Remove duplicate randint helpers code. -* `#8093 `__: MAINT: fix assert_raises_regex when used as a context manager -* `#8096 `__: ENH: Vendorize tempita. 
-* `#8098 `__: DOC: Enhance description/usage for np.linalg.eig*h -* `#8103 `__: Pypy fixes -* `#8104 `__: Fix test code on cpuinfo's main function -* `#8107 `__: BUG: Fix array printing with precision=0. -* `#8109 `__: Fix bug in ravel_multi_index for big indices (Issue #7546) -* `#8110 `__: BUG: distutils: fix issue with rpath in fcompiler/gnu.py -* `#8111 `__: ENH: Add a tool for release authors and PRs. -* `#8112 `__: DOC: Fix "See also" links in linalg. -* `#8114 `__: BUG: core: add missing error check after PyLong_AsSsize_t -* `#8121 `__: DOC: Improve histogram2d() example. -* `#8122 `__: BUG: Fix broken pickle in MaskedArray when dtype is object (Return... -* `#8124 `__: BUG: Fixed build break -* `#8125 `__: Rebase, BUG: Fixed deepcopy of F-order object arrays. -* `#8127 `__: BUG: integers to a negative integer powers should error. -* `#8141 `__: improve configure checks for broken systems -* `#8142 `__: BUG: np.ma.mean and var should return scalar if no mask -* `#8148 `__: BUG: import full module path in npy_load_module -* `#8153 `__: MAINT: Expose void-scalar "base" attribute in python -* `#8156 `__: DOC: added example with empty indices for a scalar, #8138 -* `#8160 `__: BUG: fix _array2string for structured array (issue #5692) -* `#8164 `__: MAINT: Update mailmap for NumPy 1.12.0 -* `#8165 `__: Fixup 8152, BUG: assert_allclose(..., equal_nan=False) doesn't... -* `#8167 `__: Fixup 8146, DOC: Clarify when PyArray_{Max, Min, Ptp} return... -* `#8168 `__: DOC: Minor spelling fix in genfromtxt() docstring. -* `#8173 `__: BLD: Enable build on AIX -* `#8174 `__: DOC: warn that dtype.descr is only for use in PEP3118 -* `#8177 `__: MAINT: Add python 3.6 support to suppress_warnings -* `#8178 `__: MAINT: Fix ResourceWarning new in Python 3.6. -* `#8180 `__: FIX: protect stolen ref by PyArray_NewFromDescr in array_empty -* `#8181 `__: ENH: Improve announce to find github squash-merge commits. 
-* `#8182 `__: MAINT: Update .mailmap -* `#8183 `__: MAINT: Ediff1d performance -* `#8184 `__: MAINT: make `assert_allclose` behavior on nans match pre 1.12 -* `#8188 `__: DOC: 'highest' is exclusive for randint() -* `#8189 `__: BUG: setfield should raise if arr is not writeable -* `#8190 `__: ENH: Add a float_power function with at least float64 precision. -* `#8197 `__: DOC: Add missing arguments to np.ufunc.outer -* `#8198 `__: DEP: Deprecate the keepdims argument to accumulate -* `#8199 `__: MAINT: change path to env in distutils.system_info. Closes gh-8195. -* `#8200 `__: BUG: Fix structured array format functions -* `#8202 `__: ENH: specialize name of dev package by interpreter -* `#8205 `__: DOC: change development instructions from SSH to HTTPS access. -* `#8216 `__: DOC: Patch doc errors for atleast_nd and frombuffer -* `#8218 `__: BUG: ediff1d should return subclasses -* `#8219 `__: DOC: Turn SciPy references into links. -* `#8222 `__: ENH: Make numpy.mean() do more precise computation -* `#8227 `__: BUG: Better check for invalid bounds in np.random.uniform. -* `#8231 `__: ENH: Refactor numpy ** operators for numpy scalar integer powers -* `#8234 `__: DOC: Clarified when a copy is made in numpy.asarray -* `#8236 `__: DOC: Fix documentation pull requests. -* `#8238 `__: MAINT: Update pavement.py -* `#8239 `__: ENH: Improve announce tool. -* `#8240 `__: REL: Prepare for 1.12.x branch -* `#8243 `__: BUG: Update operator `**` tests for new behavior. -* `#8246 `__: REL: Reset strides for RELAXED_STRIDE_CHECKING for 1.12 releases. -* `#8265 `__: BUG: np.piecewise not working for scalars -* `#8272 `__: TST: Path test should resolve symlinks when comparing -* `#8282 `__: DOC: Update 1.12.0 release notes. -* `#8286 `__: BUG: Fix pavement.py write_release_task. -* `#8296 `__: BUG: Fix iteration over reversed subspaces in mapiter_@name@. -* `#8304 `__: BUG: Fix PyPy crash in PyUFunc_GenericReduction. 
-* `#8319 `__: BLD: blacklist powl (longdouble power function) on OS X. -* `#8320 `__: BUG: do not link to Accelerate if OpenBLAS, MKL or BLIS are found. -* `#8322 `__: BUG: fixed kind specifications for parameters -* `#8336 `__: BUG: fix packbits and unpackbits to correctly handle empty arrays -* `#8338 `__: BUG: fix test_api test that fails intermittently in python 3 -* `#8339 `__: BUG: Fix ndarray.tofile large file corruption in append mode. -* `#8359 `__: BUG: Fix suppress_warnings (again) for Python 3.6. -* `#8372 `__: BUG: Fixes for ma.median and nanpercentile. -* `#8373 `__: BUG: correct letter case -* `#8379 `__: DOC: Update 1.12.0-notes.rst. -* `#8390 `__: ENH: retune apply_along_axis nanmedian cutoff in 1.12 -* `#8391 `__: DEP: Fix escaped string characters deprecated in Python 3.6. -* `#8394 `__: DOC: create 1.11.3 release notes. -* `#8399 `__: BUG: Fix author search in announce.py -* `#8402 `__: DOC, MAINT: Update 1.12.0 notes and mailmap. -* `#8418 `__: BUG: Fix ma.median even elements for 1.12 -* `#8424 `__: DOC: Fix tools and release notes to be more markdown compatible. -* `#8427 `__: BUG: Add a lock to assert_equal and other testing functions -* `#8431 `__: BUG: Fix apply_along_axis() for when func1d() returns a non-ndarray. -* `#8432 `__: BUG: Let linspace accept input that has an array_interface. -* `#8437 `__: TST: Update 3.6-dev tests to 3.6 after Python final release. -* `#8439 `__: DOC: Update 1.12.0 release notes. -* `#8466 `__: MAINT: Update mailmap entries. -* `#8467 `__: DOC: Back-port the missing part of gh-8464. -* `#8476 `__: DOC: Update 1.12.0 release notes. -* `#8477 `__: DOC: Update 1.12.0 release notes. diff -Nru python-numpy-1.13.3/doc/release/1.12.1-notes.rst python-numpy-1.14.5/doc/release/1.12.1-notes.rst --- python-numpy-1.13.3/doc/release/1.12.1-notes.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/release/1.12.1-notes.rst 2018-06-12 17:31:56.000000000 +0000 @@ -6,39 +6,21 @@ found in NumPy 1.12.0. 
In particular, the regression in f2py constant parsing is fixed. Wheels for Linux, Windows, and OSX can be found on pypi, +Bugs Fixed +========== -Contributors -============ - -A total of 10 people contributed to this release. People with a "+" by their -names contributed a patch for the first time. - -* Charles Harris -* Eric Wieser -* Greg Young -* Joerg Behrmann + -* John Kirkham -* Julian Taylor -* Marten van Kerkwijk -* Matthew Brett -* Shota Kawabuchi -* Jean Utke + - -Fixes Backported -================ - -* `#8483 `__: BUG: Fix wrong future nat warning and equiv type logic error... -* `#8489 `__: BUG: Fix wrong masked median for some special cases -* `#8490 `__: DOC: Place np.average in inline code -* `#8491 `__: TST: Work around isfinite inconsistency on i386 -* `#8494 `__: BUG: Guard against replacing constants without '_' spec in f2py. -* `#8524 `__: BUG: Fix mean for float 16 non-array inputs for 1.12 -* `#8571 `__: BUG: Fix calling python api with error set and minor leaks for... -* `#8602 `__: BUG: Make iscomplexobj compatible with custom dtypes again -* `#8618 `__: BUG: Fix undefined behaviour induced by bad __array_wrap__ -* `#8648 `__: BUG: Fix MaskedArray.__setitem__ -* `#8659 `__: BUG: PPC64el machines are POWER for Fortran in f2py -* `#8665 `__: BUG: Look up methods on MaskedArray in `_frommethod` -* `#8674 `__: BUG: Remove extra digit in binary_repr at limit -* `#8704 `__: BUG: Fix deepcopy regression for empty arrays. -* `#8707 `__: BUG: Fix ma.median for empty ndarrays +* BUG: Fix wrong future nat warning and equiv type logic error... +* BUG: Fix wrong masked median for some special cases +* DOC: Place np.average in inline code +* TST: Work around isfinite inconsistency on i386 +* BUG: Guard against replacing constants without '_' spec in f2py. +* BUG: Fix mean for float 16 non-array inputs for 1.12 +* BUG: Fix calling python api with error set and minor leaks for... 
+* BUG: Make iscomplexobj compatible with custom dtypes again +* BUG: Fix undefined behaviour induced by bad __array_wrap__ +* BUG: Fix MaskedArray.__setitem__ +* BUG: PPC64el machines are POWER for Fortran in f2py +* BUG: Look up methods on MaskedArray in `_frommethod` +* BUG: Remove extra digit in binary_repr at limit +* BUG: Fix deepcopy regression for empty arrays. +* BUG: Fix ma.median for empty ndarrays diff -Nru python-numpy-1.13.3/doc/release/1.14.0-notes.rst python-numpy-1.14.5/doc/release/1.14.0-notes.rst --- python-numpy-1.13.3/doc/release/1.14.0-notes.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/doc/release/1.14.0-notes.rst 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,669 @@ +========================== +NumPy 1.14.0 Release Notes +========================== + +Numpy 1.14.0 is the result of seven months of work and contains a large number +of bug fixes and new features, along with several changes with potential +compatibility issues. The major change that users will notice are the +stylistic changes in the way numpy arrays and scalars are printed, a change +that will affect doctests. See below for details on how to preserve the +old style printing when needed. + +A major decision affecting future development concerns the schedule for +dropping Python 2.7 support in the runup to 2020. The decision has been made to +support 2.7 for all releases made in 2018, with the last release being +designated a long term release with support for bug fixes extending through +2019. In 2019 support for 2.7 will be dropped in all new releases. More details +can be found in the relevant NEP_. + +This release supports Python 2.7 and 3.4 - 3.6. + +.. _NEP: https://github.com/numpy/numpy/blob/master/doc/neps/dropping-python2.7-proposal.rst + + +Highlights +========== + +* The `np.einsum` function uses BLAS when possible + +* ``genfromtxt``, ``loadtxt``, ``fromregex`` and ``savetxt`` can now handle + files with arbitrary Python supported encoding. 
+ +* Major improvements to printing of NumPy arrays and scalars. + + +New functions +============= + +* ``parametrize``: decorator added to numpy.testing + +* ``chebinterpolate``: Interpolate function at Chebyshev points. + +* ``format_float_positional`` and ``format_float_scientific`` : format + floating-point scalars unambiguously with control of rounding and padding. + +* ``PyArray_ResolveWritebackIfCopy`` and ``PyArray_SetWritebackIfCopyBase``, + new C-API functions useful in achieving PyPy compatibility. + + +Deprecations +============ + +* Using ``np.bool_`` objects in place of integers is deprecated. Previously + ``operator.index(np.bool_)`` was legal and allowed constructs such as + ``[1, 2, 3][np.True_]``. That was misleading, as it behaved differently from + ``np.array([1, 2, 3])[np.True_]``. + +* Truth testing of an empty array is deprecated. To check if an array is not + empty, use ``array.size > 0``. + +* Calling ``np.bincount`` with ``minlength=None`` is deprecated. + ``minlength=0`` should be used instead. + +* Calling ``np.fromstring`` with the default value of the ``sep`` argument is + deprecated. When that argument is not provided, a broken version of + ``np.frombuffer`` is used that silently accepts unicode strings and -- after + encoding them as either utf-8 (python 3) or the default encoding + (python 2) -- treats them as binary data. If reading binary data is + desired, ``np.frombuffer`` should be used directly. + +* The ``style`` option of array2string is deprecated in non-legacy printing mode. + +* ``PyArray_SetUpdateIfCopyBase`` has been deprecated. For NumPy versions >= 1.14 + use ``PyArray_SetWritebackIfCopyBase`` instead, see `C API changes` below for + more details. + + + +* The use of ``UPDATEIFCOPY`` arrays is deprecated, see `C API changes` below + for details. We will not be dropping support for those arrays, but they are + not compatible with PyPy. 
+ + +Future Changes +============== + +* ``np.issubdtype`` will stop downcasting dtype-like arguments. + It might be expected that ``issubdtype(np.float32, 'float64')`` and + ``issubdtype(np.float32, np.float64)`` mean the same thing - however, there + was an undocumented special case that translated the former into + ``issubdtype(np.float32, np.floating)``, giving the surprising result of True. + + This translation now gives a warning that explains what translation is + occurring. In the future, the translation will be disabled, and the first + example will be made equivalent to the second. + +* ``np.linalg.lstsq`` default for ``rcond`` will be changed. The ``rcond`` + parameter to ``np.linalg.lstsq`` will change its default to machine precision + times the largest of the input array dimensions. A FutureWarning is issued + when ``rcond`` is not passed explicitly. + +* ``a.flat.__array__()`` will return a writeable copy of ``a`` when ``a`` is + non-contiguous. Previously it returned an UPDATEIFCOPY array when ``a`` was + writeable. Currently it returns a non-writeable copy. See gh-7054 for a + discussion of the issue. + +* Unstructured void array's ``.item`` method will return a bytes object. In the + future, calling ``.item()`` on arrays or scalars of ``np.void`` datatype will + return a ``bytes`` object instead of a buffer or int array, the same as + returned by ``bytes(void_scalar)``. This may affect code which assumed the + return value was mutable, which will no longer be the case. A + ``FutureWarning`` is now issued when this would occur. + + +Compatibility notes +=================== + +The mask of a masked array view is also a view rather than a copy +----------------------------------------------------------------- +There was a FutureWarning about this change in NumPy 1.11.x. In short, it is +now the case that, when changing a view of a masked array, changes to the mask +are propagated to the original. That was not previously the case. 
This change +affects slices in particular. Note that this does not yet work properly if the +mask of the original array is ``nomask`` and the mask of the view is changed. +See gh-5580 for an extended discussion. The original behavior of having a copy +of the mask can be obtained by calling the ``unshare_mask`` method of the view. + +``np.ma.masked`` is no longer writeable +--------------------------------------- +Attempts to mutate the ``masked`` constant now error, as the underlying arrays +are marked readonly. In the past, it was possible to get away with:: + + # emulating a function that sometimes returns np.ma.masked + val = random.choice([np.ma.masked, 10]) + val_arr = np.asarray(val) + val_arr += 1 # now errors, previously changed np.ma.masked.data + +``np.ma`` functions producing ``fill_value``s have changed +---------------------------------------------------------- +Previously, ``np.ma.default_fill_value`` would return a 0d array, but +``np.ma.minimum_fill_value`` and ``np.ma.maximum_fill_value`` would return a +tuple of the fields. Instead, all three methods return a structured ``np.void`` +object, which is what you would already find in the ``.fill_value`` attribute. + +Additionally, the dtype guessing now matches that of ``np.array`` - so when +passing a python scalar ``x``, ``maximum_fill_value(x)`` is always the same as +``maximum_fill_value(np.array(x))``. Previously ``x = long(1)`` on Python 2 +violated this assumption. + +``a.flat.__array__()`` returns non-writeable arrays when ``a`` is non-contiguous +-------------------------------------------------------------------------------- +The intent is that the UPDATEIFCOPY array previously returned when ``a`` was +non-contiguous will be replaced by a writeable copy in the future. This +temporary measure is aimed to notify folks who expect the underlying array to be +modified in this situation that that will no longer be the case. 
The most +likely places for this to be noticed is when expressions of the form +``np.asarray(a.flat)`` are used, or when ``a.flat`` is passed as the out +parameter to a ufunc. + +``np.tensordot`` now returns zero array when contracting over 0-length dimension +-------------------------------------------------------------------------------- +Previously ``np.tensordot`` raised a ValueError when contracting over 0-length +dimension. Now it returns a zero array, which is consistent with the behaviour +of ``np.dot`` and ``np.einsum``. + +``numpy.testing`` reorganized +----------------------------- +This is not expected to cause problems, but possibly something has been left +out. If you experience an unexpected import problem using ``numpy.testing`` +let us know. + +``np.asfarray`` no longer accepts non-dtypes through the ``dtype`` argument +--------------------------------------------------------------------------- +This previously would accept ``dtype=some_array``, with the implied semantics +of ``dtype=some_array.dtype``. This was undocumented, unique across the numpy +functions, and if used would likely correspond to a typo. + +1D ``np.linalg.norm`` preserves float input types, even for arbitrary orders +---------------------------------------------------------------------------- +Previously, this would promote to ``float64`` when arbitrary orders were +passed, despite not doing so under the simple cases:: + + >>> f32 = np.float32([[1, 2]]) + >>> np.linalg.norm(f32, 2.0, axis=-1).dtype + dtype('float32') + >>> np.linalg.norm(f32, 2.0001, axis=-1).dtype + dtype('float64') # numpy 1.13 + dtype('float32') # numpy 1.14 + +This change affects only ``float32`` and ``float16`` arrays. + +``count_nonzero(arr, axis=())`` now counts over no axes, not all axes +--------------------------------------------------------------------- +Elsewhere, ``axis==()`` is always understood as "no axes", but +`count_nonzero` had a special case to treat this as "all axes". 
This was +inconsistent and surprising. The correct way to count over all axes has always +been to pass ``axis == None``. + +``__init__.py`` files added to test directories +----------------------------------------------- +This is for pytest compatibility in the case of duplicate test file names in +the different directories. As a result, ``run_module_suite`` no longer works, +i.e., ``python <path-to-test-file>`` results in an error. + +``.astype(bool)`` on unstructured void arrays now calls ``bool`` on each element +-------------------------------------------------------------------------------- +On Python 2, ``void_array.astype(bool)`` would always return an array of +``True``, unless the dtype is ``V0``. On Python 3, this operation would usually +crash. Going forwards, `astype` matches the behavior of ``bool(np.void)``, +considering a buffer of all zeros as false, and anything else as true. +Checks for ``V0`` can still be done with ``arr.dtype.itemsize == 0``. + +``MaskedArray.squeeze`` never returns ``np.ma.masked`` +------------------------------------------------------ +``np.squeeze`` is documented as returning a view, but the masked variant would +sometimes return ``masked``, which is not a view. This has been fixed, so that +the result is always a view on the original masked array. +This breaks any code that used ``masked_arr.squeeze() is np.ma.masked``, but +fixes code that writes to the result of `.squeeze()`. + +Renamed first parameter of ``can_cast`` from ``from`` to ``from_`` +------------------------------------------------------------------ +The previous parameter name ``from`` is a reserved keyword in Python, which made +it difficult to pass the argument by name. This has been fixed by renaming +the parameter to ``from_``. + +``isnat`` raises ``TypeError`` when passed wrong type +------------------------------------------------------ +The ufunc ``isnat`` used to raise a ``ValueError`` when it was not passed +variables of type ``datetime`` or ``timedelta``. 
This has been changed to +raising a ``TypeError``. + +``dtype.__getitem__`` raises ``TypeError`` when passed wrong type +----------------------------------------------------------------- +When indexed with a float, the dtype object used to raise ``ValueError``. + +User-defined types now need to implement ``__str__`` and ``__repr__`` +--------------------------------------------------------------------- +Previously, user-defined types could fall back to a default implementation of +``__str__`` and ``__repr__`` implemented in numpy, but this has now been +removed. Now user-defined types will fall back to the python default +``object.__str__`` and ``object.__repr__``. + +Many changes to array printing, disableable with the new "legacy" printing mode +------------------------------------------------------------------------------- +The ``str`` and ``repr`` of ndarrays and numpy scalars have been changed in +a variety of ways. These changes are likely to break downstream user's +doctests. + +These new behaviors can be disabled to mostly reproduce numpy 1.13 behavior by +enabling the new 1.13 "legacy" printing mode. This is enabled by calling +``np.set_printoptions(legacy="1.13")``, or using the new ``legacy`` argument to +``np.array2string``, as ``np.array2string(arr, legacy='1.13')``. + +In summary, the major changes are: + +* For floating-point types: + + * The ``repr`` of float arrays often omits a space previously printed + in the sign position. See the new ``sign`` option to ``np.set_printoptions``. + * Floating-point arrays and scalars use a new algorithm for decimal + representations, giving the shortest unique representation. This will + usually shorten ``float16`` fractional output, and sometimes ``float32`` and + ``float128`` output. ``float64`` should be unaffected. See the new + ``floatmode`` option to ``np.set_printoptions``. 
+ * Float arrays printed in scientific notation no longer use fixed-precision, + and now instead show the shortest unique representation. + * The ``str`` of floating-point scalars is no longer truncated in python2. + +* For other data types: + + * Non-finite complex scalars print like ``nanj`` instead of ``nan*j``. + * ``NaT`` values in datetime arrays are now properly aligned. + * Arrays and scalars of ``np.void`` datatype are now printed using hex + notation. + +* For line-wrapping: + + * The "dtype" part of ndarray reprs will now be printed on the next line + if there isn't space on the last line of array output. + * The ``linewidth`` format option is now always respected. + The `repr` or `str` of an array will never exceed this, unless a single + element is too wide. + * The last line of an array string will never have more elements than earlier + lines. + * An extra space is no longer inserted on the first line if the elements are + too wide. + +* For summarization (the use of ``...`` to shorten long arrays): + + * A trailing comma is no longer inserted for ``str``. + Previously, ``str(np.arange(1001))`` gave + ``'[ 0 1 2 ..., 998 999 1000]'``, which has an extra comma. + * For arrays of 2-D and beyond, when ``...`` is printed on its own line in + order to summarize any but the last axis, newlines are now appended to that + line to match its leading newlines and a trailing space character is + removed. + +* ``MaskedArray`` arrays now separate printed elements with commas, always + print the dtype, and correctly wrap the elements of long arrays to multiple + lines. If there is more than 1 dimension, the array attributes are now + printed in a new "left-justified" printing style. +* ``recarray`` arrays no longer print a trailing space before their dtype, and + wrap to the right number of columns. +* 0d arrays no longer have their own idiosyncratic implementations of ``str`` + and ``repr``. The ``style`` argument to ``np.array2string`` is deprecated. 
+* Arrays of ``bool`` datatype will omit the datatype in the ``repr``. +* User-defined ``dtypes`` (subclasses of ``np.generic``) now need to + implement ``__str__`` and ``__repr__``. + +Some of these changes are described in more detail below. If you need to retain +the previous behavior for doctests or other reasons, you may want to do +something like:: + + # FIXME: We need the str/repr formatting used in Numpy < 1.14. + try: + np.set_printoptions(legacy='1.13') + except TypeError: + pass + +after :: + + import numpy as np + + +C API changes +============= + +PyPy compatible alternative to ``UPDATEIFCOPY`` arrays +------------------------------------------------------ +``UPDATEIFCOPY`` arrays are contiguous copies of existing arrays, possibly with +different dimensions, whose contents are copied back to the original array when +their refcount goes to zero and they are deallocated. Because PyPy does not use +refcounts, they do not function correctly with PyPy. NumPy is in the process of +eliminating their use internally and two new C-API functions, + +* ``PyArray_SetWritebackIfCopyBase`` +* ``PyArray_ResolveWritebackIfCopy``, + +have been added together with a complementary flag, +``NPY_ARRAY_WRITEBACKIFCOPY``. Using the new functionality also requires that +some flags be changed when new arrays are created, to wit: +``NPY_ARRAY_INOUT_ARRAY`` should be replaced by ``NPY_ARRAY_INOUT_ARRAY2`` and +``NPY_ARRAY_INOUT_FARRAY`` should be replaced by ``NPY_ARRAY_INOUT_FARRAY2``. +Arrays created with these new flags will then have the ``WRITEBACKIFCOPY`` +semantics. + +If PyPy compatibility is not a concern, these new functions can be ignored, +although there will be a ``DeprecationWarning``. 
If you do wish to pursue PyPy +compatibility, more information on these functions and their use may be found +in the c-api_ documentation and the example in how-to-extend_. + +.. _c-api: https://github.com/numpy/numpy/blob/master/doc/source/reference/c-api.array.rst +.. _how-to-extend: https://github.com/numpy/numpy/blob/master/doc/source/user/c-info.how-to-extend.rst + + +New Features +============ + +Encoding argument for text IO functions +--------------------------------------- +``genfromtxt``, ``loadtxt``, ``fromregex`` and ``savetxt`` can now handle files +with arbitrary encoding supported by Python via the encoding argument. +For backward compatibility the argument defaults to the special ``bytes`` value +which continues to treat text as raw byte values and continues to pass latin1 +encoded bytes to custom converters. +Using any other value (including ``None`` for system default) will switch the +functions to real text IO so one receives unicode strings instead of bytes in +the resulting arrays. + +External ``nose`` plugins are usable by ``numpy.testing.Tester`` +---------------------------------------------------------------- +``numpy.testing.Tester`` is now aware of ``nose`` plugins that are outside the +``nose`` built-in ones. This allows using, for example, ``nose-timer`` like +so: ``np.test(extra_argv=['--with-timer', '--timer-top-n', '20'])`` to +obtain the runtime of the 20 slowest tests. An extra keyword ``timer`` was +also added to ``Tester.test``, so ``np.test(timer=20)`` will also report the 20 +slowest tests. + +``parametrize`` decorator added to ``numpy.testing`` +---------------------------------------------------- +A basic ``parametrize`` decorator is now available in ``numpy.testing``. It is +intended to allow rewriting yield based tests that have been deprecated in +pytest so as to facilitate the transition to pytest in the future. The nose +testing framework has not been supported for several years and looks like +abandonware. 
+
+The new ``parametrize`` decorator does not have the full functionality of the
+one in pytest. It doesn't work for classes, doesn't support nesting, and does
+not substitute variable names. Even so, it should be adequate to rewrite the
+NumPy tests.
+
+``chebinterpolate`` function added to ``numpy.polynomial.chebyshev``
+--------------------------------------------------------------------
+The new ``chebinterpolate`` function interpolates a given function at the
+Chebyshev points of the first kind. A new ``Chebyshev.interpolate`` class
+method adds support for interpolation over arbitrary intervals using the scaled
+and shifted Chebyshev points of the first kind.
+
+Support for reading lzma compressed text files in Python 3
+----------------------------------------------------------
+With Python versions containing the ``lzma`` module the text IO functions can
+now transparently read from files with ``xz`` or ``lzma`` extension.
+
+``sign`` option added to ``np.set_printoptions`` and ``np.array2string``
+------------------------------------------------------------------------
+This option controls printing of the sign of floating-point types, and may be
+one of the characters '-', '+' or ' '. With '+' numpy always prints the sign of
+positive values, with ' ' it always prints a space (whitespace character) in
+the sign position of positive values, and with '-' it will omit the sign
+character for positive values. The new default is '-'.
+
+This new default changes the float output relative to numpy 1.13. The old
+behavior can be obtained in 1.13 "legacy" printing mode, see compatibility
+notes above.
+
+``hermitian`` option added to ``np.linalg.matrix_rank``
+-------------------------------------------------------
+The new ``hermitian`` option allows choosing between standard SVD based matrix
+rank calculation and the more efficient eigenvalue based method for
+symmetric/hermitian matrices.
+ +``threshold`` and ``edgeitems`` options added to ``np.array2string`` +-------------------------------------------------------------------- +These options could previously be controlled using ``np.set_printoptions``, but +now can be changed on a per-call basis as arguments to ``np.array2string``. + +``concatenate`` and ``stack`` gained an ``out`` argument +-------------------------------------------------------- +A preallocated buffer of the desired dtype can now be used for the output of +these functions. + +Support for PGI flang compiler on Windows +----------------------------------------- +The PGI flang compiler is a Fortran front end for LLVM released by NVIDIA under +the Apache 2 license. It can be invoked by :: + + python setup.py config --compiler=clang --fcompiler=flang install + +There is little experience with this new compiler, so any feedback from people +using it will be appreciated. + + +Improvements +============ + +Numerator degrees of freedom in ``random.noncentral_f`` need only be positive. +------------------------------------------------------------------------------ +Prior to NumPy 1.14.0, the numerator degrees of freedom needed to be > 1, but +the distribution is valid for values > 0, which is the new requirement. + +The GIL is released for all ``np.einsum`` variations +---------------------------------------------------- +Some specific loop structures which have an accelerated loop version +did not release the GIL prior to NumPy 1.14.0. This oversight has been +fixed. + +The `np.einsum` function will use BLAS when possible and optimize by default +---------------------------------------------------------------------------- +The ``np.einsum`` function will now call ``np.tensordot`` when appropriate. +Because ``np.tensordot`` uses BLAS when possible, that will speed up execution. +By default, ``np.einsum`` will also attempt optimization as the overhead is +small relative to the potential improvement in speed. 
+ +``f2py`` now handles arrays of dimension 0 +------------------------------------------ +``f2py`` now allows for the allocation of arrays of dimension 0. This allows +for more consistent handling of corner cases downstream. + +``numpy.distutils`` supports using MSVC and mingw64-gfortran together +--------------------------------------------------------------------- +Numpy distutils now supports using Mingw64 gfortran and MSVC compilers +together. This enables the production of Python extension modules on Windows +containing Fortran code while retaining compatibility with the +binaries distributed by Python.org. Not all use cases are supported, +but most common ways to wrap Fortran for Python are functional. + +Compilation in this mode is usually enabled automatically, and can be +selected via the ``--fcompiler`` and ``--compiler`` options to +``setup.py``. Moreover, linking Fortran codes to static OpenBLAS is +supported; by default a gfortran compatible static archive +``openblas.a`` is looked for. + +``np.linalg.pinv`` now works on stacked matrices +------------------------------------------------ +Previously it was limited to a single 2d array. + +``numpy.save`` aligns data to 64 bytes instead of 16 +---------------------------------------------------- +Saving NumPy arrays in the ``npy`` format with ``numpy.save`` inserts +padding before the array data to align it at 64 bytes. Previously +this was only 16 bytes (and sometimes less due to a bug in the code +for version 2). Now the alignment is 64 bytes, which matches the +widest SIMD instruction set commonly available, and is also the most +common cache line size. This makes ``npy`` files easier to use in +programs which open them with ``mmap``, especially on Linux where an +``mmap`` offset must be a multiple of the page size. 
+
+NPZ files now can be written without using temporary files
+----------------------------------------------------------
+In Python 3.6+ ``numpy.savez`` and ``numpy.savez_compressed`` now write
+directly to a ZIP file, without creating intermediate temporary files.
+
+Better support for empty structured and string types
+----------------------------------------------------
+Structured types can contain zero fields, and string dtypes can contain zero
+characters. Zero-length strings still cannot be created directly, and must be
+constructed through structured dtypes::
+
+    str0 = np.empty(10, np.dtype([('v', str, N)]))['v']
+    void0 = np.empty(10, np.void)
+
+It was always possible to work with these, but the following operations are
+now supported for these arrays:
+
+ * `arr.sort()`
+ * `arr.view(bytes)`
+ * `arr.resize(...)`
+ * `pickle.dumps(arr)`
+
+Support for ``decimal.Decimal`` in ``np.lib.financial``
+-------------------------------------------------------
+Unless otherwise stated all functions within the ``financial`` package now
+support using the ``decimal.Decimal`` built-in type.
+
+Float printing now uses "dragon4" algorithm for shortest decimal representation
+-------------------------------------------------------------------------------
+The ``str`` and ``repr`` of floating-point values (16, 32, 64 and 128 bit) are
+now printed to give the shortest decimal representation which uniquely
+identifies the value from others of the same type. Previously this was only
+true for ``float64`` values. The remaining float types will now often be shorter
+than in numpy 1.13. Arrays printed in scientific notation now also use the
+shortest scientific representation, instead of fixed precision as before.
+
+ Additionally, the `str` of float scalars will no longer be truncated
+ in python2, unlike python2 `float`s. `np.double` scalars now have a ``str``
+ and ``repr`` identical to that of a python3 float.
+ +New functions ``np.format_float_scientific`` and ``np.format_float_positional`` +are provided to generate these decimal representations. + +A new option ``floatmode`` has been added to ``np.set_printoptions`` and +``np.array2string``, which gives control over uniqueness and rounding of +printed elements in an array. The new default is ``floatmode='maxprec'`` with +``precision=8``, which will print at most 8 fractional digits, or fewer if an +element can be uniquely represented with fewer. A useful new mode is +``floatmode="unique"``, which will output enough digits to specify the array +elements uniquely. + +Numpy complex-floating-scalars with values like ``inf*j`` or ``nan*j`` now +print as ``infj`` and ``nanj``, like the pure-python ``complex`` type. + +The ``FloatFormat`` and ``LongFloatFormat`` classes are deprecated and should +both be replaced by ``FloatingFormat``. Similarly ``ComplexFormat`` and +``LongComplexFormat`` should be replaced by ``ComplexFloatingFormat``. + +``void`` datatype elements are now printed in hex notation +---------------------------------------------------------- +A hex representation compatible with the python ``bytes`` type is now printed +for unstructured ``np.void`` elements, e.g., ``V4`` datatype. Previously, in +python2 the raw void data of the element was printed to stdout, or in python3 +the integer byte values were shown. + +printing style for ``void`` datatypes is now independently customizable +----------------------------------------------------------------------- +The printing style of ``np.void`` arrays is now independently customizable +using the ``formatter`` argument to ``np.set_printoptions``, using the +``'void'`` key, instead of the catch-all ``numpystr`` key as before. + +Reduced memory usage of ``np.loadtxt`` +-------------------------------------- +``np.loadtxt`` now reads files in chunks instead of all at once which decreases +its memory usage significantly for large files. 
+
+
+Changes
+=======
+
+Multiple-field indexing/assignment of structured arrays
+-------------------------------------------------------
+The indexing and assignment of structured arrays with multiple fields has
+changed in a number of ways, as warned about in previous releases.
+
+First, indexing a structured array with multiple fields, e.g.,
+``arr[['f1', 'f3']]``, returns a view into the original array instead of a
+copy. The returned view will have extra padding bytes corresponding to
+intervening fields in the original array, unlike the copy in 1.13, which will
+affect code such as ``arr[['f1', 'f3']].view(newdtype)``.
+
+Second, assignment between structured arrays will now occur "by position"
+instead of "by field name". The Nth field of the destination will be set to the
+Nth field of the source regardless of field name, unlike in numpy versions 1.6
+to 1.13 in which fields in the destination array were set to the
+identically-named field in the source array or to 0 if the source did not have
+a field.
+
+Correspondingly, the order of fields in structured dtypes now matters when
+computing dtype equality. For example, with the dtypes ::
+
+    x = dtype({'names': ['A', 'B'], 'formats': ['i4', 'f4'], 'offsets': [0, 4]})
+    y = dtype({'names': ['B', 'A'], 'formats': ['f4', 'i4'], 'offsets': [4, 0]})
+
+the expression ``x == y`` will now return ``False``, unlike before.
+This makes dictionary based dtype specifications like
+``dtype({'a': ('i4', 0), 'b': ('f4', 4)})`` dangerous in python < 3.6
+since dict key order is not preserved in those versions.
+
+Assignment from a structured array to a boolean array now raises a ValueError,
+unlike in 1.13, where it always set the destination elements to ``True``.
+
+Assignment from structured array with more than one field to a non-structured
+array now raises a ValueError. In 1.13 this copied just the first field of the
+source to the destination.
+ +Using field "titles" in multiple-field indexing is now disallowed, as is +repeating a field name in a multiple-field index. + +The documentation for structured arrays in the user guide has been +significantly updated to reflect these changes. + +Integer and Void scalars are now unaffected by ``np.set_string_function`` +------------------------------------------------------------------------- +Previously, unlike most other numpy scalars, the ``str`` and ``repr`` of +integer and void scalars could be controlled by ``np.set_string_function``. +This is no longer possible. + +0d array printing changed, ``style`` arg of array2string deprecated +------------------------------------------------------------------- +Previously the ``str`` and ``repr`` of 0d arrays had idiosyncratic +implementations which returned ``str(a.item())`` and ``'array(' + +repr(a.item()) + ')'`` respectively for 0d array ``a``, unlike both numpy +scalars and higher dimension ndarrays. + +Now, the ``str`` of a 0d array acts like a numpy scalar using ``str(a[()])`` +and the ``repr`` acts like higher dimension arrays using ``formatter(a[()])``, +where ``formatter`` can be specified using ``np.set_printoptions``. The +``style`` argument of ``np.array2string`` is deprecated. + +This new behavior is disabled in 1.13 legacy printing mode, see compatibility +notes above. + +Seeding ``RandomState`` using an array requires a 1-d array +----------------------------------------------------------- +``RandomState`` previously would accept empty arrays or arrays with 2 or more +dimensions, which resulted in either a failure to seed (empty arrays) or for +some of the passed values to be ignored when setting the seed. + +``MaskedArray`` objects show a more useful ``repr`` +--------------------------------------------------- +The ``repr`` of a ``MaskedArray`` is now closer to the python code that would +produce it, with arrays now being shown with commas and dtypes. 
Like the other +formatting changes, this can be disabled with the 1.13 legacy printing mode in +order to help transition doctests. + +The ``repr`` of ``np.polynomial`` classes is more explicit +---------------------------------------------------------- +It now shows the domain and window parameters as keyword arguments to make +them more clear:: + + >>> np.polynomial.Polynomial(range(4)) + Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) diff -Nru python-numpy-1.13.3/doc/release/1.14.1-notes.rst python-numpy-1.14.5/doc/release/1.14.1-notes.rst --- python-numpy-1.13.3/doc/release/1.14.1-notes.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/doc/release/1.14.1-notes.rst 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,92 @@ +========================== +NumPy 1.14.1 Release Notes +========================== + +This is a bugfix release for some problems reported following the 1.14.0 release. The major +problems fixed are the following. + +* Problems with the new array printing, particularly the printing of complex + values, Please report any additional problems that may turn up. +* Problems with ``np.einsum`` due to the new ``optimized=True`` default. Some + fixes for optimization have been applied and ``optimize=False`` is now the + default. +* The sort order in ``np.unique`` when ``axis=`` will now always + be lexicographic in the subarray elements. In previous NumPy versions there + was an optimization that could result in sorting the subarrays as unsigned + byte strings. +* The change in 1.14.0 that multi-field indexing of structured arrays returns a + view instead of a copy has been reverted but remains on track for NumPy 1.15. + Affected users should read the 1.14.1 Numpy User Guide section + "basics/structured arrays/accessing multiple fields" for advice on how to + manage this transition. + +The Python versions supported in this release are 2.7 and 3.4 - 3.6. 
The Python +3.6 wheels available from PIP are built with Python 3.6.2 and should be +compatible with all previous versions of Python 3.6. The source releases were +cythonized with Cython 0.26.1, which is known to **not** support the upcoming +Python 3.7 release. People who wish to run Python 3.7 should check out the +NumPy repo and try building with the, as yet, unreleased master branch of +Cython. + +Contributors +============ + +A total of 14 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Allan Haldane +* Charles Harris +* Daniel Smith +* Dennis Weyland + +* Eric Larson +* Eric Wieser +* Jarrod Millman +* Kenichi Maehashi + +* Marten van Kerkwijk +* Mathieu Lamarre +* Sebastian Berg +* Simon Conseil +* Simon Gibbons +* xoviat + +Pull requests merged +==================== + +A total of 36 pull requests were merged for this release. + +* `#10339 `__: BUG: restrict the __config__ modifications to win32 +* `#10368 `__: MAINT: Adjust type promotion in linalg.norm +* `#10375 `__: BUG: add missing paren and remove quotes from repr of fieldless... +* `#10395 `__: MAINT: Update download URL in setup.py. +* `#10396 `__: BUG: fix einsum issue with unicode input and py2 +* `#10397 `__: BUG: fix error message not formatted in einsum +* `#10398 `__: DOC: add documentation about how to handle new array printing +* `#10403 `__: BUG: Set einsum optimize parameter default to `False`. 
+* `#10424 `__: ENH: Fix repr of np.record objects to match np.void types #10412 +* `#10425 `__: MAINT: Update zesty to artful for i386 testing +* `#10431 `__: REL: Add 1.14.1 release notes template +* `#10435 `__: MAINT: Use ValueError for duplicate field names in lookup (backport) +* `#10534 `__: BUG: Provide a better error message for out-of-order fields +* `#10536 `__: BUG: Resize bytes_ columns in genfromtxt (backport of #10401) +* `#10537 `__: BUG: multifield-indexing adds padding bytes: revert for 1.14.1 +* `#10539 `__: BUG: fix np.save issue with python 2.7.5 +* `#10540 `__: BUG: Add missing DECREF in Py2 int() cast +* `#10541 `__: TST: Add circleci document testing to maintenance/1.14.x +* `#10542 `__: BUG: complex repr has extra spaces, missing + (1.14 backport) +* `#10550 `__: BUG: Set missing exception after malloc +* `#10557 `__: BUG: In numpy.i, clear CARRAY flag if wrapped buffer is not C_CONTIGUOUS. +* `#10558 `__: DEP: Issue FutureWarning when malformed records detected. +* `#10559 `__: BUG: Fix einsum optimize logic for singleton dimensions +* `#10560 `__: BUG: Fix calling ufuncs with a positional output argument. +* `#10561 `__: BUG: Fix various Big-Endian test failures (ppc64) +* `#10562 `__: BUG: Make dtype.descr error for out-of-order fields. +* `#10563 `__: BUG: arrays not being flattened in `union1d` +* `#10607 `__: MAINT: Update sphinxext submodule hash. +* `#10608 `__: BUG: Revert sort optimization in np.unique. +* `#10609 `__: BUG: infinite recursion in str of 0d subclasses +* `#10610 `__: BUG: Align type definition with generated lapack +* `#10612 `__: BUG/ENH: Improve output for structured non-void types +* `#10622 `__: BUG: deallocate recursive closure in arrayprint.py (1.14 backport) +* `#10624 `__: BUG: Correctly identify comma seperated dtype strings +* `#10629 `__: BUG: deallocate recursive closure in arrayprint.py (backport... +* `#10630 `__: REL: Prepare for 1.14.1 release. 
diff -Nru python-numpy-1.13.3/doc/release/1.14.2-notes.rst python-numpy-1.14.5/doc/release/1.14.2-notes.rst --- python-numpy-1.13.3/doc/release/1.14.2-notes.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/doc/release/1.14.2-notes.rst 2018-06-12 17:31:56.000000000 +0000 @@ -0,0 +1,40 @@ +========================== +NumPy 1.14.2 Release Notes +========================== + +This is a bugfix release for some bugs reported following the 1.14.1 release. The major +problems dealt with are as follows. + +* Residual bugs in the new array printing functionality. +* Regression resulting in a relocation problem with shared library. +* Improved PyPy compatibility. + +The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python +3.6 wheels available from PIP are built with Python 3.6.2 and should be +compatible with all previous versions of Python 3.6. The source releases were +cythonized with Cython 0.26.1, which is known to **not** support the upcoming +Python 3.7 release. People who wish to run Python 3.7 should check out the +NumPy repo and try building with the, as yet, unreleased master branch of +Cython. + +Contributors +============ + +A total of 4 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Allan Haldane +* Charles Harris +* Eric Wieser +* Pauli Virtanen + +Pull requests merged +==================== + +A total of 5 pull requests were merged for this release. + +* `#10674 `__: BUG: Further back-compat fix for subclassed array repr +* `#10725 `__: BUG: dragon4 fractional output mode adds too many trailing zeros +* `#10726 `__: BUG: Fix f2py generated code to work on PyPy +* `#10727 `__: BUG: Fix missing NPY_VISIBILITY_HIDDEN on npy_longdouble_to_PyLong +* `#10729 `__: DOC: Create 1.14.2 notes and changelog. 
diff -Nru python-numpy-1.13.3/doc/release/1.14.3-notes.rst python-numpy-1.14.5/doc/release/1.14.3-notes.rst --- python-numpy-1.13.3/doc/release/1.14.3-notes.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/doc/release/1.14.3-notes.rst 2018-06-12 17:31:56.000000000 +0000 @@ -0,0 +1,41 @@ +========================== +NumPy 1.14.3 Release Notes +========================== + +This is a bugfix release for a few bugs reported following the 1.14.2 release: + +* np.lib.recfunctions.fromrecords accepts a list-of-lists, until 1.15 +* In python2, float types use the new print style when printing to a file +* style arg in "legacy" print mode now works for 0d arrays + +The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python +3.6 wheels available from PIP are built with Python 3.6.2 and should be +compatible with all previous versions of Python 3.6. The source releases were +cythonized with Cython 0.28.2. + +Contributors +============ + +A total of 6 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Allan Haldane +* Charles Harris +* Jonathan March + +* Malcolm Smith + +* Matti Picus +* Pauli Virtanen + +Pull requests merged +==================== + +A total of 8 pull requests were merged for this release. + +* `#10862 `__: BUG: floating types should override tp_print (1.14 backport) +* `#10905 `__: BUG: for 1.14 back-compat, accept list-of-lists in fromrecords +* `#10947 `__: BUG: 'style' arg to array2string broken in legacy mode (1.14... +* `#10959 `__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key +* `#10960 `__: BUG: Add missing underscore to prototype in check_embedded_lapack +* `#10961 `__: BUG: Fix encoding regression in ma/bench.py (Issue #10868) +* `#10962 `__: BUG: core: fix NPY_TITLE_KEY macro on pypy +* `#10974 `__: BUG: test, fix PyArray_DiscardWritebackIfCopy... 
diff -Nru python-numpy-1.13.3/doc/release/1.14.4-notes.rst python-numpy-1.14.5/doc/release/1.14.4-notes.rst --- python-numpy-1.13.3/doc/release/1.14.4-notes.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/doc/release/1.14.4-notes.rst 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,60 @@ +========================== +NumPy 1.14.4 Release Notes +========================== + +This is a bugfix release for bugs reported following the 1.14.3 release. The +most significant fixes are: + +* fixes for compiler instruction reordering that resulted in NaN's not being + properly propagated in `np.max` and `np.min`, + +* fixes for bus faults on SPARC and older ARM due to incorrect alignment + checks. + +There are also improvements to printing of long doubles on PPC platforms. All +is not yet perfect on that platform, the whitespace padding is still incorrect +and is to be fixed in numpy 1.15, consequently NumPy still fails some +printing-related (and other) unit tests on ppc systems. However, the printed +values are now correct. + +Note that NumPy will error on import if it detects incorrect float32 `dot` +results. This problem has been seen on the Mac when working in the Anaconda +enviroment and is due to a subtle interaction between MKL and PyQt5. It is not +strictly a NumPy problem, but it is best that users be aware of it. See the +gh-8577 NumPy issue for more information. + +The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python +3.6 wheels available from PIP are built with Python 3.6.2 and should be +compatible with all previous versions of Python 3.6. The source releases were +cythonized with Cython 0.28.2 and should work for the upcoming Python 3.7. + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Allan Haldane +* Charles Harris +* Marten van Kerkwijk +* Matti Picus +* Pauli Virtanen +* Ryan Soklaski + +* Sebastian Berg + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. + +* `#11104 `__: BUG: str of DOUBLE_DOUBLE format wrong on ppc64 +* `#11170 `__: TST: linalg: add regression test for gh-8577 +* `#11174 `__: MAINT: add sanity-checks to be run at import time +* `#11181 `__: BUG: void dtype setup checked offset not actual pointer for alignment +* `#11194 `__: BUG: Python2 doubles don't print correctly in interactive shell. +* `#11198 `__: BUG: optimizing compilers can reorder call to npy_get_floatstatus +* `#11199 `__: BUG: reduce using SSE only warns if inside SSE loop +* `#11203 `__: BUG: Bytes delimiter/comments in genfromtxt should be decoded +* `#11211 `__: BUG: Fix reference count/memory leak exposed by better testing +* `#11219 `__: BUG: Fixes einsum broadcasting bug when optimize=True +* `#11251 `__: DOC: Document 1.14.4 release. diff -Nru python-numpy-1.13.3/doc/release/1.14.5-notes.rst python-numpy-1.14.5/doc/release/1.14.5-notes.rst --- python-numpy-1.13.3/doc/release/1.14.5-notes.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/doc/release/1.14.5-notes.rst 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,30 @@ +========================== +NumPy 1.14.5 Release Notes +========================== + +This is a bugfix release for bugs reported following the 1.14.4 release. The +most significant fixes are: + +* fixes for compilation errors on alpine and NetBSD + +The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python +3.6 wheels available from PIP are built with Python 3.6.2 and should be +compatible with all previous versions of Python 3.6. The source releases were +cythonized with Cython 0.28.2 and should work for the upcoming Python 3.7. + +Contributors +============ + +A total of 1 person contributed to this release. 
People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris + +Pull requests merged +==================== + +A total of 2 pull requests were merged for this release. + +* `#11274 `__: BUG: Correct use of NPY_UNUSED. +* `#11294 `__: BUG: Remove extra trailing parentheses. + diff -Nru python-numpy-1.13.3/doc/source/about.rst python-numpy-1.14.5/doc/source/about.rst --- python-numpy-1.13.3/doc/source/about.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/about.rst 2018-06-12 18:28:52.000000000 +0000 @@ -40,8 +40,7 @@ - `Old NumPy Trac `__ (no longer used) -More information about the development of NumPy can be found at -http://scipy.org/Developer_Zone +More information about the development of NumPy can be found at our `Developer Zone `__. If you want to fix issues in this documentation, the easiest way is to participate in `our ongoing documentation marathon diff -Nru python-numpy-1.13.3/doc/source/conf.py python-numpy-1.14.5/doc/source/conf.py --- python-numpy-1.13.3/doc/source/conf.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/conf.py 2018-06-12 18:28:52.000000000 +0000 @@ -19,12 +19,18 @@ sys.path.insert(0, os.path.abspath('../sphinxext')) -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc', +extensions = ['sphinx.ext.autodoc', 'numpydoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.autosummary', 'sphinx.ext.graphviz', 'matplotlib.sphinxext.plot_directive'] +if sphinx.__version__ >= "1.4": + extensions.append('sphinx.ext.imgmath') + imgmath_image_format = 'svg' +else: + extensions.append('sphinx.ext.pngmath') + # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] @@ -123,8 +129,9 @@ htmlhelp_basename = 'numpy' -pngmath_use_preview = True -pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent'] +if 'sphinx.ext.pngmath' in extensions: + pngmath_use_preview = True + pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent'] plot_html_show_formats = False plot_html_show_source_link = False @@ -308,19 +315,19 @@ for part in fullname.split('.'): try: obj = getattr(obj, part) - except: + except Exception: return None try: fn = inspect.getsourcefile(obj) - except: + except Exception: fn = None if not fn: return None try: source, lineno = inspect.getsourcelines(obj) - except: + except Exception: lineno = None if lineno: diff -Nru python-numpy-1.13.3/doc/source/dev/gitwash/development_setup.rst python-numpy-1.14.5/doc/source/dev/gitwash/development_setup.rst --- python-numpy-1.13.3/doc/source/dev/gitwash/development_setup.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/dev/gitwash/development_setup.rst 2018-06-12 17:31:56.000000000 +0000 @@ -62,7 +62,7 @@ git clone https://github.com/your-user-name/numpy.git cd numpy - git remote add upstream git://github.com/numpy/numpy.git + git remote add upstream https://github.com/numpy/numpy.git In detail ========= @@ -95,21 +95,16 @@ :: cd numpy - git remote add upstream git://github.com/numpy/numpy.git + git remote add upstream https://github.com/numpy/numpy.git ``upstream`` here is just the arbitrary name we're using to refer to the main NumPy_ repository at `NumPy github`_. -Note that we've used ``git://`` for the URL rather than ``https://``. The -``git://`` URL is read only. This means we that we can't accidentally -(or deliberately) write to the upstream repo, and we are only going to -use it to merge into our own code. 
- Just for your own satisfaction, show yourself that you now have a new 'remote', with ``git remote -v show``, giving you something like:: - upstream git://github.com/numpy/numpy.git (fetch) - upstream git://github.com/numpy/numpy.git (push) + upstream https://github.com/numpy/numpy.git (fetch) + upstream https://github.com/numpy/numpy.git (push) origin https://github.com/your-user-name/numpy.git (fetch) origin https://github.com/your-user-name/numpy.git (push) @@ -122,7 +117,7 @@ You may also want to have easy access to all pull requests sent to the NumPy repository:: - git config --add remote.upstream.fetch '+refs/pull//head:refs/remotes/upstream/pr/' + git config --add remote.upstream.fetch '+refs/pull/*/head:refs/remotes/upstream/pr/*' Your config file should now look something like (from ``$ cat .git/config``):: @@ -138,7 +133,7 @@ url = https://github.com/your-user-name/numpy.git fetch = +refs/heads/*:refs/remotes/origin/* [remote "upstream"] - url = git://github.com/numpy/numpy.git + url = https://github.com/numpy/numpy.git fetch = +refs/heads/*:refs/remotes/upstream/* fetch = +refs/pull/*/head:refs/remotes/upstream/pr/* [branch "master"] diff -Nru python-numpy-1.13.3/doc/source/dev/governance/people.rst python-numpy-1.14.5/doc/source/dev/governance/people.rst --- python-numpy-1.13.3/doc/source/dev/governance/people.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/dev/governance/people.rst 2018-06-12 17:31:56.000000000 +0000 @@ -12,8 +12,6 @@ * Ralf Gommers -* Alex Griffing - * Charles Harris * Nathaniel Smith @@ -22,12 +20,22 @@ * Pauli Virtanen +* Eric Wieser + +* Marten van Kerkwijk + +* Stephan Hoyer + +* Allan Haldane + Emeritus members ---------------- * Travis Oliphant - Project Founder / Emeritus Leader (served: 2005-2012) +* Alex Griffing (served: 2015-2017) + NumFOCUS Subcommittee --------------------- diff -Nru python-numpy-1.13.3/doc/source/neps/dropping-python2.7-proposal.rst 
python-numpy-1.14.5/doc/source/neps/dropping-python2.7-proposal.rst --- python-numpy-1.13.3/doc/source/neps/dropping-python2.7-proposal.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/doc/source/neps/dropping-python2.7-proposal.rst 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1 @@ +.. include:: ../../neps/dropping-python2.7-proposal.rst diff -Nru python-numpy-1.13.3/doc/source/neps/index.rst python-numpy-1.14.5/doc/source/neps/index.rst --- python-numpy-1.13.3/doc/source/neps/index.rst 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/doc/source/neps/index.rst 2018-06-12 18:28:52.000000000 +0000 @@ -35,3 +35,4 @@ structured_array_extensions datetime-proposal datetime-proposal3 + dropping-python2.7-proposal diff -Nru python-numpy-1.13.3/doc/source/reference/arrays.indexing.rst python-numpy-1.14.5/doc/source/reference/arrays.indexing.rst --- python-numpy-1.13.3/doc/source/reference/arrays.indexing.rst 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/arrays.indexing.rst 2018-06-12 18:28:52.000000000 +0000 @@ -170,7 +170,7 @@ .. data:: newaxis - The :const:`newaxis` object can be used in all slicing operations to + The :const:`newaxis` object can be used in all slicing operations to create an axis of length one. :const:`newaxis` is an alias for 'None', and 'None' can be used in place of this with the same result. @@ -431,7 +431,7 @@ ... [ 9, 10, 11]]) >>> rows = (x.sum(-1) % 2) == 0 >>> rows - array([False, True, False, True], dtype=bool) + array([False, True, False, True]) >>> columns = [0, 2] >>> x[np.ix_(rows, columns)] array([[ 3, 5], @@ -503,7 +503,7 @@ Indexing ``x['field-name']`` returns a new :term:`view` to the array, which is of the same shape as *x* (except when the field is a sub-array) but of data type ``x.dtype['field-name']`` and contains -only the part of the data in the specified field. Also +only the part of the data in the specified field. 
Also :ref:`record array ` scalars can be "indexed" this way. Indexing into a structured array can also be done with a list of field names, diff -Nru python-numpy-1.13.3/doc/source/reference/c-api.array.rst python-numpy-1.14.5/doc/source/reference/c-api.array.rst --- python-numpy-1.13.3/doc/source/reference/c-api.array.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/c-api.array.rst 2018-06-12 18:28:52.000000000 +0000 @@ -76,9 +76,10 @@ your own memory, you should use the function :c:func:`PyArray_SetBaseObject` to set the base to an object which owns the memory. - If the :c:data:`NPY_ARRAY_UPDATEIFCOPY` flag is set, it has a different + If the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or the + :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flags are set, it has a different meaning, namely base is the array into which the current array will - be copied upon destruction. This overloading of the base property + be copied upon copy resolution. This overloading of the base property for two functions is likely to change in a future version of NumPy. .. c:function:: PyArray_Descr *PyArray_DESCR(PyArrayObject* arr) @@ -137,7 +138,7 @@ .. c:function:: npy_intp PyArray_Size(PyArrayObject* obj) - Returns 0 if *obj* is not a sub-class of bigndarray. Otherwise, + Returns 0 if *obj* is not a sub-class of ndarray. Otherwise, returns the total number of elements in the array. Safer version of :c:func:`PyArray_SIZE` (*obj*). @@ -217,9 +218,9 @@ can be non-zero to indicate a Fortran-style contiguous array. If *data* is not ``NULL``, then it is assumed to point to the memory to be used for the array and the *flags* argument is used as the - new flags for the array (except the state of :c:data:`NPY_OWNDATA` - and :c:data:`NPY_ARRAY_UPDATEIFCOPY` flags of the new array will - be reset). 
+ new flags for the array (except the state of :c:data:`NPY_OWNDATA`, + :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` and :c:data:`NPY_ARRAY_UPDATEIFCOPY` + flags of the new array will be reset). In addition, if *data* is non-NULL, then *strides* can also be provided. If *strides* is ``NULL``, then the array strides @@ -257,7 +258,7 @@ PyTypeObject* subtype, int nd, npy_intp* dims, int type_num, \ npy_intp* strides, void* data, int itemsize, int flags, PyObject* obj) - This is similar to :c:func:`PyArray_DescrNew` (...) except you + This is similar to :c:func:`PyArray_NewFromDescr` (...) except you specify the data-type descriptor with *type_num* and *itemsize*, where *type_num* corresponds to a builtin (or user-defined) type. If the type always has the same number of bytes, then @@ -303,7 +304,7 @@ .. c:function:: PyArray_FILLWBYTE(PyObject* obj, int val) Fill the array pointed to by *obj* ---which must be a (subclass - of) bigndarray---with the contents of *val* (evaluated as a byte). + of) ndarray---with the contents of *val* (evaluated as a byte). This macro calls memset, so obj must be contiguous. .. c:function:: PyObject* PyArray_Zeros( \ @@ -433,9 +434,9 @@ .. c:var:: NPY_ARRAY_ENSUREARRAY - Make sure the result is a base-class ndarray or bigndarray. By - default, if *op* is an instance of a subclass of the - bigndarray, an instance of that same subclass is returned. If + Make sure the result is a base-class ndarray. By + default, if *op* is an instance of a subclass of + ndarray, an instance of that same subclass is returned. If this flag is set, an ndarray object will be returned instead. .. c:var:: NPY_ARRAY_FORCECAST @@ -444,19 +445,25 @@ safely. Without this flag, a data cast will occur only if it can be done safely, otherwise an error is raised. - .. c:var:: NPY_ARRAY_UPDATEIFCOPY + .. c:var:: NPY_ARRAY_WRITEBACKIFCOPY If *op* is already an array, but does not satisfy the requirements, then a copy is made (which will satisfy the requirements). 
If this flag is present and a copy (of an object that is already an array) must be made, then the corresponding - :c:data:`NPY_ARRAY_UPDATEIFCOPY` flag is set in the returned - copy and *op* is made to be read-only. When the returned copy - is deleted (presumably after your calculations are complete), - its contents will be copied back into *op* and the *op* array + :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag is set in the returned + copy and *op* is made to be read-only. You must be sure to call + :c:func:`PyArray_ResolveWritebackIfCopy` to copy the contents + back into *op* and the *op* array will be made writeable again. If *op* is not writeable to begin - with, then an error is raised. If *op* is not already an array, - then this flag has no effect. + with, or if it is not already an array, then an error is raised. + + .. c:var:: NPY_ARRAY_UPDATEIFCOPY + + Deprecated. Use :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, which is similar. + This flag "automatically" copies the data back when the returned + array is deallocated, which is not supported in all python + implementations. .. c:var:: NPY_ARRAY_BEHAVED @@ -503,12 +510,14 @@ .. c:var:: NPY_ARRAY_INOUT_ARRAY :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \| - :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_UPDATEIFCOPY` + :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \| + :c:data:`NPY_ARRAY_UPDATEIFCOPY` .. c:var:: NPY_ARRAY_INOUT_FARRAY :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \| - :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_UPDATEIFCOPY` + :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \| + :c:data:`NPY_ARRAY_UPDATEIFCOPY` .. 
c:function:: int PyArray_GetArrayParamsFromObject( \ PyObject* op, PyArray_Descr* requested_dtype, npy_bool writeable, \ @@ -753,7 +762,8 @@ :c:data:`NPY_ARRAY_C_CONTIGUOUS`, :c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`, :c:data:`NPY_ARRAY_NOTSWAPPED`, :c:data:`NPY_ARRAY_ENSURECOPY`, - :c:data:`NPY_ARRAY_UPDATEIFCOPY`, :c:data:`NPY_ARRAY_FORCECAST`, and + :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, :c:data:`NPY_ARRAY_UPDATEIFCOPY`, + :c:data:`NPY_ARRAY_FORCECAST`, and :c:data:`NPY_ARRAY_ENSUREARRAY`. Standard combinations of flags can also be used: @@ -948,6 +958,12 @@ Type represents one of the flexible array types ( :c:data:`NPY_STRING`, :c:data:`NPY_UNICODE`, or :c:data:`NPY_VOID` ). +.. c:function:: PyDataType_ISUNSIZED(descr): + + Type has no size information attached, and can be resized. Should only be + called on flexible dtypes. Types that are attached to an array will always + be sized, hence the array form of this macro not existing. + .. c:function:: PyTypeNum_ISUSERDEF(num) .. c:function:: PyDataType_ISUSERDEF(descr) @@ -1331,6 +1347,26 @@ decrement all the items in the object array prior to calling this function. +.. c:function:: int PyArray_SetUpdateIfCopyBase(PyArrayObject* arr, PyArrayObject* base) + + Precondition: ``arr`` is a copy of ``base`` (though possibly with different + strides, ordering, etc.) Set the UPDATEIFCOPY flag and ``arr->base`` so + that when ``arr`` is destructed, it will copy any changes back to ``base``. + DEPRECATED, use :c:func:`PyArray_SetWritebackIfCopyBase`. + + Returns 0 for success, -1 for failure. + +.. c:function:: int PyArray_SetWritebackIfCopyBase(PyArrayObject* arr, PyArrayObject* base) + + Precondition: ``arr`` is a copy of ``base`` (though possibly with different + strides, ordering, etc.) Sets the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag + and ``arr->base``, and set ``base`` to READONLY. 
Call + :c:func:`PyArray_ResolveWritebackIfCopy` before calling + ``Py_DECREF`` in order to copy any changes back to ``base`` and + reset the READONLY flag. + + Returns 0 for success, -1 for failure. + Array flags ----------- @@ -1410,24 +1446,33 @@ Notice that the above 3 flags are defined so that a new, well- behaved array has these flags defined as true. -.. c:var:: NPY_ARRAY_UPDATEIFCOPY +.. c:var:: NPY_ARRAY_WRITEBACKIFCOPY The data area represents a (well-behaved) copy whose information - should be transferred back to the original when this array is deleted. + should be transferred back to the original when + :c:func:`PyArray_ResolveWritebackIfCopy` is called. This is a special flag that is set if this array represents a copy made because a user required certain flags in :c:func:`PyArray_FromAny` and a copy had to be made of some other array (and the user asked for this flag to be set in such a situation). The base attribute then points to the "misbehaved" - array (which is set read_only). When the array with this flag set - is deallocated, it will copy its contents back to the "misbehaved" + array (which is set read_only). :c:func:`PyArray_ResolveWritebackIfCopy` + will copy its contents back to the "misbehaved" array (casting if necessary) and will reset the "misbehaved" array to :c:data:`NPY_ARRAY_WRITEABLE`. If the "misbehaved" array was not :c:data:`NPY_ARRAY_WRITEABLE` to begin with then :c:func:`PyArray_FromAny` - would have returned an error because :c:data:`NPY_ARRAY_UPDATEIFCOPY` + would have returned an error because :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` would not have been possible. +.. c:var:: NPY_ARRAY_UPDATEIFCOPY + + A deprecated version of :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` which + depends upon ``dealloc`` to trigger the writeback. For backwards + compatibility, :c:func:`PyArray_ResolveWritebackIfCopy` is called at + ``dealloc`` but relying + on that behavior is deprecated and not supported in PyPy. 
+ :c:func:`PyArray_UpdateFlags` (obj, flags) will update the ``obj->flags`` for ``flags`` which can be any of :c:data:`NPY_ARRAY_C_CONTIGUOUS`, :c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_ALIGNED`, or @@ -1483,8 +1528,7 @@ .. c:var:: NPY_ARRAY_ENSUREARRAY - Make sure the resulting object is an actual ndarray (or bigndarray), - and not a sub-class. + Make sure the resulting object is an actual ndarray, and not a sub-class. .. c:var:: NPY_ARRAY_NOTSWAPPED @@ -1509,7 +1553,8 @@ combinations of the possible flags an array can have: :c:data:`NPY_ARRAY_C_CONTIGUOUS`, :c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_OWNDATA`, :c:data:`NPY_ARRAY_ALIGNED`, - :c:data:`NPY_ARRAY_WRITEABLE`, :c:data:`NPY_ARRAY_UPDATEIFCOPY`. + :c:data:`NPY_ARRAY_WRITEABLE`, :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, + :c:data:`NPY_ARRAY_UPDATEIFCOPY`. .. c:function:: PyArray_IS_C_CONTIGUOUS(arr) @@ -2888,10 +2933,10 @@ to a C-array of :c:type:`npy_intp`. The Python object could also be a single number. The *seq* variable is a pointer to a structure with members ptr and len. On successful return, *seq* ->ptr contains a - pointer to memory that must be freed to avoid a memory leak. The - restriction on memory size allows this converter to be - conveniently used for sequences intended to be interpreted as - array shapes. + pointer to memory that must be freed, by calling :c:func:`PyDimMem_FREE`, + to avoid a memory leak. The restriction on memory size allows this + converter to be conveniently used for sequences intended to be + interpreted as array shapes. .. c:function:: int PyArray_BufferConverter(PyObject* obj, PyArray_Chunk* buf) @@ -3015,7 +3060,7 @@ ^^^^^^^^^^^^^^^^^ In order to make use of the C-API from another extension module, the -``import_array`` () command must be used. If the extension module is +:c:func:`import_array` function must be called. If the extension module is self-contained in a single .c file, then that is all that needs to be done. 
If, however, the extension module involves multiple files where the C-API is needed then some additional steps must be taken. @@ -3036,7 +3081,7 @@ :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` to some name that will hold the C-API (*e.g.* myextension_ARRAY_API). This must be done **before** including the numpy/arrayobject.h file. In the module - initialization routine you call ``import_array`` (). In addition, + initialization routine you call :c:func:`import_array`. In addition, in the files that do not have the module initialization sub_routine define :c:macro:`NO_IMPORT_ARRAY` prior to including numpy/arrayobject.h. @@ -3064,6 +3109,24 @@ header file as long as you make sure that NO_IMPORT_ARRAY is #defined before #including that file. + Internally, these #defines work as follows: + + * If neither is defined, the C-API is declared to be + :c:type:`static void**`, so it is only visible within the + compilation unit that #includes numpy/arrayobject.h. + * If :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is #defined, but + :c:macro:`NO_IMPORT_ARRAY` is not, the C-API is declared to + be :c:type:`void**`, so that it will also be visible to other + compilation units. + * If :c:macro:`NO_IMPORT_ARRAY` is #defined, regardless of + whether :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is, the C-API is + declared to be :c:type:`extern void**`, so it is expected to + be defined in another compilation unit. + * Whenever :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is #defined, it + also changes the name of the variable holding the C-API, which + defaults to :c:data:`PyArray_API`, to whatever the macro is + #defined to. + Checking the API Version ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -3099,7 +3162,7 @@ it is in the C-API, however, comparing the output of this function from the value defined in the current header gives a way to test if the C-API has changed thus requiring a re-compilation of extension modules that use the - C-API. This is automatically checked in the function import_array. + C-API. 
This is automatically checked in the function :c:func:`import_array`. .. c:function:: unsigned int PyArray_GetNDArrayCFeatureVersion(void) @@ -3193,6 +3256,19 @@ :c:data:`NPY_USE_PYMEM` is 0, if :c:data:`NPY_USE_PYMEM` is 1, then the Python memory allocator is used. +.. c:function:: int PyArray_ResolveWritebackIfCopy(PyArrayObject* obj) + + If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or (deprecated) + :c:data:`NPY_ARRAY_UPDATEIFCOPY`, this function clears the flags, `DECREF` s + `obj->base` and makes it writeable, and sets ``obj->base`` to NULL. It then + copies ``obj->data`` to `obj->base->data`, and returns the error state of + the copy operation. This is the opposite of + :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called once + you are finished with ``obj``, just before ``Py_DECREF(obj)``. It may be called + multiple times, or with ``NULL`` input. See also + :c:func:`PyArray_DiscardWritebackIfCopy`. + + Returns 0 if nothing was done, -1 on error, and 1 if action was taken. Threading support ^^^^^^^^^^^^^^^^^ @@ -3410,13 +3486,28 @@ Returns the reference count of any Python object. -.. c:function:: PyArray_XDECREF_ERR(PyObject \*obj) +.. c:function:: PyArray_DiscardWritebackIfCopy(PyObject* obj) + + If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or (deprecated) + :c:data:`NPY_ARRAY_UPDATEIFCOPY`, this function clears the flags, `DECREF` s + `obj->base` and makes it writeable, and sets ``obj->base`` to NULL. In + contrast to :c:func:`PyArray_ResolveWritebackIfCopy` it makes no attempt + to copy the data from `obj->base`. This undoes + :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called after an + error when you are finished with ``obj``, just before ``Py_DECREF(obj)``. + It may be called multiple times, or with ``NULL`` input. +.. 
c:function:: PyArray_XDECREF_ERR(PyObject* obj) + + Deprecated in 1.14, use :c:func:`PyArray_DiscardWritebackIfCopy` + followed by ``Py_XDECREF`` - DECREF's an array object which may have the :c:data:`NPY_ARRAY_UPDATEIFCOPY` + DECREF's an array object which may have the (deprecated) + :c:data:`NPY_ARRAY_UPDATEIFCOPY` or :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set without causing the contents to be copied back into the original array. Resets the :c:data:`NPY_ARRAY_WRITEABLE` flag on the base object. This is useful for recovering from an error condition when - :c:data:`NPY_ARRAY_UPDATEIFCOPY` is used. + writeback semantics are used, but will lead to wrong results. Enumerated Types diff -Nru python-numpy-1.13.3/doc/source/reference/c-api.coremath.rst python-numpy-1.14.5/doc/source/reference/c-api.coremath.rst --- python-numpy-1.13.3/doc/source/reference/c-api.coremath.rst 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/c-api.coremath.rst 2018-06-12 17:31:56.000000000 +0000 @@ -183,14 +183,46 @@ * NPY_FPE_UNDERFLOW * NPY_FPE_INVALID + Note that :c:func:`npy_get_floatstatus_barrier` is preferable as it prevents + aggressive compiler optimizations reordering the call relative to + the code setting the status, which could lead to incorrect results. + .. versionadded:: 1.9.0 +.. c:function:: int npy_get_floatstatus_barrier(char*) + + Get floating point status. A pointer to a local variable is passed in to + prevent aggressive compiler optimizations from reordering this function call + relative to the code setting the status, which could lead to incorrect + results. + + Returns a bitmask with the following possible flags: + + * NPY_FPE_DIVIDEBYZERO + * NPY_FPE_OVERFLOW + * NPY_FPE_UNDERFLOW + * NPY_FPE_INVALID + + .. versionadded:: 1.15.0 + .. c:function:: int npy_clear_floatstatus() Clears the floating point status. Returns the previous status mask. 
+ Note that :c:func:`npy_clear_floatstatus_barrier` is preferable as it + prevents aggressive compiler optimizations reordering the call relative to + the code setting the status, which could lead to incorrect results. + .. versionadded:: 1.9.0 +.. c:function:: int npy_clear_floatstatus_barrier(char*) + + Clears the floating point status. A pointer to a local variable is passed in to + prevent aggressive compiler optimizations from reordering this function call. + Returns the previous status mask. + + .. versionadded:: 1.15.0 + Complex functions ~~~~~~~~~~~~~~~~~ @@ -237,7 +269,7 @@ Like for other types, NumPy includes a typedef npy_half for the 16 bit float. Unlike for most of the other types, you cannot use this as a -normal type in C, since is is a typedef for npy_uint16. For example, +normal type in C, since it is a typedef for npy_uint16. For example, 1.0 looks like 0x3c00 to C, and if you do an equality comparison between the different signed zeros, you will get -0.0 != 0.0 (0x8000 != 0x0000), which is incorrect. diff -Nru python-numpy-1.13.3/doc/source/reference/c-api.types-and-structures.rst python-numpy-1.14.5/doc/source/reference/c-api.types-and-structures.rst --- python-numpy-1.13.3/doc/source/reference/c-api.types-and-structures.rst 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/c-api.types-and-structures.rst 2018-06-12 18:28:52.000000000 +0000 @@ -133,10 +133,11 @@ is related to this array. There are two use cases: 1) If this array does not own its own memory, then base points to the Python object that owns it (perhaps another array object), 2) If this array has - the :c:data:`NPY_ARRAY_UPDATEIFCOPY` flag set, then this array is - a working copy of a "misbehaved" array. As soon as this array is - deleted, the array pointed to by base will be updated with the - contents of this array. 
+ the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or + :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set, then this array is + a working copy of a "misbehaved" array. When + ``PyArray_ResolveWritebackIfCopy`` is called, the array pointed to by base + will be updated with the contents of this array. .. c:member:: PyArray_Descr *PyArrayObject.descr @@ -153,8 +154,8 @@ Flags indicating how the memory pointed to by data is to be interpreted. Possible flags are :c:data:`NPY_ARRAY_C_CONTIGUOUS`, :c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_OWNDATA`, - :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`, and - :c:data:`NPY_ARRAY_UPDATEIFCOPY`. + :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`, + :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, and :c:data:`NPY_ARRAY_UPDATEIFCOPY`. .. c:member:: PyObject *PyArrayObject.weakreflist diff -Nru python-numpy-1.13.3/doc/source/reference/internals.code-explanations.rst python-numpy-1.14.5/doc/source/reference/internals.code-explanations.rst --- python-numpy-1.13.3/doc/source/reference/internals.code-explanations.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/internals.code-explanations.rst 2018-06-12 17:31:56.000000000 +0000 @@ -105,7 +105,7 @@ For the general case, the iteration works by keeping track of a list of coordinate counters in the iterator object. At each iteration, the last coordinate counter is increased (starting from 0). If this -counter is smaller then one less than the size of the array in that +counter is smaller than one less than the size of the array in that dimension (a pre-computed and stored value), then the counter is increased and the dataptr member is increased by the strides in that dimension and the macro ends. If the end of a dimension is reached, @@ -368,8 +368,9 @@ return arrays are constructed. 
If any provided output array doesn't have the correct type (or is mis-aligned) and is smaller than the buffer size, then a new output array is constructed with the special -UPDATEIFCOPY flag set so that when it is DECREF'd on completion of the -function, it's contents will be copied back into the output array. +:c:data:`WRITEBACKIFCOPY` flag set. At the end of the function, +:c:func:`PyArray_ResolveWritebackIfCopy` is called so that +its contents will be copied back into the output array. Iterators for the output arguments are then processed. Finally, the decision is made about how to execute the looping @@ -475,7 +476,7 @@ Methods ------- -Their are three methods of ufuncs that require calculation similar to +There are three methods of ufuncs that require calculation similar to the general-purpose ufuncs. These are reduce, accumulate, and reduceat. Each of these methods requires a setup command followed by a loop. There are four loop styles possible for the methods @@ -508,10 +509,11 @@ accumulate, or reduceat. If an output array is already provided, then it's shape is checked. If the output array is not C-contiguous, aligned, and of the correct data type, then a temporary copy is made -with the UPDATEIFCOPY flag set. In this way, the methods will be able +with the WRITEBACKIFCOPY flag set. In this way, the methods will be able to work with a well-behaved output array but the result will be copied -back into the true output array when the method computation is -complete. Finally, iterators are set up to loop over the correct axis +back into the true output array when :c:func:`PyArray_ResolveWritebackIfCopy` +is called at function completion. +Finally, iterators are set up to loop over the correct axis (depending on the value of axis provided to the method) and the setup routine returns to the actual computation routine. 
diff -Nru python-numpy-1.13.3/doc/source/reference/maskedarray.baseclass.rst python-numpy-1.14.5/doc/source/reference/maskedarray.baseclass.rst --- python-numpy-1.13.3/doc/source/reference/maskedarray.baseclass.rst 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/maskedarray.baseclass.rst 2018-06-12 17:31:56.000000000 +0000 @@ -99,7 +99,7 @@ ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], ... dtype=[('a', int), ('b', int)]) >>> x.recordmask - array([False, False, True, False, False], dtype=bool) + array([False, False, True, False, False]) .. attribute:: MaskedArray.fill_value diff -Nru python-numpy-1.13.3/doc/source/reference/maskedarray.generic.rst python-numpy-1.14.5/doc/source/reference/maskedarray.generic.rst --- python-numpy-1.13.3/doc/source/reference/maskedarray.generic.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/maskedarray.generic.rst 2018-06-12 17:31:56.000000000 +0000 @@ -379,8 +379,8 @@ When accessing a slice, the output is a masked array whose :attr:`~MaskedArray.data` attribute is a view of the original data, and whose mask is either :attr:`nomask` (if there was no invalid entries in the original -array) or a copy of the corresponding slice of the original mask. The copy is -required to avoid propagation of any modification of the mask to the original. +array) or a view of the corresponding slice of the original mask. The view is +required to ensure propagation of any modification of the mask to the original. 
>>> x = ma.array([1, 2, 3, 4, 5], mask=[0, 1, 0, 0, 1]) >>> mx = x[:3] @@ -394,7 +394,7 @@ mask = [False False False], fill_value = 999999) >>> x.mask - array([False, True, False, False, True], dtype=bool) + array([False, True, False, False, True]) >>> x.data array([ 1, -1, 3, 4, 5]) diff -Nru python-numpy-1.13.3/doc/source/reference/routines.datetime.rst python-numpy-1.14.5/doc/source/reference/routines.datetime.rst --- python-numpy-1.13.3/doc/source/reference/routines.datetime.rst 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/routines.datetime.rst 2018-06-12 17:31:56.000000000 +0000 @@ -5,6 +5,13 @@ .. currentmodule:: numpy +.. autosummary:: + :toctree: generated/ + + datetime_as_string + datetime_data + + Business Day Functions ====================== diff -Nru python-numpy-1.13.3/doc/source/reference/routines.io.rst python-numpy-1.14.5/doc/source/reference/routines.io.rst --- python-numpy-1.13.3/doc/source/reference/routines.io.rst 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/routines.io.rst 2018-06-12 18:28:52.000000000 +0000 @@ -14,7 +14,7 @@ savez_compressed The format of these binary file types is documented in -http://docs.scipy.org/doc/numpy/neps/npy-format.html +http://docs.scipy.org/doc/numpy/neps/npy-format.html Text files ---------- @@ -45,6 +45,8 @@ array2string array_repr array_str + format_float_positional + format_float_scientific Memory mapping files -------------------- diff -Nru python-numpy-1.13.3/doc/source/reference/routines.linalg.rst python-numpy-1.14.5/doc/source/reference/routines.linalg.rst --- python-numpy-1.13.3/doc/source/reference/routines.linalg.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/routines.linalg.rst 2018-06-12 17:31:56.000000000 +0000 @@ -18,6 +18,7 @@ matmul tensordot einsum + einsum_path linalg.matrix_power kron @@ -71,6 +72,8 @@ linalg.LinAlgError +.. 
_routines.linalg-broadcasting: + Linear algebra on several matrices at once ------------------------------------------ diff -Nru python-numpy-1.13.3/doc/source/reference/routines.polynomials.classes.rst python-numpy-1.14.5/doc/source/reference/routines.polynomials.classes.rst --- python-numpy-1.13.3/doc/source/reference/routines.polynomials.classes.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/routines.polynomials.classes.rst 2018-06-12 17:31:56.000000000 +0000 @@ -52,7 +52,7 @@ >>> from numpy.polynomial import Polynomial as P >>> p = P([1,2,3]) >>> p - Polynomial([ 1., 2., 3.], [-1., 1.], [-1., 1.]) + Polynomial([ 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) Note that there are three parts to the long version of the printout. The first is the coefficients, the second is the domain, and the third is the @@ -77,19 +77,19 @@ Addition and Subtraction:: >>> p + p - Polynomial([ 2., 4., 6.], [-1., 1.], [-1., 1.]) + Polynomial([ 2., 4., 6.], domain=[-1, 1], window=[-1, 1]) >>> p - p - Polynomial([ 0.], [-1., 1.], [-1., 1.]) + Polynomial([ 0.], domain=[-1, 1], window=[-1, 1]) Multiplication:: >>> p * p - Polynomial([ 1., 4., 10., 12., 9.], [-1., 1.], [-1., 1.]) + Polynomial([ 1., 4., 10., 12., 9.], domain=[-1, 1], window=[-1, 1]) Powers:: >>> p**2 - Polynomial([ 1., 4., 10., 12., 9.], [-1., 1.], [-1., 1.]) + Polynomial([ 1., 4., 10., 12., 9.], domain=[-1, 1], window=[-1, 1]) Division: @@ -100,20 +100,20 @@ will be deprecated:: >>> p // P([-1, 1]) - Polynomial([ 5., 3.], [-1., 1.], [-1., 1.]) + Polynomial([ 5., 3.], domain=[-1, 1], window=[-1, 1]) Remainder:: >>> p % P([-1, 1]) - Polynomial([ 6.], [-1., 1.], [-1., 1.]) + Polynomial([ 6.], domain=[-1, 1], window=[-1, 1]) Divmod:: >>> quo, rem = divmod(p, P([-1, 1])) >>> quo - Polynomial([ 5., 3.], [-1., 1.], [-1., 1.]) + Polynomial([ 5., 3.], domain=[-1, 1], window=[-1, 1]) >>> rem - Polynomial([ 6.], [-1., 1.], [-1., 1.]) + Polynomial([ 6.], domain=[-1, 1], window=[-1, 1]) 
Evaluation:: @@ -134,7 +134,7 @@ functions:: >>> p(p) - Polynomial([ 6., 16., 36., 36., 27.], [-1., 1.], [-1., 1.]) + Polynomial([ 6., 16., 36., 36., 27.], domain=[-1, 1], window=[-1, 1]) Roots:: @@ -148,11 +148,11 @@ operations:: >>> p + [1, 2, 3] - Polynomial([ 2., 4., 6.], [-1., 1.], [-1., 1.]) + Polynomial([ 2., 4., 6.], domain=[-1, 1], window=[-1, 1]) >>> [1, 2, 3] * p - Polynomial([ 1., 4., 10., 12., 9.], [-1., 1.], [-1., 1.]) + Polynomial([ 1., 4., 10., 12., 9.], domain=[-1, 1], window=[-1, 1]) >>> p / 2 - Polynomial([ 0.5, 1. , 1.5], [-1., 1.], [-1., 1.]) + Polynomial([ 0.5, 1. , 1.5], domain=[-1, 1], window=[-1, 1]) Polynomials that differ in domain, window, or class can't be mixed in arithmetic:: @@ -180,7 +180,7 @@ and window casting:: >>> p(T([0, 1])) - Chebyshev([ 2.5, 2. , 1.5], [-1., 1.], [-1., 1.]) + Chebyshev([ 2.5, 2. , 1.5], domain=[-1, 1], window=[-1, 1]) Which gives the polynomial `p` in Chebyshev form. This works because :math:`T_1(x) = x` and substituting :math:`x` for :math:`x` doesn't change @@ -195,18 +195,18 @@ >>> from numpy.polynomial import Polynomial as P >>> p = P([2, 6]) >>> p.integ() - Polynomial([ 0., 2., 3.], [-1., 1.], [-1., 1.]) + Polynomial([ 0., 2., 3.], domain=[-1, 1], window=[-1, 1]) >>> p.integ(2) - Polynomial([ 0., 0., 1., 1.], [-1., 1.], [-1., 1.]) + Polynomial([ 0., 0., 1., 1.], domain=[-1, 1], window=[-1, 1]) The first example integrates `p` once, the second example integrates it twice. By default, the lower bound of the integration and the integration constant are 0, but both can be specified.:: >>> p.integ(lbnd=-1) - Polynomial([-1., 2., 3.], [-1., 1.], [-1., 1.]) + Polynomial([-1., 2., 3.], domain=[-1, 1], window=[-1, 1]) >>> p.integ(lbnd=-1, k=1) - Polynomial([ 0., 2., 3.], [-1., 1.], [-1., 1.]) + Polynomial([ 0., 2., 3.], domain=[-1, 1], window=[-1, 1]) In the first case the lower bound of the integration is set to -1 and the integration constant is 0. 
In the second the constant of integration is set @@ -215,9 +215,9 @@ >>> p = P([1, 2, 3]) >>> p.deriv(1) - Polynomial([ 2., 6.], [-1., 1.], [-1., 1.]) + Polynomial([ 2., 6.], domain=[-1, 1], window=[-1, 1]) >>> p.deriv(2) - Polynomial([ 6.], [-1., 1.], [-1., 1.]) + Polynomial([ 6.], domain=[-1, 1], window=[-1, 1]) Other Polynomial Constructors @@ -233,9 +233,9 @@ >>> from numpy.polynomial import Chebyshev as T >>> p = P.fromroots([1, 2, 3]) >>> p - Polynomial([ -6., 11., -6., 1.], [-1., 1.], [-1., 1.]) + Polynomial([ -6., 11., -6., 1.], domain=[-1, 1], window=[-1, 1]) >>> p.convert(kind=T) - Chebyshev([ -9. , 11.75, -3. , 0.25], [-1., 1.], [-1., 1.]) + Chebyshev([ -9. , 11.75, -3. , 0.25], domain=[-1, 1], window=[-1, 1]) The convert method can also convert domain and window:: @@ -249,9 +249,9 @@ method returns the basis polynomial of given degree:: >>> P.basis(3) - Polynomial([ 0., 0., 0., 1.], [-1., 1.], [-1., 1.]) + Polynomial([ 0., 0., 0., 1.], domain=[-1, 1], window=[-1, 1]) >>> T.cast(p) - Chebyshev([ -9. , 11.75, -3. , 0.25], [-1., 1.], [-1., 1.]) + Chebyshev([ -9. , 11.75, -3. , 0.25], domain=[-1, 1], window=[-1, 1]) Conversions between types can be useful, but it is *not* recommended for routine use. 
The loss of numerical precision in passing from a diff -Nru python-numpy-1.13.3/doc/source/reference/routines.polynomials.package.rst python-numpy-1.14.5/doc/source/reference/routines.polynomials.package.rst --- python-numpy-1.13.3/doc/source/reference/routines.polynomials.package.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/routines.polynomials.package.rst 2018-06-12 17:31:56.000000000 +0000 @@ -15,3 +15,4 @@ routines.polynomials.laguerre routines.polynomials.hermite routines.polynomials.hermite_e + routines.polynomials.polyutils diff -Nru python-numpy-1.13.3/doc/source/reference/routines.polynomials.polyutils.rst python-numpy-1.14.5/doc/source/reference/routines.polynomials.polyutils.rst --- python-numpy-1.13.3/doc/source/reference/routines.polynomials.polyutils.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/routines.polynomials.polyutils.rst 2018-06-12 17:31:56.000000000 +0000 @@ -0,0 +1,4 @@ +Polyutils +========= + +.. automodule:: numpy.polynomial.polyutils diff -Nru python-numpy-1.13.3/doc/source/reference/routines.set.rst python-numpy-1.14.5/doc/source/reference/routines.set.rst --- python-numpy-1.13.3/doc/source/reference/routines.set.rst 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/routines.set.rst 2018-06-12 17:31:56.000000000 +0000 @@ -17,7 +17,7 @@ in1d intersect1d - isin + isin setdiff1d setxor1d union1d diff -Nru python-numpy-1.13.3/doc/source/reference/routines.testing.rst python-numpy-1.14.5/doc/source/reference/routines.testing.rst --- python-numpy-1.13.3/doc/source/reference/routines.testing.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/routines.testing.rst 2018-06-12 17:31:56.000000000 +0000 @@ -41,7 +41,6 @@ decorators.slow decorate_methods - Test Running ------------ .. 
autosummary:: @@ -50,3 +49,4 @@ Tester run_module_suite rundocs + suppress_warnings diff -Nru python-numpy-1.13.3/doc/source/reference/ufuncs.rst python-numpy-1.14.5/doc/source/reference/ufuncs.rst --- python-numpy-1.13.3/doc/source/reference/ufuncs.rst 2017-09-24 22:47:22.000000000 +0000 +++ python-numpy-1.14.5/doc/source/reference/ufuncs.rst 2018-06-12 18:28:52.000000000 +0000 @@ -17,13 +17,16 @@ supporting :ref:`array broadcasting `, :ref:`type casting `, and several other standard features. That is, a ufunc is a ":term:`vectorized`" wrapper for a function that -takes a fixed number of scalar inputs and produces a fixed number of -scalar outputs. +takes a fixed number of specific inputs and produces a fixed number of +specific outputs. In NumPy, universal functions are instances of the :class:`numpy.ufunc` class. Many of the built-in functions are -implemented in compiled C code, but :class:`ufunc` instances can also -be produced using the :func:`frompyfunc` factory function. +implemented in compiled C code. The basic ufuncs operate on scalars, but +there is also a generalized kind for which the basic elements are sub-arrays +(vectors, matrices, etc.), and broadcasting is done over other dimensions. +One can also produce custom :class:`ufunc` instances using the +:func:`frompyfunc` factory function. .. _ufuncs.broadcasting: @@ -34,7 +37,9 @@ .. index:: broadcasting Each universal function takes array inputs and produces array outputs -by performing the core function element-wise on the inputs. Standard +by performing the core function element-wise on the inputs (where an +element is generally a scalar, but can be a vector or higher-order +sub-array for generalized ufuncs). Standard broadcasting rules are applied so that inputs not sharing exactly the same shapes can still be usefully operated on. 
Broadcasting can be understood by four rules: @@ -102,8 +107,12 @@ The output of the ufunc (and its methods) is not necessarily an :class:`ndarray`, if all input arguments are not :class:`ndarrays `. +Indeed, if any input defines an :obj:`~class.__array_ufunc__` method, +control will be passed completely to that function, i.e., the ufunc is +`overridden `_. -All output arrays will be passed to the :obj:`~class.__array_prepare__` and +If none of the inputs overrides the ufunc, then +all output arrays will be passed to the :obj:`~class.__array_prepare__` and :obj:`~class.__array_wrap__` methods of the input (besides :class:`ndarrays `, and scalars) that defines it **and** has the highest :obj:`~class.__array_priority__` of any other input to the @@ -275,6 +284,8 @@ your large (small precision) array. +.. _ufuncs.overrides: + Overriding Ufunc behavior ========================= @@ -322,7 +333,8 @@ Accepts a boolean array which is broadcast together with the operands. Values of True indicate to calculate the ufunc at that position, values - of False indicate to leave the value in the output alone. + of False indicate to leave the value in the output alone. This argument + cannot be used for generalized ufuncs as those take non-scalar input. *casting* @@ -375,7 +387,9 @@ search and choose a particular loop. A list of available signatures is provided by the **types** attribute of the ufunc object. For backwards compatibility this argument can also be provided as *sig*, although - the long form is preferred. + the long form is preferred. Note that this should not be confused with + the generalized ufunc signature that is stored in the **signature** + attribute of the of the ufunc object. *extobj* @@ -417,13 +431,14 @@ ufunc.ntypes ufunc.types ufunc.identity + ufunc.signature .. _ufuncs.methods: Methods ------- -All ufuncs have four methods. However, these methods only make sense on +All ufuncs have four methods. 
However, these methods only make sense on scalar ufuncs that take two input arguments and return one output argument. Attempting to call these methods on other ufuncs will cause a :exc:`ValueError`. The reduce-like methods all take an *axis* keyword, a *dtype* @@ -489,7 +504,7 @@ call in order to use the optional output argument(s) to place the output(s) in an object (or objects) of your choice. -Recall that each ufunc operates element-by-element. Therefore, each +Recall that each ufunc operates element-by-element. Therefore, each scalar ufunc will be described as if acting on a set of scalar inputs to return a set of scalar outputs. diff -Nru python-numpy-1.13.3/doc/source/release.rst python-numpy-1.14.5/doc/source/release.rst --- python-numpy-1.13.3/doc/source/release.rst 2017-09-29 18:03:50.000000000 +0000 +++ python-numpy-1.14.5/doc/source/release.rst 2018-06-12 18:28:52.000000000 +0000 @@ -2,6 +2,12 @@ Release Notes ************* +.. include:: ../release/1.14.5-notes.rst +.. include:: ../release/1.14.4-notes.rst +.. include:: ../release/1.14.3-notes.rst +.. include:: ../release/1.14.2-notes.rst +.. include:: ../release/1.14.1-notes.rst +.. include:: ../release/1.14.0-notes.rst .. include:: ../release/1.13.3-notes.rst .. include:: ../release/1.13.2-notes.rst .. include:: ../release/1.13.1-notes.rst diff -Nru python-numpy-1.13.3/doc/source/_templates/defindex.html python-numpy-1.14.5/doc/source/_templates/defindex.html --- python-numpy-1.13.3/doc/source/_templates/defindex.html 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/doc/source/_templates/defindex.html 2018-06-12 17:31:56.000000000 +0000 @@ -0,0 +1,35 @@ +{# + basic/defindex.html + ~~~~~~~~~~~~~~~~~~~ + + Default template for the "index" page. + + :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +#} +{%- extends "layout.html" %} +{% set title = _('Overview') %} +{% block body %} +

{{ docstitle|e }}

+

+ {{ _('Welcome! This is') }} + {% block description %}{{ _('the documentation for') }} {{ project|e }} + {{ release|e }}{% if last_updated %}, {{ _('last updated') }} {{ last_updated|e }}{% endif %}{% endblock %}. +

+ {% block tables %} +

{{ _('Indices and tables:') }}

+ + +
+ + + + + +
+ {% endblock %} +{% endblock %} diff -Nru python-numpy-1.13.3/doc/source/user/building.rst python-numpy-1.14.5/doc/source/user/building.rst --- python-numpy-1.13.3/doc/source/user/building.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/user/building.rst 2018-06-12 18:28:52.000000000 +0000 @@ -32,7 +32,7 @@ FORTRAN 77 compiler installed. Note that NumPy is developed mainly using GNU compilers. Compilers from - other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Porland, + other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Portland, Lahey, HP, IBM, Microsoft are only supported in the form of community feedback, and may not work out of the box. GCC 4.x (and later) compilers are recommended. @@ -137,7 +137,7 @@ Building with ATLAS support --------------------------- -Ubuntu +Ubuntu ~~~~~~ You can install the necessary package for optimized ATLAS with this command:: diff -Nru python-numpy-1.13.3/doc/source/user/c-info.how-to-extend.rst python-numpy-1.14.5/doc/source/user/c-info.how-to-extend.rst --- python-numpy-1.13.3/doc/source/user/c-info.how-to-extend.rst 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/doc/source/user/c-info.how-to-extend.rst 2018-06-12 17:31:56.000000000 +0000 @@ -56,8 +56,8 @@ be called init{name} where {name} is the name of the module from Python. This function must be declared so that it is visible to code outside of the routine. Besides adding the methods and constants you -desire, this subroutine must also contain calls to import_array() -and/or import_ufunc() depending on which C-API is needed. Forgetting +desire, this subroutine must also contain calls like ``import_array()`` +and/or ``import_ufunc()`` depending on which C-API is needed. Forgetting to place these commands will show itself as an ugly segmentation fault (crash) as soon as any C-API subroutine is actually called. 
It is actually possible to have multiple init{name} functions in a single @@ -468,18 +468,20 @@ Equivalent to :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \| + :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \| :c:data:`NPY_ARRAY_UPDATEIFCOPY`. This combination of flags is useful to specify an array that will be used for both - input and output. If a copy is needed, then when the - temporary is deleted (by your use of :c:func:`Py_DECREF` at - the end of the interface routine), the temporary array - will be copied back into the original array passed in. Use - of the :c:data:`NPY_ARRAY_UPDATEIFCOPY` flag requires that the input + input and output. :c:func:`PyArray_ResolveWritebackIfCopy` + must be called before :func:`Py_DECREF` at + the end of the interface routine to write back the temporary data + into the original array passed in. Use + of the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or + :c:data:`NPY_ARRAY_UPDATEIFCOPY` flags requires that the input object is already an array (because other objects cannot be automatically updated in this fashion). If an error - occurs use :c:func:`PyArray_DECREF_ERR` (obj) on an array - with the :c:data:`NPY_ARRAY_UPDATEIFCOPY` flag set. This will - delete the array without causing the contents to be copied + occurs use :c:func:`PyArray_DiscardWritebackIfCopy` (obj) on an + array with these flags set. This will set the underlying base array + writable without causing the contents to be copied back into the original array. @@ -603,7 +605,8 @@ The following example shows how you might write a wrapper that accepts two input arguments (that will be converted to an array) and an output argument (that must be an array). The function returns None and -updates the output array. +updates the output array. Note the updated use of WRITEBACKIFCOPY semantics +for NumPy v1.14 and above .. 
code-block:: c @@ -616,11 +619,15 @@ if (!PyArg_ParseTuple(args, "OOO!", &arg1, &arg2, &PyArray_Type, &out)) return NULL; - arr1 = PyArray_FROM_OTF(arg1, NPY_DOUBLE, NPY_IN_ARRAY); + arr1 = PyArray_FROM_OTF(arg1, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); if (arr1 == NULL) return NULL; - arr2 = PyArray_FROM_OTF(arg2, NPY_DOUBLE, NPY_IN_ARRAY); + arr2 = PyArray_FROM_OTF(arg2, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY); if (arr2 == NULL) goto fail; - oarr = PyArray_FROM_OTF(out, NPY_DOUBLE, NPY_INOUT_ARRAY); + #if NPY_API_VERSION >= 0x0000000c + oarr = PyArray_FROM_OTF(out, NPY_DOUBLE, NPY_ARRAY_INOUT_ARRAY2); + #else + oarr = PyArray_FROM_OTF(out, NPY_DOUBLE, NPY_ARRAY_INOUT_ARRAY); + #endif if (oarr == NULL) goto fail; /* code that makes use of arguments */ @@ -635,6 +642,9 @@ Py_DECREF(arr1); Py_DECREF(arr2); + #if NPY_API_VERSION >= 0x0000000c + PyArray_ResolveWritebackIfCopy(oarr); + #endif Py_DECREF(oarr); Py_INCREF(Py_None); return Py_None; @@ -642,6 +652,9 @@ fail: Py_XDECREF(arr1); Py_XDECREF(arr2); - PyArray_XDECREF_ERR(oarr); + #if NPY_API_VERSION >= 0x0000000c + PyArray_DiscardWritebackIfCopy(oarr); + #endif + Py_XDECREF(oarr); return NULL; } diff -Nru python-numpy-1.13.3/doc/source/user/c-info.ufunc-tutorial.rst python-numpy-1.14.5/doc/source/user/c-info.ufunc-tutorial.rst --- python-numpy-1.13.3/doc/source/user/c-info.ufunc-tutorial.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/user/c-info.ufunc-tutorial.rst 2018-06-12 18:28:52.000000000 +0000 @@ -1098,7 +1098,7 @@ .. 
code-block:: c static void - double_add(char *args, npy_intp *dimensions, npy_intp *steps, + double_add(char **args, npy_intp *dimensions, npy_intp *steps, void *extra) { npy_intp i; diff -Nru python-numpy-1.13.3/doc/source/user/numpy-for-matlab-users.rst python-numpy-1.14.5/doc/source/user/numpy-for-matlab-users.rst --- python-numpy-1.13.3/doc/source/user/numpy-for-matlab-users.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/user/numpy-for-matlab-users.rst 2018-06-12 18:28:52.000000000 +0000 @@ -31,7 +31,7 @@ these arrays are designed to act more or less like matrix operations in linear algebra. - In NumPy the basic type is a multidimensional ``array``. Operations - on these arrays in all dimensionalities including 2D are elementwise + on these arrays in all dimensionalities including 2D are element-wise operations. However, there is a special ``matrix`` type for doing linear algebra, which is just a subclass of the ``array`` class. Operations on matrix-class arrays are linear algebra operations. @@ -77,9 +77,10 @@ linear algebra operations. - You can have standard vectors or row/column vectors if you like. -The only disadvantage of using the array type is that you will have to -use ``dot`` instead of ``*`` to multiply (reduce) two tensors (scalar -product, matrix vector multiplication etc.). +Until Python 3.5 the only disadvantage of using the array type was that you +had to use ``dot`` instead of ``*`` to multiply (reduce) two tensors +(scalar product, matrix vector multiplication etc.). Since Python 3.5 you +can use the matrix multiplication ``@`` operator. Long answer ----------- @@ -136,7 +137,9 @@ ``dot(v,A)`` treats ``v`` as a row vector. This can save you having to type a lot of transposes. - ``<:(`` Having to use the ``dot()`` function for matrix-multiply is - messy -- ``dot(dot(A,B),C)`` vs. ``A*B*C``. + messy -- ``dot(dot(A,B),C)`` vs. ``A*B*C``. 
This isn't an issue with + Python >= 3.5 because the ``@`` operator allows it to be written as + ``A @ B @ C``. - ``:)`` Element-wise multiplication is easy: ``A*B``. - ``:)`` ``array`` is the "default" NumPy type, so it gets the most testing, and is the type most likely to be returned by 3rd party @@ -145,7 +148,7 @@ - ``:)`` Closer in semantics to tensor algebra, if you are familiar with that. - ``:)`` *All* operations (``*``, ``/``, ``+``, ``-`` etc.) are - elementwise + element-wise. - ``matrix`` @@ -160,11 +163,12 @@ it's a bug), but 3rd party code based on NumPy may not honor type preservation like NumPy does. - ``:)`` ``A*B`` is matrix multiplication, so more convenient for - linear algebra. + linear algebra (For Python >= 3.5 plain arrays have the same convenience + with the ``@`` operator). - ``<:(`` Element-wise multiplication requires calling a function, ``multiply(A,B)``. - ``<:(`` The use of operator overloading is a bit illogical: ``*`` - does not work elementwise but ``/`` does. + does not work element-wise but ``/`` does. The ``array`` is thus much more advisable to use. @@ -265,11 +269,11 @@ - Distance between 1 and the nearest floating point number. * - ``ode45`` - - ``scipy.integrate.ode(f).set_integrator('dopri5')`` + - ``scipy.integrate.solve_ivp(f)`` - integrate an ODE with Runge-Kutta 4,5 * - ``ode15s`` - - ``scipy.integrate.ode(f).set_integrator('vode', method='bdf', order=5)`` + - ``scipy.integrate.solve_ivp(f, method='BDF')`` - integrate an ODE with BDF method Linear Algebra Equivalents diff -Nru python-numpy-1.13.3/doc/source/user/quickstart.rst python-numpy-1.14.5/doc/source/user/quickstart.rst --- python-numpy-1.13.3/doc/source/user/quickstart.rst 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/doc/source/user/quickstart.rst 2018-06-12 18:28:52.000000000 +0000 @@ -25,14 +25,12 @@ NumPy's main object is the homogeneous multidimensional array. 
It is a table of elements (usually numbers), all of the same type, indexed by a -tuple of positive integers. In NumPy dimensions are called *axes*. The -number of axes is *rank*. +tuple of positive integers. In NumPy dimensions are called *axes*. -For example, the coordinates of a point in 3D space ``[1, 2, 1]`` is an -array of rank 1, because it has one axis. That axis has a length of 3. -In the example pictured below, the array has rank 2 (it is 2-dimensional). -The first dimension (axis) has a length of 2, the second dimension has a -length of 3. +For example, the coordinates of a point in 3D space ``[1, 2, 1]`` has +one axis. That axis has 3 elements in it, so we say it has a length +of 3. In the example pictured below, the array has 2 axes. The first +axis has a length of 2, the second axis has a length of 3. :: @@ -46,14 +44,12 @@ an ``ndarray`` object are: ndarray.ndim - the number of axes (dimensions) of the array. In the Python world, - the number of dimensions is referred to as *rank*. + the number of axes (dimensions) of the array. ndarray.shape the dimensions of the array. This is a tuple of integers indicating the size of the array in each dimension. For a matrix with *n* rows and *m* columns, ``shape`` will be ``(n,m)``. The length of the - ``shape`` tuple is therefore the rank, or number of dimensions, - ``ndim``. + ``shape`` tuple is therefore the number of axes, ``ndim``. ndarray.size the total number of elements of the array. This is equal to the product of the elements of ``shape``. @@ -297,7 +293,7 @@ >>> 10*np.sin(a) array([ 9.12945251, -9.88031624, 7.4511316 , -2.62374854]) >>> a<35 - array([ True, True, False, False], dtype=bool) + array([ True, True, False, False]) Unlike in many matrix languages, the product operator ``*`` operates elementwise in NumPy arrays. The matrix product can be performed using @@ -537,8 +533,8 @@ ``b[i,...]``. The **dots** (``...``) represent as many colons as needed to produce a -complete indexing tuple. 
For example, if ``x`` is a rank 5 array (i.e., -it has 5 axes), then +complete indexing tuple. For example, if ``x`` is an array with 5 +axes, then - ``x[1,2,...]`` is equivalent to ``x[1,2,:,:,:]``, - ``x[...,3]`` to ``x[:,:,:,:,3]`` and @@ -1119,13 +1115,13 @@ [-0.53657292, 0.42016704, 0.99060736, 0.65028784], [-0.28790332, -0.96139749, -0.75098725, 0.14987721]]) >>> - >>> ind = data.argmax(axis=0) # index of the maxima for each series + >>> ind = data.argmax(axis=0) # index of the maxima for each series >>> ind array([2, 0, 3, 1]) >>> - >>> time_max = time[ ind] # times corresponding to the maxima + >>> time_max = time[ind] # times corresponding to the maxima >>> - >>> data_max = data[ind, xrange(data.shape[1])] # => data[ind[0],0], data[ind[1],1]... + >>> data_max = data[ind, range(data.shape[1])] # => data[ind[0],0], data[ind[1],1]... >>> >>> time_max array([ 82.5 , 20. , 113.75, 51.25]) @@ -1180,7 +1176,7 @@ >>> b # b is a boolean with a's shape array([[False, False, False, False], [False, True, True, True], - [ True, True, True, True]], dtype=bool) + [ True, True, True, True]]) >>> a[b] # 1d array with the selected elements array([ 5, 6, 7, 8, 9, 10, 11]) @@ -1245,9 +1241,9 @@ Note that the length of the 1D boolean array must coincide with the length of the dimension (or axis) you want to slice. In the previous -example, ``b1`` is a 1-rank array with length 3 (the number of *rows* in -``a``), and ``b2`` (of length 4) is suitable to index the 2nd rank -(columns) of ``a``. +example, ``b1`` has length 3 (the number of *rows* in ``a``), and +``b2`` (of length 4) is suitable to index the 2nd axis (columns) of +``a``. 
The ix_() function ------------------- diff -Nru python-numpy-1.13.3/doc/sphinxext/.gitignore python-numpy-1.14.5/doc/sphinxext/.gitignore --- python-numpy-1.13.3/doc/sphinxext/.gitignore 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/.gitignore 2018-06-07 19:30:04.000000000 +0000 @@ -4,5 +4,7 @@ *.pyc *.pyo *.egg-info +*.swp +*.swo build dist diff -Nru python-numpy-1.13.3/doc/sphinxext/MANIFEST.in python-numpy-1.14.5/doc/sphinxext/MANIFEST.in --- python-numpy-1.13.3/doc/sphinxext/MANIFEST.in 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/MANIFEST.in 2018-06-07 19:30:04.000000000 +0000 @@ -1,5 +1,5 @@ include MANIFEST.in -recursive-include numpydoc/tests *.py +recursive-include numpydoc * include *.txt include *.rst diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/comment_eater.py python-numpy-1.14.5/doc/sphinxext/numpydoc/comment_eater.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/comment_eater.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/comment_eater.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,169 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -if sys.version_info[0] >= 3: - from io import StringIO -else: - from io import StringIO - -import compiler -import inspect -import textwrap -import tokenize - -from .compiler_unparse import unparse - - -class Comment(object): - """ A comment block. - """ - is_comment = True - def __init__(self, start_lineno, end_lineno, text): - # int : The first line number in the block. 1-indexed. - self.start_lineno = start_lineno - # int : The last line number. Inclusive! - self.end_lineno = end_lineno - # str : The text block including '#' character but not any leading spaces. - self.text = text - - def add(self, string, start, end, line): - """ Add a new comment line. 
- """ - self.start_lineno = min(self.start_lineno, start[0]) - self.end_lineno = max(self.end_lineno, end[0]) - self.text += string - - def __repr__(self): - return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, - self.end_lineno, self.text) - - -class NonComment(object): - """ A non-comment block of code. - """ - is_comment = False - def __init__(self, start_lineno, end_lineno): - self.start_lineno = start_lineno - self.end_lineno = end_lineno - - def add(self, string, start, end, line): - """ Add lines to the block. - """ - if string.strip(): - # Only add if not entirely whitespace. - self.start_lineno = min(self.start_lineno, start[0]) - self.end_lineno = max(self.end_lineno, end[0]) - - def __repr__(self): - return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, - self.end_lineno) - - -class CommentBlocker(object): - """ Pull out contiguous comment blocks. - """ - def __init__(self): - # Start with a dummy. - self.current_block = NonComment(0, 0) - - # All of the blocks seen so far. - self.blocks = [] - - # The index mapping lines of code to their associated comment blocks. - self.index = {} - - def process_file(self, file): - """ Process a file object. - """ - if sys.version_info[0] >= 3: - nxt = file.__next__ - else: - nxt = file.next - for token in tokenize.generate_tokens(nxt): - self.process_token(*token) - self.make_index() - - def process_token(self, kind, string, start, end, line): - """ Process a single token. - """ - if self.current_block.is_comment: - if kind == tokenize.COMMENT: - self.current_block.add(string, start, end, line) - else: - self.new_noncomment(start[0], end[0]) - else: - if kind == tokenize.COMMENT: - self.new_comment(string, start, end, line) - else: - self.current_block.add(string, start, end, line) - - def new_noncomment(self, start_lineno, end_lineno): - """ We are transitioning from a noncomment to a comment. 
- """ - block = NonComment(start_lineno, end_lineno) - self.blocks.append(block) - self.current_block = block - - def new_comment(self, string, start, end, line): - """ Possibly add a new comment. - - Only adds a new comment if this comment is the only thing on the line. - Otherwise, it extends the noncomment block. - """ - prefix = line[:start[1]] - if prefix.strip(): - # Oops! Trailing comment, not a comment block. - self.current_block.add(string, start, end, line) - else: - # A comment block. - block = Comment(start[0], end[0], string) - self.blocks.append(block) - self.current_block = block - - def make_index(self): - """ Make the index mapping lines of actual code to their associated - prefix comments. - """ - for prev, block in zip(self.blocks[:-1], self.blocks[1:]): - if not block.is_comment: - self.index[block.start_lineno] = prev - - def search_for_comment(self, lineno, default=None): - """ Find the comment block just before the given line number. - - Returns None (or the specified default) if there is no such block. - """ - if not self.index: - self.make_index() - block = self.index.get(lineno, None) - text = getattr(block, 'text', default) - return text - - -def strip_comment_marker(text): - """ Strip # markers at the front of a block of comment text. - """ - lines = [] - for line in text.splitlines(): - lines.append(line.lstrip('#')) - text = textwrap.dedent('\n'.join(lines)) - return text - - -def get_class_traits(klass): - """ Yield all of the documentation for trait definitions on a class object. - """ - # FIXME: gracefully handle errors here or in the caller? - source = inspect.getsource(klass) - cb = CommentBlocker() - cb.process_file(StringIO(source)) - mod_ast = compiler.parse(source) - class_ast = mod_ast.node.nodes[0] - for node in class_ast.code.nodes: - # FIXME: handle other kinds of assignments? 
- if isinstance(node, compiler.ast.Assign): - name = node.nodes[0].name - rhs = unparse(node.expr).strip() - doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) - yield name, rhs, doc - diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/compiler_unparse.py python-numpy-1.14.5/doc/sphinxext/numpydoc/compiler_unparse.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/compiler_unparse.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/compiler_unparse.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,865 +0,0 @@ -""" Turn compiler.ast structures back into executable python code. - - The unparse method takes a compiler.ast tree and transforms it back into - valid python code. It is incomplete and currently only works for - import statements, function calls, function definitions, assignments, and - basic expressions. - - Inspired by python-2.5-svn/Demo/parser/unparse.py - - fixme: We may want to move to using _ast trees because the compiler for - them is about 6 times faster than compiler.compile. -""" -from __future__ import division, absolute_import, print_function - -import sys -from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add - -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - -def unparse(ast, single_line_functions=False): - s = StringIO() - UnparseCompilerAst(ast, s, single_line_functions) - return s.getvalue().lstrip() - -op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, - 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } - -class UnparseCompilerAst: - """ Methods in this class recursively traverse an AST and - output source code for the abstract syntax; original formatting - is disregarged. - """ - - ######################################################################### - # object interface. 
- ######################################################################### - - def __init__(self, tree, file = sys.stdout, single_line_functions=False): - """ Unparser(tree, file=sys.stdout) -> None. - - Print the source for tree to file. - """ - self.f = file - self._single_func = single_line_functions - self._do_indent = True - self._indent = 0 - self._dispatch(tree) - self._write("\n") - self.f.flush() - - ######################################################################### - # Unparser private interface. - ######################################################################### - - ### format, output, and dispatch methods ################################ - - def _fill(self, text = ""): - "Indent a piece of text, according to the current indentation level" - if self._do_indent: - self._write("\n"+" "*self._indent + text) - else: - self._write(text) - - def _write(self, text): - "Append a piece of text to the current line." - self.f.write(text) - - def _enter(self): - "Print ':', and increase the indentation." - self._write(": ") - self._indent += 1 - - def _leave(self): - "Decrease the indentation level." - self._indent -= 1 - - def _dispatch(self, tree): - "_dispatcher function, _dispatching tree type T to method _T." - if isinstance(tree, list): - for t in tree: - self._dispatch(t) - return - meth = getattr(self, "_"+tree.__class__.__name__) - if tree.__class__.__name__ == 'NoneType' and not self._do_indent: - return - meth(tree) - - - ######################################################################### - # compiler.ast unparsing methods. - # - # There should be one method per concrete grammar type. They are - # organized in alphabetical order. 
- ######################################################################### - - def _Add(self, t): - self.__binary_op(t, '+') - - def _And(self, t): - self._write(" (") - for i, node in enumerate(t.nodes): - self._dispatch(node) - if i != len(t.nodes)-1: - self._write(") and (") - self._write(")") - - def _AssAttr(self, t): - """ Handle assigning an attribute of an object - """ - self._dispatch(t.expr) - self._write('.'+t.attrname) - - def _Assign(self, t): - """ Expression Assignment such as "a = 1". - - This only handles assignment in expressions. Keyword assignment - is handled separately. - """ - self._fill() - for target in t.nodes: - self._dispatch(target) - self._write(" = ") - self._dispatch(t.expr) - if not self._do_indent: - self._write('; ') - - def _AssName(self, t): - """ Name on left hand side of expression. - - Treat just like a name on the right side of an expression. - """ - self._Name(t) - - def _AssTuple(self, t): - """ Tuple on left hand side of an expression. - """ - - # _write each elements, separated by a comma. - for element in t.nodes[:-1]: - self._dispatch(element) - self._write(", ") - - # Handle the last one without writing comma - last_element = t.nodes[-1] - self._dispatch(last_element) - - def _AugAssign(self, t): - """ +=,-=,*=,/=,**=, etc. operations - """ - - self._fill() - self._dispatch(t.node) - self._write(' '+t.op+' ') - self._dispatch(t.expr) - if not self._do_indent: - self._write(';') - - def _Bitand(self, t): - """ Bit and operation. - """ - - for i, node in enumerate(t.nodes): - self._write("(") - self._dispatch(node) - self._write(")") - if i != len(t.nodes)-1: - self._write(" & ") - - def _Bitor(self, t): - """ Bit or operation - """ - - for i, node in enumerate(t.nodes): - self._write("(") - self._dispatch(node) - self._write(")") - if i != len(t.nodes)-1: - self._write(" | ") - - def _CallFunc(self, t): - """ Function call. 
- """ - self._dispatch(t.node) - self._write("(") - comma = False - for e in t.args: - if comma: self._write(", ") - else: comma = True - self._dispatch(e) - if t.star_args: - if comma: self._write(", ") - else: comma = True - self._write("*") - self._dispatch(t.star_args) - if t.dstar_args: - if comma: self._write(", ") - else: comma = True - self._write("**") - self._dispatch(t.dstar_args) - self._write(")") - - def _Compare(self, t): - self._dispatch(t.expr) - for op, expr in t.ops: - self._write(" " + op + " ") - self._dispatch(expr) - - def _Const(self, t): - """ A constant value such as an integer value, 3, or a string, "hello". - """ - self._dispatch(t.value) - - def _Decorators(self, t): - """ Handle function decorators (eg. @has_units) - """ - for node in t.nodes: - self._dispatch(node) - - def _Dict(self, t): - self._write("{") - for i, (k, v) in enumerate(t.items): - self._dispatch(k) - self._write(": ") - self._dispatch(v) - if i < len(t.items)-1: - self._write(", ") - self._write("}") - - def _Discard(self, t): - """ Node for when return value is ignored such as in "foo(a)". - """ - self._fill() - self._dispatch(t.expr) - - def _Div(self, t): - self.__binary_op(t, '/') - - def _Ellipsis(self, t): - self._write("...") - - def _From(self, t): - """ Handle "from xyz import foo, bar as baz". - """ - # fixme: Are From and ImportFrom handled differently? 
- self._fill("from ") - self._write(t.modname) - self._write(" import ") - for i, (name,asname) in enumerate(t.names): - if i != 0: - self._write(", ") - self._write(name) - if asname is not None: - self._write(" as "+asname) - - def _Function(self, t): - """ Handle function definitions - """ - if t.decorators is not None: - self._fill("@") - self._dispatch(t.decorators) - self._fill("def "+t.name + "(") - defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) - for i, arg in enumerate(zip(t.argnames, defaults)): - self._write(arg[0]) - if arg[1] is not None: - self._write('=') - self._dispatch(arg[1]) - if i < len(t.argnames)-1: - self._write(', ') - self._write(")") - if self._single_func: - self._do_indent = False - self._enter() - self._dispatch(t.code) - self._leave() - self._do_indent = True - - def _Getattr(self, t): - """ Handle getting an attribute of an object - """ - if isinstance(t.expr, (Div, Mul, Sub, Add)): - self._write('(') - self._dispatch(t.expr) - self._write(')') - else: - self._dispatch(t.expr) - - self._write('.'+t.attrname) - - def _If(self, t): - self._fill() - - for i, (compare,code) in enumerate(t.tests): - if i == 0: - self._write("if ") - else: - self._write("elif ") - self._dispatch(compare) - self._enter() - self._fill() - self._dispatch(code) - self._leave() - self._write("\n") - - if t.else_ is not None: - self._write("else") - self._enter() - self._fill() - self._dispatch(t.else_) - self._leave() - self._write("\n") - - def _IfExp(self, t): - self._dispatch(t.then) - self._write(" if ") - self._dispatch(t.test) - - if t.else_ is not None: - self._write(" else (") - self._dispatch(t.else_) - self._write(")") - - def _Import(self, t): - """ Handle "import xyz.foo". 
- """ - self._fill("import ") - - for i, (name,asname) in enumerate(t.names): - if i != 0: - self._write(", ") - self._write(name) - if asname is not None: - self._write(" as "+asname) - - def _Keyword(self, t): - """ Keyword value assignment within function calls and definitions. - """ - self._write(t.name) - self._write("=") - self._dispatch(t.expr) - - def _List(self, t): - self._write("[") - for i,node in enumerate(t.nodes): - self._dispatch(node) - if i < len(t.nodes)-1: - self._write(", ") - self._write("]") - - def _Module(self, t): - if t.doc is not None: - self._dispatch(t.doc) - self._dispatch(t.node) - - def _Mul(self, t): - self.__binary_op(t, '*') - - def _Name(self, t): - self._write(t.name) - - def _NoneType(self, t): - self._write("None") - - def _Not(self, t): - self._write('not (') - self._dispatch(t.expr) - self._write(')') - - def _Or(self, t): - self._write(" (") - for i, node in enumerate(t.nodes): - self._dispatch(node) - if i != len(t.nodes)-1: - self._write(") or (") - self._write(")") - - def _Pass(self, t): - self._write("pass\n") - - def _Printnl(self, t): - self._fill("print ") - if t.dest: - self._write(">> ") - self._dispatch(t.dest) - self._write(", ") - comma = False - for node in t.nodes: - if comma: self._write(', ') - else: comma = True - self._dispatch(node) - - def _Power(self, t): - self.__binary_op(t, '**') - - def _Return(self, t): - self._fill("return ") - if t.value: - if isinstance(t.value, Tuple): - text = ', '.join([ name.name for name in t.value.asList() ]) - self._write(text) - else: - self._dispatch(t.value) - if not self._do_indent: - self._write('; ') - - def _Slice(self, t): - self._dispatch(t.expr) - self._write("[") - if t.lower: - self._dispatch(t.lower) - self._write(":") - if t.upper: - self._dispatch(t.upper) - #if t.step: - # self._write(":") - # self._dispatch(t.step) - self._write("]") - - def _Sliceobj(self, t): - for i, node in enumerate(t.nodes): - if i != 0: - self._write(":") - if not 
(isinstance(node, Const) and node.value is None): - self._dispatch(node) - - def _Stmt(self, tree): - for node in tree.nodes: - self._dispatch(node) - - def _Sub(self, t): - self.__binary_op(t, '-') - - def _Subscript(self, t): - self._dispatch(t.expr) - self._write("[") - for i, value in enumerate(t.subs): - if i != 0: - self._write(",") - self._dispatch(value) - self._write("]") - - def _TryExcept(self, t): - self._fill("try") - self._enter() - self._dispatch(t.body) - self._leave() - - for handler in t.handlers: - self._fill('except ') - self._dispatch(handler[0]) - if handler[1] is not None: - self._write(', ') - self._dispatch(handler[1]) - self._enter() - self._dispatch(handler[2]) - self._leave() - - if t.else_: - self._fill("else") - self._enter() - self._dispatch(t.else_) - self._leave() - - def _Tuple(self, t): - - if not t.nodes: - # Empty tuple. - self._write("()") - else: - self._write("(") - - # _write each elements, separated by a comma. - for element in t.nodes[:-1]: - self._dispatch(element) - self._write(", ") - - # Handle the last one without writing comma - last_element = t.nodes[-1] - self._dispatch(last_element) - - self._write(")") - - def _UnaryAdd(self, t): - self._write("+") - self._dispatch(t.expr) - - def _UnarySub(self, t): - self._write("-") - self._dispatch(t.expr) - - def _With(self, t): - self._fill('with ') - self._dispatch(t.expr) - if t.vars: - self._write(' as ') - self._dispatch(t.vars.name) - self._enter() - self._dispatch(t.body) - self._leave() - self._write('\n') - - def _int(self, t): - self._write(repr(t)) - - def __binary_op(self, t, symbol): - # Check if parenthesis are needed on left side and then dispatch - has_paren = False - left_class = str(t.left.__class__) - if (left_class in op_precedence.keys() and - op_precedence[left_class] < op_precedence[str(t.__class__)]): - has_paren = True - if has_paren: - self._write('(') - self._dispatch(t.left) - if has_paren: - self._write(')') - # Write the appropriate symbol for 
operator - self._write(symbol) - # Check if parenthesis are needed on the right side and then dispatch - has_paren = False - right_class = str(t.right.__class__) - if (right_class in op_precedence.keys() and - op_precedence[right_class] < op_precedence[str(t.__class__)]): - has_paren = True - if has_paren: - self._write('(') - self._dispatch(t.right) - if has_paren: - self._write(')') - - def _float(self, t): - # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' - # We prefer str here. - self._write(str(t)) - - def _str(self, t): - self._write(repr(t)) - - def _tuple(self, t): - self._write(str(t)) - - ######################################################################### - # These are the methods from the _ast modules unparse. - # - # As our needs to handle more advanced code increase, we may want to - # modify some of the methods below so that they work for compiler.ast. - ######################################################################### - -# # stmt -# def _Expr(self, tree): -# self._fill() -# self._dispatch(tree.value) -# -# def _Import(self, t): -# self._fill("import ") -# first = True -# for a in t.names: -# if first: -# first = False -# else: -# self._write(", ") -# self._write(a.name) -# if a.asname: -# self._write(" as "+a.asname) -# -## def _ImportFrom(self, t): -## self._fill("from ") -## self._write(t.module) -## self._write(" import ") -## for i, a in enumerate(t.names): -## if i == 0: -## self._write(", ") -## self._write(a.name) -## if a.asname: -## self._write(" as "+a.asname) -## # XXX(jpe) what is level for? 
-## -# -# def _Break(self, t): -# self._fill("break") -# -# def _Continue(self, t): -# self._fill("continue") -# -# def _Delete(self, t): -# self._fill("del ") -# self._dispatch(t.targets) -# -# def _Assert(self, t): -# self._fill("assert ") -# self._dispatch(t.test) -# if t.msg: -# self._write(", ") -# self._dispatch(t.msg) -# -# def _Exec(self, t): -# self._fill("exec ") -# self._dispatch(t.body) -# if t.globals: -# self._write(" in ") -# self._dispatch(t.globals) -# if t.locals: -# self._write(", ") -# self._dispatch(t.locals) -# -# def _Print(self, t): -# self._fill("print ") -# do_comma = False -# if t.dest: -# self._write(">>") -# self._dispatch(t.dest) -# do_comma = True -# for e in t.values: -# if do_comma:self._write(", ") -# else:do_comma=True -# self._dispatch(e) -# if not t.nl: -# self._write(",") -# -# def _Global(self, t): -# self._fill("global") -# for i, n in enumerate(t.names): -# if i != 0: -# self._write(",") -# self._write(" " + n) -# -# def _Yield(self, t): -# self._fill("yield") -# if t.value: -# self._write(" (") -# self._dispatch(t.value) -# self._write(")") -# -# def _Raise(self, t): -# self._fill('raise ') -# if t.type: -# self._dispatch(t.type) -# if t.inst: -# self._write(", ") -# self._dispatch(t.inst) -# if t.tback: -# self._write(", ") -# self._dispatch(t.tback) -# -# -# def _TryFinally(self, t): -# self._fill("try") -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# self._fill("finally") -# self._enter() -# self._dispatch(t.finalbody) -# self._leave() -# -# def _excepthandler(self, t): -# self._fill("except ") -# if t.type: -# self._dispatch(t.type) -# if t.name: -# self._write(", ") -# self._dispatch(t.name) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# def _ClassDef(self, t): -# self._write("\n") -# self._fill("class "+t.name) -# if t.bases: -# self._write("(") -# for a in t.bases: -# self._dispatch(a) -# self._write(", ") -# self._write(")") -# self._enter() -# self._dispatch(t.body) -# 
self._leave() -# -# def _FunctionDef(self, t): -# self._write("\n") -# for deco in t.decorators: -# self._fill("@") -# self._dispatch(deco) -# self._fill("def "+t.name + "(") -# self._dispatch(t.args) -# self._write(")") -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# def _For(self, t): -# self._fill("for ") -# self._dispatch(t.target) -# self._write(" in ") -# self._dispatch(t.iter) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# if t.orelse: -# self._fill("else") -# self._enter() -# self._dispatch(t.orelse) -# self._leave -# -# def _While(self, t): -# self._fill("while ") -# self._dispatch(t.test) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# if t.orelse: -# self._fill("else") -# self._enter() -# self._dispatch(t.orelse) -# self._leave -# -# # expr -# def _Str(self, tree): -# self._write(repr(tree.s)) -## -# def _Repr(self, t): -# self._write("`") -# self._dispatch(t.value) -# self._write("`") -# -# def _Num(self, t): -# self._write(repr(t.n)) -# -# def _ListComp(self, t): -# self._write("[") -# self._dispatch(t.elt) -# for gen in t.generators: -# self._dispatch(gen) -# self._write("]") -# -# def _GeneratorExp(self, t): -# self._write("(") -# self._dispatch(t.elt) -# for gen in t.generators: -# self._dispatch(gen) -# self._write(")") -# -# def _comprehension(self, t): -# self._write(" for ") -# self._dispatch(t.target) -# self._write(" in ") -# self._dispatch(t.iter) -# for if_clause in t.ifs: -# self._write(" if ") -# self._dispatch(if_clause) -# -# def _IfExp(self, t): -# self._dispatch(t.body) -# self._write(" if ") -# self._dispatch(t.test) -# if t.orelse: -# self._write(" else ") -# self._dispatch(t.orelse) -# -# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} -# def _UnaryOp(self, t): -# self._write(self.unop[t.op.__class__.__name__]) -# self._write("(") -# self._dispatch(t.operand) -# self._write(")") -# -# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", -# "LShift":">>", 
"RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", -# "FloorDiv":"//", "Pow": "**"} -# def _BinOp(self, t): -# self._write("(") -# self._dispatch(t.left) -# self._write(")" + self.binop[t.op.__class__.__name__] + "(") -# self._dispatch(t.right) -# self._write(")") -# -# boolops = {_ast.And: 'and', _ast.Or: 'or'} -# def _BoolOp(self, t): -# self._write("(") -# self._dispatch(t.values[0]) -# for v in t.values[1:]: -# self._write(" %s " % self.boolops[t.op.__class__]) -# self._dispatch(v) -# self._write(")") -# -# def _Attribute(self,t): -# self._dispatch(t.value) -# self._write(".") -# self._write(t.attr) -# -## def _Call(self, t): -## self._dispatch(t.func) -## self._write("(") -## comma = False -## for e in t.args: -## if comma: self._write(", ") -## else: comma = True -## self._dispatch(e) -## for e in t.keywords: -## if comma: self._write(", ") -## else: comma = True -## self._dispatch(e) -## if t.starargs: -## if comma: self._write(", ") -## else: comma = True -## self._write("*") -## self._dispatch(t.starargs) -## if t.kwargs: -## if comma: self._write(", ") -## else: comma = True -## self._write("**") -## self._dispatch(t.kwargs) -## self._write(")") -# -# # slice -# def _Index(self, t): -# self._dispatch(t.value) -# -# def _ExtSlice(self, t): -# for i, d in enumerate(t.dims): -# if i != 0: -# self._write(': ') -# self._dispatch(d) -# -# # others -# def _arguments(self, t): -# first = True -# nonDef = len(t.args)-len(t.defaults) -# for a in t.args[0:nonDef]: -# if first:first = False -# else: self._write(", ") -# self._dispatch(a) -# for a,d in zip(t.args[nonDef:], t.defaults): -# if first:first = False -# else: self._write(", ") -# self._dispatch(a), -# self._write("=") -# self._dispatch(d) -# if t.vararg: -# if first:first = False -# else: self._write(", ") -# self._write("*"+t.vararg) -# if t.kwarg: -# if first:first = False -# else: self._write(", ") -# self._write("**"+t.kwarg) -# -## def _keyword(self, t): -## self._write(t.arg) -## self._write("=") 
-## self._dispatch(t.value) -# -# def _Lambda(self, t): -# self._write("lambda ") -# self._dispatch(t.args) -# self._write(": ") -# self._dispatch(t.body) - - - diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/docscrape.py python-numpy-1.14.5/doc/sphinxext/numpydoc/docscrape.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/docscrape.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/docscrape.py 2018-06-12 18:29:26.000000000 +0000 @@ -9,6 +9,7 @@ import pydoc from warnings import warn import collections +import copy import sys @@ -90,37 +91,39 @@ class ParseError(Exception): def __str__(self): - message = self.message + message = self.args[0] if hasattr(self, 'docstring'): message = "%s in %r" % (message, self.docstring) return message class NumpyDocString(collections.Mapping): + sections = { + 'Signature': '', + 'Summary': [''], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Yields': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Attributes': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'Warnings': [], + 'References': '', + 'Examples': '', + 'index': {} + } + def __init__(self, docstring, config={}): orig_docstring = docstring docstring = textwrap.dedent(docstring).split('\n') self._doc = Reader(docstring) - self._parsed_data = { - 'Signature': '', - 'Summary': [''], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Yields': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Attributes': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'Warnings': [], - 'References': '', - 'Examples': '', - 'index': {} - } + self._parsed_data = copy.deepcopy(self.sections) try: self._parse() @@ -327,6 +330,21 @@ if not section.startswith('..'): section = (s.capitalize() for s in section.split(' ')) section = ' '.join(section) + if self.get(section): + if hasattr(self, '_obj'): + # we know where the docs came from: + try: + filename = 
inspect.getsourcefile(self._obj) + except TypeError: + filename = None + msg = ("The section %s appears twice in " + "the docstring of %s in %s." % + (section, self._obj, filename)) + raise ValueError(msg) + else: + msg = ("The section %s appears twice" % section) + raise ValueError(msg) + if section in ('Parameters', 'Returns', 'Yields', 'Raises', 'Warns', 'Other Parameters', 'Attributes', 'Methods'): diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/docscrape_sphinx.py python-numpy-1.14.5/doc/sphinxext/numpydoc/docscrape_sphinx.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/docscrape_sphinx.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/docscrape_sphinx.py 2018-06-12 18:29:26.000000000 +0000 @@ -5,8 +5,13 @@ import inspect import textwrap import pydoc -import sphinx import collections +import os + +from jinja2 import FileSystemLoader +from jinja2.sandbox import SandboxedEnvironment +import sphinx +from sphinx.jinja2glue import BuiltinTemplateLoader from .docscrape import NumpyDocString, FunctionDoc, ClassDoc @@ -24,6 +29,12 @@ def load_config(self, config): self.use_plots = config.get('use_plots', False) self.class_members_toctree = config.get('class_members_toctree', True) + self.template = config.get('template', None) + if self.template is None: + template_dirs = [os.path.join(os.path.dirname(__file__), 'templates')] + template_loader = FileSystemLoader(template_dirs) + template_env = SandboxedEnvironment(loader=template_loader) + self.template = template_env.get_template('numpydoc_docstring.rst') # string conversion routines def _str_header(self, name, symbol='`'): @@ -132,15 +143,15 @@ out += [''] + autosum if others: - maxlen_0 = max(3, max([len(x[0]) for x in others])) - hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10 + maxlen_0 = max(3, max([len(x[0]) + 4 for x in others])) + hdr = sixu("=") * maxlen_0 + sixu(" ") + sixu("=") * 10 fmt = sixu('%%%ds %%s ') % (maxlen_0,) out += ['', '', hdr] for param, 
param_type, desc in others: desc = sixu(" ").join(x.strip() for x in desc).strip() if param_type: desc = "(%s) %s" % (param_type, desc) - out += [fmt % (param.strip(), desc)] + out += [fmt % ("**" + param.strip() + "**", desc)] out += [hdr] out += [''] return out @@ -223,25 +234,29 @@ return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): - out = [] - out += self._str_signature() - out += self._str_index() + [''] - out += self._str_summary() - out += self._str_extended_summary() - out += self._str_param_list('Parameters') - out += self._str_returns('Returns') - out += self._str_returns('Yields') - for param_list in ('Other Parameters', 'Raises', 'Warns'): - out += self._str_param_list(param_list) - out += self._str_warnings() - out += self._str_see_also(func_role) - out += self._str_section('Notes') - out += self._str_references() - out += self._str_examples() - for param_list in ('Attributes', 'Methods'): - out += self._str_member_list(param_list) - out = self._str_indent(out, indent) - return '\n'.join(out) + ns = { + 'signature': self._str_signature(), + 'index': self._str_index(), + 'summary': self._str_summary(), + 'extended_summary': self._str_extended_summary(), + 'parameters': self._str_param_list('Parameters'), + 'returns': self._str_returns('Returns'), + 'yields': self._str_returns('Yields'), + 'other_parameters': self._str_param_list('Other Parameters'), + 'raises': self._str_param_list('Raises'), + 'warns': self._str_param_list('Warns'), + 'warnings': self._str_warnings(), + 'see_also': self._str_see_also(func_role), + 'notes': self._str_section('Notes'), + 'references': self._str_references(), + 'examples': self._str_examples(), + 'attributes': self._str_member_list('Attributes'), + 'methods': self._str_member_list('Methods'), + } + ns = dict((k, '\n'.join(v)) for k, v in ns.items()) + + rendered = self.template.render(**ns) + return '\n'.join(self._str_indent(rendered.split('\n'), indent)) class 
SphinxFunctionDoc(SphinxDocString, FunctionDoc): @@ -263,7 +278,7 @@ SphinxDocString.__init__(self, doc, config=config) -def get_doc_object(obj, what=None, doc=None, config={}): +def get_doc_object(obj, what=None, doc=None, config={}, builder=None): if what is None: if inspect.isclass(obj): what = 'class' @@ -273,6 +288,16 @@ what = 'function' else: what = 'object' + + template_dirs = [os.path.join(os.path.dirname(__file__), 'templates')] + if builder is not None: + template_loader = BuiltinTemplateLoader() + template_loader.init(builder, dirs=template_dirs) + else: + template_loader = FileSystemLoader(template_dirs) + template_env = SandboxedEnvironment(loader=template_loader) + config['template'] = template_env.get_template('numpydoc_docstring.rst') + if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/__init__.py python-numpy-1.14.5/doc/sphinxext/numpydoc/__init__.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/__init__.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/__init__.py 2018-06-12 18:29:26.000000000 +0000 @@ -1,3 +1,5 @@ from __future__ import division, absolute_import, print_function +__version__ = '0.7.0' + from .numpydoc import setup diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/linkcode.py python-numpy-1.14.5/doc/sphinxext/numpydoc/linkcode.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/linkcode.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/linkcode.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -""" - linkcode - ~~~~~~~~ - - Add external links to module code in Python object descriptions. - - :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. - :license: BSD, see LICENSE for details. 
- -""" -from __future__ import division, absolute_import, print_function - -import warnings -import collections - -warnings.warn("This extension has been accepted to Sphinx upstream. " - "Use the version from there (Sphinx >= 1.2) " - "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode", - FutureWarning, stacklevel=1) - - -from docutils import nodes - -from sphinx import addnodes -from sphinx.locale import _ -from sphinx.errors import SphinxError - -class LinkcodeError(SphinxError): - category = "linkcode error" - -def doctree_read(app, doctree): - env = app.builder.env - - resolve_target = getattr(env.config, 'linkcode_resolve', None) - if not isinstance(env.config.linkcode_resolve, collections.Callable): - raise LinkcodeError( - "Function `linkcode_resolve` is not given in conf.py") - - domain_keys = dict( - py=['module', 'fullname'], - c=['names'], - cpp=['names'], - js=['object', 'fullname'], - ) - - for objnode in doctree.traverse(addnodes.desc): - domain = objnode.get('domain') - uris = set() - for signode in objnode: - if not isinstance(signode, addnodes.desc_signature): - continue - - # Convert signode to a specified format - info = {} - for key in domain_keys.get(domain, []): - value = signode.get(key) - if not value: - value = '' - info[key] = value - if not info: - continue - - # Call user code to resolve the link - uri = resolve_target(domain, info) - if not uri: - # no source - continue - - if uri in uris or not uri: - # only one link per name, please - continue - uris.add(uri) - - onlynode = addnodes.only(expr='html') - onlynode += nodes.reference('', '', internal=False, refuri=uri) - onlynode[0] += nodes.inline('', _('[source]'), - classes=['viewcode-link']) - signode += onlynode - -def setup(app): - app.connect('doctree-read', doctree_read) - app.add_config_value('linkcode_resolve', None, '') diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/numpydoc.py python-numpy-1.14.5/doc/sphinxext/numpydoc/numpydoc.py --- 
python-numpy-1.13.3/doc/sphinxext/numpydoc/numpydoc.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/numpydoc.py 2018-06-12 18:29:26.000000000 +0000 @@ -29,7 +29,6 @@ raise RuntimeError("Sphinx 1.0.1 or newer is required") from .docscrape_sphinx import get_doc_object, SphinxDocString -from sphinx.util.compat import Directive if sys.version_info[0] >= 3: sixu = lambda s: s @@ -37,8 +36,33 @@ sixu = lambda s: unicode(s, 'unicode_escape') -def mangle_docstrings(app, what, name, obj, options, lines, +def rename_references(app, what, name, obj, options, lines, reference_offset=[0]): + # replace reference numbers so that there are no duplicates + references = [] + for line in lines: + line = line.strip() + m = re.match(sixu('^.. \\[(%s)\\]') % app.config.numpydoc_citation_re, + line, re.I) + if m: + references.append(m.group(1)) + + if references: + for i, line in enumerate(lines): + for r in references: + if re.match(sixu('^\\d+$'), r): + new_r = sixu("R%d") % (reference_offset[0] + int(r)) + else: + new_r = sixu("%s%d") % (r, reference_offset[0]) + lines[i] = lines[i].replace(sixu('[%s]_') % r, + sixu('[%s]_') % new_r) + lines[i] = lines[i].replace(sixu('.. [%s]') % r, + sixu('.. 
[%s]') % new_r) + + reference_offset[0] += len(references) + + +def mangle_docstrings(app, what, name, obj, options, lines): cfg = {'use_plots': app.config.numpydoc_use_plots, 'show_class_members': app.config.numpydoc_show_class_members, @@ -53,7 +77,8 @@ title_re = re.compile(sixu(pattern), re.I | re.S) lines[:] = title_re.sub(sixu(''), u_NL.join(lines)).split(u_NL) else: - doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg) + doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg, + builder=app.builder) if sys.version_info[0] >= 3: doc = str(doc) else: @@ -70,29 +95,9 @@ lines += [sixu(' %s') % x for x in (app.config.numpydoc_edit_link % v).split("\n")] - # replace reference numbers so that there are no duplicates - references = [] - for line in lines: - line = line.strip() - m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) - if m: - references.append(m.group(1)) - - # start renaming from the longest string, to avoid overwriting parts - references.sort(key=lambda x: -len(x)) - if references: - for i, line in enumerate(lines): - for r in references: - if re.match(sixu('^\\d+$'), r): - new_r = sixu("R%d") % (reference_offset[0] + int(r)) - else: - new_r = sixu("%s%d") % (r, reference_offset[0]) - lines[i] = lines[i].replace(sixu('[%s]_') % r, - sixu('[%s]_') % new_r) - lines[i] = lines[i].replace(sixu('.. [%s]') % r, - sixu('.. 
[%s]') % new_r) - - reference_offset[0] += len(references) + # call function to replace reference numbers so that there are no + # duplicates + rename_references(app, what, name, obj, options, lines) def mangle_signature(app, what, name, obj, options, sig, retann): @@ -108,10 +113,10 @@ if not hasattr(obj, '__doc__'): return - doc = SphinxDocString(pydoc.getdoc(obj)) - if doc['Signature']: - sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature']) + sig = doc['Signature'] or getattr(obj, '__text_signature__', None) + if sig: + sig = re.sub(sixu("^[^(]*"), sixu(""), sig) return sig, sixu('') @@ -129,11 +134,12 @@ app.add_config_value('numpydoc_show_class_members', True, True) app.add_config_value('numpydoc_show_inherited_class_members', True, True) app.add_config_value('numpydoc_class_members_toctree', True, True) + app.add_config_value('numpydoc_citation_re', '[a-z0-9_.-]+', True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) - + metadata = {'parallel_read_safe': True} return metadata @@ -184,6 +190,62 @@ } +def match_items(lines, content_old): + """Create items for mangled lines. + + This function tries to match the lines in ``lines`` with the items (source + file references and line numbers) in ``content_old``. The + ``mangle_docstrings`` function changes the actual docstrings, but doesn't + keep track of where each line came from. The manging does many operations + on the original lines, which are hard to track afterwards. + + Many of the line changes come from deleting or inserting blank lines. This + function tries to match lines by ignoring blank lines. All other changes + (such as inserting figures or changes in the references) are completely + ignored, so the generated line numbers will be off if ``mangle_docstrings`` + does anything non-trivial. + + This is a best-effort function and the real fix would be to make + ``mangle_docstrings`` actually keep track of the ``items`` together with + the ``lines``. 
+ + Examples + -------- + >>> lines = ['', 'A', '', 'B', ' ', '', 'C', 'D'] + >>> lines_old = ['a', '', '', 'b', '', 'c'] + >>> items_old = [('file1.py', 0), ('file1.py', 1), ('file1.py', 2), + ... ('file2.py', 0), ('file2.py', 1), ('file2.py', 2)] + >>> content_old = ViewList(lines_old, items=items_old) + >>> match_items(lines, content_old) # doctest: +NORMALIZE_WHITESPACE + [('file1.py', 0), ('file1.py', 0), ('file2.py', 0), ('file2.py', 0), + ('file2.py', 2), ('file2.py', 2), ('file2.py', 2), ('file2.py', 2)] + >>> # first 2 ``lines`` are matched to 'a', second 2 to 'b', rest to 'c' + >>> # actual content is completely ignored. + + Notes + ----- + The algorithm tries to match any line in ``lines`` with one in + ``lines_old``. It skips over all empty lines in ``lines_old`` and assigns + this line number to all lines in ``lines``, unless a non-empty line is + found in ``lines`` in which case it goes to the next line in ``lines_old``. + + """ + items_new = [] + lines_old = content_old.data + items_old = content_old.items + j = 0 + for i, line in enumerate(lines): + # go to next non-empty line in old: + # line.strip() checks whether the string is all whitespace + while j < len(lines_old) - 1 and not lines_old[j].strip(): + j += 1 + items_new.append(items_old[j]) + if line.strip() and j < len(lines_old) - 1: + j += 1 + assert(len(items_new) == len(lines)) + return items_new + + def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): @@ -199,7 +261,10 @@ lines = list(self.content) mangle_docstrings(env.app, objtype, name, None, None, lines) - self.content = ViewList(lines, self.content.parent) + if self.content: + items = match_items(lines, self.content) + self.content = ViewList(lines, items=items, + parent=self.content.parent) return base_directive.run(self) diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/phantom_import.py python-numpy-1.14.5/doc/sphinxext/numpydoc/phantom_import.py --- 
python-numpy-1.13.3/doc/sphinxext/numpydoc/phantom_import.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/phantom_import.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,167 +0,0 @@ -""" -============== -phantom_import -============== - -Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar -extensions to use docstrings loaded from an XML file. - -This extension loads an XML file in the Pydocweb format [1] and -creates a dummy module that contains the specified docstrings. This -can be used to get the current docstrings from a Pydocweb instance -without needing to rebuild the documented module. - -.. [1] http://code.google.com/p/pydocweb - -""" -from __future__ import division, absolute_import, print_function - -import imp, sys, compiler, types, os, inspect, re - -def setup(app): - app.connect('builder-inited', initialize) - app.add_config_value('phantom_import_file', None, True) - -def initialize(app): - fn = app.config.phantom_import_file - if (fn and os.path.isfile(fn)): - print("[numpydoc] Phantom importing modules from", fn, "...") - import_phantom_module(fn) - -#------------------------------------------------------------------------------ -# Creating 'phantom' modules from an XML description -#------------------------------------------------------------------------------ -def import_phantom_module(xml_file): - """ - Insert a fake Python module to sys.modules, based on a XML file. - - The XML file is expected to conform to Pydocweb DTD. The fake - module will contain dummy objects, which guarantee the following: - - - Docstrings are correct. - - Class inheritance relationships are correct (if present in XML). - - Function argspec is *NOT* correct (even if present in XML). - Instead, the function signature is prepended to the function docstring. - - Class attributes are *NOT* correct; instead, they are dummy objects. 
- - Parameters - ---------- - xml_file : str - Name of an XML file to read - - """ - import lxml.etree as etree - - object_cache = {} - - tree = etree.parse(xml_file) - root = tree.getroot() - - # Sort items so that - # - Base classes come before classes inherited from them - # - Modules come before their contents - all_nodes = dict([(n.attrib['id'], n) for n in root]) - - def _get_bases(node, recurse=False): - bases = [x.attrib['ref'] for x in node.findall('base')] - if recurse: - j = 0 - while True: - try: - b = bases[j] - except IndexError: break - if b in all_nodes: - bases.extend(_get_bases(all_nodes[b])) - j += 1 - return bases - - type_index = ['module', 'class', 'callable', 'object'] - - def base_cmp(a, b): - x = cmp(type_index.index(a.tag), type_index.index(b.tag)) - if x != 0: return x - - if a.tag == 'class' and b.tag == 'class': - a_bases = _get_bases(a, recurse=True) - b_bases = _get_bases(b, recurse=True) - x = cmp(len(a_bases), len(b_bases)) - if x != 0: return x - if a.attrib['id'] in b_bases: return -1 - if b.attrib['id'] in a_bases: return 1 - - return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) - - nodes = root.getchildren() - nodes.sort(base_cmp) - - # Create phantom items - for node in nodes: - name = node.attrib['id'] - doc = (node.text or '').decode('string-escape') + "\n" - if doc == "\n": doc = "" - - # create parent, if missing - parent = name - while True: - parent = '.'.join(parent.split('.')[:-1]) - if not parent: break - if parent in object_cache: break - obj = imp.new_module(parent) - object_cache[parent] = obj - sys.modules[parent] = obj - - # create object - if node.tag == 'module': - obj = imp.new_module(name) - obj.__doc__ = doc - sys.modules[name] = obj - elif node.tag == 'class': - bases = [object_cache[b] for b in _get_bases(node) - if b in object_cache] - bases.append(object) - init = lambda self: None - init.__doc__ = doc - obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) - obj.__name__ = 
name.split('.')[-1] - elif node.tag == 'callable': - funcname = node.attrib['id'].split('.')[-1] - argspec = node.attrib.get('argspec') - if argspec: - argspec = re.sub('^[^(]*', '', argspec) - doc = "%s%s\n\n%s" % (funcname, argspec, doc) - obj = lambda: 0 - obj.__argspec_is_invalid_ = True - if sys.version_info[0] >= 3: - obj.__name__ = funcname - else: - obj.func_name = funcname - obj.__name__ = name - obj.__doc__ = doc - if inspect.isclass(object_cache[parent]): - obj.__objclass__ = object_cache[parent] - else: - class Dummy(object): pass - obj = Dummy() - obj.__name__ = name - obj.__doc__ = doc - if inspect.isclass(object_cache[parent]): - obj.__get__ = lambda: None - object_cache[name] = obj - - if parent: - if inspect.ismodule(object_cache[parent]): - obj.__module__ = parent - setattr(object_cache[parent], name.split('.')[-1], obj) - - # Populate items - for node in root: - obj = object_cache.get(node.attrib['id']) - if obj is None: continue - for ref in node.findall('ref'): - if node.tag == 'class': - if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): - setattr(obj, ref.attrib['name'], - object_cache.get(ref.attrib['ref'])) - else: - setattr(obj, ref.attrib['name'], - object_cache.get(ref.attrib['ref'])) diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/plot_directive.py python-numpy-1.14.5/doc/sphinxext/numpydoc/plot_directive.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/plot_directive.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/plot_directive.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,642 +0,0 @@ -""" -A special directive for generating a matplotlib plot. - -.. warning:: - - This is a hacked version of plot_directive.py from Matplotlib. - It's very much subject to change! - - -Usage ------ - -Can be used like this:: - - .. plot:: examples/example.py - - .. plot:: - - import matplotlib.pyplot as plt - plt.plot([1,2,3], [4,5,6]) - - .. 
plot:: - - A plotting example: - - >>> import matplotlib.pyplot as plt - >>> plt.plot([1,2,3], [4,5,6]) - -The content is interpreted as doctest formatted if it has a line starting -with ``>>>``. - -The ``plot`` directive supports the options - - format : {'python', 'doctest'} - Specify the format of the input - - include-source : bool - Whether to display the source code. Default can be changed in conf.py - -and the ``image`` directive options ``alt``, ``height``, ``width``, -``scale``, ``align``, ``class``. - -Configuration options ---------------------- - -The plot directive has the following configuration options: - - plot_include_source - Default value for the include-source option - - plot_pre_code - Code that should be executed before each plot. - - plot_basedir - Base directory, to which plot:: file names are relative to. - (If None or empty, file names are relative to the directoly where - the file containing the directive is.) - - plot_formats - File formats to generate. List of tuples or strings:: - - [(suffix, dpi), suffix, ...] - - that determine the file format and the DPI. For entries whose - DPI was omitted, sensible defaults are chosen. - - plot_html_show_formats - Whether to show links to the files in HTML. - -TODO ----- - -* Refactor Latex output; now it's plain images, but it would be nice - to make them appear side-by-side, or in floats. 
- -""" -from __future__ import division, absolute_import, print_function - -import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback -import sphinx - -if sys.version_info[0] >= 3: - from io import StringIO -else: - from io import StringIO - -import warnings -warnings.warn("A plot_directive module is also available under " - "matplotlib.sphinxext; expect this numpydoc.plot_directive " - "module to be deprecated after relevant features have been " - "integrated there.", - FutureWarning, stacklevel=2) - - -#------------------------------------------------------------------------------ -# Registration hook -#------------------------------------------------------------------------------ - -def setup(app): - setup.app = app - setup.config = app.config - setup.confdir = app.confdir - - app.add_config_value('plot_pre_code', '', True) - app.add_config_value('plot_include_source', False, True) - app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) - app.add_config_value('plot_basedir', None, True) - app.add_config_value('plot_html_show_formats', True, True) - - app.add_directive('plot', plot_directive, True, (0, 1, False), - **plot_directive_options) - -#------------------------------------------------------------------------------ -# plot:: directive -#------------------------------------------------------------------------------ -from docutils.parsers.rst import directives -from docutils import nodes - -def plot_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return run(arguments, content, options, state_machine, state, lineno) -plot_directive.__doc__ = __doc__ - -def _option_boolean(arg): - if not arg or not arg.strip(): - # no argument given, assume used as a flag - return True - elif arg.strip().lower() in ('no', '0', 'false'): - return False - elif arg.strip().lower() in ('yes', '1', 'true'): - return True - else: - raise ValueError('"%s" unknown boolean' % arg) - -def 
_option_format(arg): - return directives.choice(arg, ('python', 'lisp')) - -def _option_align(arg): - return directives.choice(arg, ("top", "middle", "bottom", "left", "center", - "right")) - -plot_directive_options = {'alt': directives.unchanged, - 'height': directives.length_or_unitless, - 'width': directives.length_or_percentage_or_unitless, - 'scale': directives.nonnegative_int, - 'align': _option_align, - 'class': directives.class_option, - 'include-source': _option_boolean, - 'format': _option_format, - } - -#------------------------------------------------------------------------------ -# Generating output -#------------------------------------------------------------------------------ - -from docutils import nodes, utils - -try: - # Sphinx depends on either Jinja or Jinja2 - import jinja2 - def format_template(template, **kw): - return jinja2.Template(template).render(**kw) -except ImportError: - import jinja - def format_template(template, **kw): - return jinja.from_string(template, **kw) - -TEMPLATE = """ -{{ source_code }} - -{{ only_html }} - - {% if source_link or (html_show_formats and not multi_image) %} - ( - {%- if source_link -%} - `Source code <{{ source_link }}>`__ - {%- endif -%} - {%- if html_show_formats and not multi_image -%} - {%- for img in images -%} - {%- for fmt in img.formats -%} - {%- if source_link or not loop.first -%}, {% endif -%} - `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ - {%- endfor -%} - {%- endfor -%} - {%- endif -%} - ) - {% endif %} - - {% for img in images %} - .. figure:: {{ build_dir }}/{{ img.basename }}.png - {%- for option in options %} - {{ option }} - {% endfor %} - - {% if html_show_formats and multi_image -%} - ( - {%- for fmt in img.formats -%} - {%- if not loop.first -%}, {% endif -%} - `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ - {%- endfor -%} - ) - {%- endif -%} - {% endfor %} - -{{ only_latex }} - - {% for img in images %} - .. 
image:: {{ build_dir }}/{{ img.basename }}.pdf - {% endfor %} - -""" - -class ImageFile(object): - def __init__(self, basename, dirname): - self.basename = basename - self.dirname = dirname - self.formats = [] - - def filename(self, format): - return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) - - def filenames(self): - return [self.filename(fmt) for fmt in self.formats] - -def run(arguments, content, options, state_machine, state, lineno): - if arguments and content: - raise RuntimeError("plot:: directive can't have both args and content") - - document = state_machine.document - config = document.settings.env.config - - options.setdefault('include-source', config.plot_include_source) - - # determine input - rst_file = document.attributes['source'] - rst_dir = os.path.dirname(rst_file) - - if arguments: - if not config.plot_basedir: - source_file_name = os.path.join(rst_dir, - directives.uri(arguments[0])) - else: - source_file_name = os.path.join(setup.confdir, config.plot_basedir, - directives.uri(arguments[0])) - code = open(source_file_name, 'r').read() - output_base = os.path.basename(source_file_name) - else: - source_file_name = rst_file - code = textwrap.dedent("\n".join(map(str, content))) - counter = document.attributes.get('_plot_counter', 0) + 1 - document.attributes['_plot_counter'] = counter - base, ext = os.path.splitext(os.path.basename(source_file_name)) - output_base = '%s-%d.py' % (base, counter) - - base, source_ext = os.path.splitext(output_base) - if source_ext in ('.py', '.rst', '.txt'): - output_base = base - else: - source_ext = '' - - # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames - output_base = output_base.replace('.', '-') - - # is it in doctest format? 
- is_doctest = contains_doctest(code) - if 'format' in options: - if options['format'] == 'python': - is_doctest = False - else: - is_doctest = True - - # determine output directory name fragment - source_rel_name = relpath(source_file_name, setup.confdir) - source_rel_dir = os.path.dirname(source_rel_name) - while source_rel_dir.startswith(os.path.sep): - source_rel_dir = source_rel_dir[1:] - - # build_dir: where to place output files (temporarily) - build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), - 'plot_directive', - source_rel_dir) - if not os.path.exists(build_dir): - os.makedirs(build_dir) - - # output_dir: final location in the builder's directory - dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, - source_rel_dir)) - - # how to link to files from the RST file - dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), - source_rel_dir).replace(os.path.sep, '/') - build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') - source_link = dest_dir_link + '/' + output_base + source_ext - - # make figures - try: - results = makefig(code, source_file_name, build_dir, output_base, - config) - errors = [] - except PlotError as err: - reporter = state.memo.reporter - sm = reporter.system_message( - 2, "Exception occurred in plotting %s: %s" % (output_base, err), - line=lineno) - results = [(code, [])] - errors = [sm] - - # generate output restructuredtext - total_lines = [] - for j, (code_piece, images) in enumerate(results): - if options['include-source']: - if is_doctest: - lines = [''] - lines += [row.rstrip() for row in code_piece.split('\n')] - else: - lines = ['.. code-block:: python', ''] - lines += [' %s' % row.rstrip() - for row in code_piece.split('\n')] - source_code = "\n".join(lines) - else: - source_code = "" - - opts = [':%s: %s' % (key, val) for key, val in list(options.items()) - if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] - - only_html = ".. only:: html" - only_latex = ".. 
only:: latex" - - if j == 0: - src_link = source_link - else: - src_link = None - - result = format_template( - TEMPLATE, - dest_dir=dest_dir_link, - build_dir=build_dir_link, - source_link=src_link, - multi_image=len(images) > 1, - only_html=only_html, - only_latex=only_latex, - options=opts, - images=images, - source_code=source_code, - html_show_formats=config.plot_html_show_formats) - - total_lines.extend(result.split("\n")) - total_lines.extend("\n") - - if total_lines: - state_machine.insert_input(total_lines, source=source_file_name) - - # copy image files to builder's output directory - if not os.path.exists(dest_dir): - os.makedirs(dest_dir) - - for code_piece, images in results: - for img in images: - for fn in img.filenames(): - shutil.copyfile(fn, os.path.join(dest_dir, - os.path.basename(fn))) - - # copy script (if necessary) - if source_file_name == rst_file: - target_name = os.path.join(dest_dir, output_base + source_ext) - f = open(target_name, 'w') - f.write(unescape_doctest(code)) - f.close() - - return errors - - -#------------------------------------------------------------------------------ -# Run code and capture figures -#------------------------------------------------------------------------------ - -import matplotlib -matplotlib.use('Agg') -import matplotlib.pyplot as plt -import matplotlib.image as image -from matplotlib import _pylab_helpers - -import exceptions - -def contains_doctest(text): - try: - # check if it's valid Python as-is - compile(text, '', 'exec') - return False - except SyntaxError: - pass - r = re.compile(r'^\s*>>>', re.M) - m = r.search(text) - return bool(m) - -def unescape_doctest(text): - """ - Extract code from a piece of text, which contains either Python code - or doctests. - - """ - if not contains_doctest(text): - return text - - code = "" - for line in text.split("\n"): - m = re.match(r'^\s*(>>>|\.\.\.) 
(.*)$', line) - if m: - code += m.group(2) + "\n" - elif line.strip(): - code += "# " + line.strip() + "\n" - else: - code += "\n" - return code - -def split_code_at_show(text): - """ - Split code at plt.show() - - """ - - parts = [] - is_doctest = contains_doctest(text) - - part = [] - for line in text.split("\n"): - if (not is_doctest and line.strip() == 'plt.show()') or \ - (is_doctest and line.strip() == '>>> plt.show()'): - part.append(line) - parts.append("\n".join(part)) - part = [] - else: - part.append(line) - if "\n".join(part).strip(): - parts.append("\n".join(part)) - return parts - -class PlotError(RuntimeError): - pass - -def run_code(code, code_path, ns=None): - # Change the working directory to the directory of the example, so - # it can get at its data files, if any. - pwd = os.getcwd() - old_sys_path = list(sys.path) - if code_path is not None: - dirname = os.path.abspath(os.path.dirname(code_path)) - os.chdir(dirname) - sys.path.insert(0, dirname) - - # Redirect stdout - stdout = sys.stdout - sys.stdout = StringIO() - - # Reset sys.argv - old_sys_argv = sys.argv - sys.argv = [code_path] - - try: - try: - code = unescape_doctest(code) - if ns is None: - ns = {} - if not ns: - exec(setup.config.plot_pre_code, ns) - exec(code, ns) - except (Exception, SystemExit) as err: - raise PlotError(traceback.format_exc()) - finally: - os.chdir(pwd) - sys.argv = old_sys_argv - sys.path[:] = old_sys_path - sys.stdout = stdout - return ns - - -#------------------------------------------------------------------------------ -# Generating figures -#------------------------------------------------------------------------------ - -def out_of_date(original, derived): - """ - Returns True if derivative is out-of-date wrt original, - both of which are full file paths. 
- """ - return (not os.path.exists(derived) - or os.stat(derived).st_mtime < os.stat(original).st_mtime) - - -def makefig(code, code_path, output_dir, output_base, config): - """ - Run a pyplot script *code* and save the images under *output_dir* - with file names derived from *output_base* - - """ - - # -- Parse format list - default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} - formats = [] - for fmt in config.plot_formats: - if isinstance(fmt, str): - formats.append((fmt, default_dpi.get(fmt, 80))) - elif type(fmt) in (tuple, list) and len(fmt)==2: - formats.append((str(fmt[0]), int(fmt[1]))) - else: - raise PlotError('invalid image format "%r" in plot_formats' % fmt) - - # -- Try to determine if all images already exist - - code_pieces = split_code_at_show(code) - - # Look for single-figure output files first - all_exists = True - img = ImageFile(output_base, output_dir) - for format, dpi in formats: - if out_of_date(code_path, img.filename(format)): - all_exists = False - break - img.formats.append(format) - - if all_exists: - return [(code, [img])] - - # Then look for multi-figure output files - results = [] - all_exists = True - for i, code_piece in enumerate(code_pieces): - images = [] - for j in range(1000): - img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) - for format, dpi in formats: - if out_of_date(code_path, img.filename(format)): - all_exists = False - break - img.formats.append(format) - - # assume that if we have one, we have them all - if not all_exists: - all_exists = (j > 0) - break - images.append(img) - if not all_exists: - break - results.append((code_piece, images)) - - if all_exists: - return results - - # -- We didn't find the files, so build them - - results = [] - ns = {} - - for i, code_piece in enumerate(code_pieces): - # Clear between runs - plt.close('all') - - # Run code - run_code(code_piece, code_path, ns) - - # Collect images - images = [] - fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() - for j, 
figman in enumerate(fig_managers): - if len(fig_managers) == 1 and len(code_pieces) == 1: - img = ImageFile(output_base, output_dir) - else: - img = ImageFile("%s_%02d_%02d" % (output_base, i, j), - output_dir) - images.append(img) - for format, dpi in formats: - try: - figman.canvas.figure.savefig(img.filename(format), dpi=dpi) - except exceptions.BaseException as err: - raise PlotError(traceback.format_exc()) - img.formats.append(format) - - # Results - results.append((code_piece, images)) - - return results - - -#------------------------------------------------------------------------------ -# Relative pathnames -#------------------------------------------------------------------------------ - -try: - from os.path import relpath -except ImportError: - # Copied from Python 2.7 - if 'posix' in sys.builtin_module_names: - def relpath(path, start=os.path.curdir): - """Return a relative version of a path""" - from os.path import sep, curdir, join, abspath, commonprefix, \ - pardir - - if not path: - raise ValueError("no path specified") - - start_list = abspath(start).split(sep) - path_list = abspath(path).split(sep) - - # Work out how much of the filepath is shared by start and path. 
- i = len(commonprefix([start_list, path_list])) - - rel_list = [pardir] * (len(start_list)-i) + path_list[i:] - if not rel_list: - return curdir - return join(*rel_list) - elif 'nt' in sys.builtin_module_names: - def relpath(path, start=os.path.curdir): - """Return a relative version of a path""" - from os.path import sep, curdir, join, abspath, commonprefix, \ - pardir, splitunc - - if not path: - raise ValueError("no path specified") - start_list = abspath(start).split(sep) - path_list = abspath(path).split(sep) - if start_list[0].lower() != path_list[0].lower(): - unc_path, rest = splitunc(path) - unc_start, rest = splitunc(start) - if bool(unc_path) ^ bool(unc_start): - raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" - % (path, start)) - else: - raise ValueError("path is on drive %s, start on drive %s" - % (path_list[0], start_list[0])) - # Work out how much of the filepath is shared by start and path. - for i in range(min(len(start_list), len(path_list))): - if start_list[i].lower() != path_list[i].lower(): - break - else: - i += 1 - - rel_list = [pardir] * (len(start_list)-i) + path_list[i:] - if not rel_list: - return curdir - return join(*rel_list) - else: - raise RuntimeError("Unsupported platform (no relpath available!)") diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/templates/numpydoc_docstring.rst python-numpy-1.14.5/doc/sphinxext/numpydoc/templates/numpydoc_docstring.rst --- python-numpy-1.13.3/doc/sphinxext/numpydoc/templates/numpydoc_docstring.rst 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/templates/numpydoc_docstring.rst 2018-06-07 19:30:04.000000000 +0000 @@ -0,0 +1,16 @@ +{{index}} +{{summary}} +{{extended_summary}} +{{parameters}} +{{returns}} +{{yields}} +{{other_parameters}} +{{raises}} +{{warns}} +{{warnings}} +{{see_also}} +{{notes}} +{{references}} +{{examples}} +{{attributes}} +{{methods}} diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/tests/test_docscrape.py 
python-numpy-1.14.5/doc/sphinxext/numpydoc/tests/test_docscrape.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/tests/test_docscrape.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/tests/test_docscrape.py 2018-06-12 18:29:26.000000000 +0000 @@ -1,11 +1,21 @@ # -*- encoding:utf-8 -*- from __future__ import division, absolute_import, print_function -import sys, textwrap +import sys +import textwrap -from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc -from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc -from nose.tools import * +import jinja2 + +from numpydoc.docscrape import ( + NumpyDocString, + FunctionDoc, + ClassDoc, + ParseError +) +from numpydoc.docscrape_sphinx import (SphinxDocString, SphinxClassDoc, + SphinxFunctionDoc) +from nose.tools import (assert_equal, assert_raises, assert_list_equal, + assert_true) if sys.version_info[0] >= 3: sixu = lambda s: s @@ -209,6 +219,71 @@ """ assert_raises(ValueError, NumpyDocString, doc_text) + +def test_section_twice(): + doc_text = """ +Test having a section Notes twice + +Notes +----- +See the next note for more information + +Notes +----- +That should break... +""" + assert_raises(ValueError, NumpyDocString, doc_text) + + # if we have a numpydoc object, we know where the error came from + class Dummy(object): + """ + Dummy class. + + Notes + ----- + First note. + + Notes + ----- + Second note. + + """ + def spam(self, a, b): + """Spam\n\nSpam spam.""" + pass + + def ham(self, c, d): + """Cheese\n\nNo cheese.""" + pass + + def dummy_func(arg): + """ + Dummy function. + + Notes + ----- + First note. + + Notes + ----- + Second note. 
+ """ + + try: + SphinxClassDoc(Dummy) + except ValueError as e: + # python 3 version or python 2 version + assert_true("test_section_twice..Dummy" in str(e) + or 'test_docscrape.Dummy' in str(e)) + + try: + SphinxFunctionDoc(dummy_func) + except ValueError as e: + # python 3 version or python 2 version + assert_true("test_section_twice..dummy_func" in str(e) + or 'function dummy_func' in str(e)) + + def test_notes(): assert doc['Notes'][0].startswith('Instead') assert doc['Notes'][-1].endswith('definite.') @@ -232,11 +307,8 @@ b = textwrap.dedent(b) a = [l.rstrip() for l in a.split('\n') if l.strip()] b = [l.rstrip() for l in b.split('\n') if l.strip()] - for n,line in enumerate(a): - if not line == b[n]: - raise AssertionError("Lines %s of a and b differ: " - "\n>>> %s\n<<< %s\n" % - (n,line,b[n])) + assert_list_equal(a, b) + def test_str(): # doc_txt has the order of Notes and See Also sections flipped. # This should be handled automatically, and so, one thing this test does @@ -619,6 +691,23 @@ elif func == 'class_j': assert desc == ['fubar', 'foobar'] + +def test_see_also_parse_error(): + text = ( + """ + z(x,theta) + + See Also + -------- + :func:`~foo` + """) + with assert_raises(ParseError) as err: + NumpyDocString(text) + assert_equal( + str(r":func:`~foo` is not a item name in '\n z(x,theta)\n\n See Also\n --------\n :func:`~foo`\n '"), + str(err.exception) + ) + def test_see_also_print(): class Dummy(object): """ @@ -893,21 +982,46 @@ x - === ========== - t (float) Current time. - y (ndarray) Current variable values. - === ========== + ===== ========== + **t** (float) Current time. + **y** (ndarray) Current variable values. + ===== ========== .. 
rubric:: Methods - === ========== - a - b - c - === ========== + ===== ========== + **a** + **b** + **c** + ===== ========== """) +def test_templated_sections(): + doc = SphinxClassDoc(None, class_doc_txt, + config={'template': jinja2.Template('{{examples}}{{parameters}}')}) + non_blank_line_by_line_compare(str(doc), + """ + .. rubric:: Examples + + For usage examples, see `ode`. + + + :Parameters: + + **f** : callable ``f(t, y, *f_args)`` + + Aaa. + + **jac** : callable ``jac(t, y, *jac_args)`` + + Bbb. + + """) + + + + if __name__ == "__main__": import nose nose.run() diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/tests/test_linkcode.py python-numpy-1.14.5/doc/sphinxext/numpydoc/tests/test_linkcode.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/tests/test_linkcode.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/tests/test_linkcode.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import numpydoc.linkcode - -# No tests at the moment... diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/tests/test_phantom_import.py python-numpy-1.14.5/doc/sphinxext/numpydoc/tests/test_phantom_import.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/tests/test_phantom_import.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/tests/test_phantom_import.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from nose import SkipTest - -def test_import(): - if sys.version_info[0] >= 3: - raise SkipTest("phantom_import not ported to Py3") - - import numpydoc.phantom_import - -# No tests at the moment... 
diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/tests/test_plot_directive.py python-numpy-1.14.5/doc/sphinxext/numpydoc/tests/test_plot_directive.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/tests/test_plot_directive.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/tests/test_plot_directive.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from nose import SkipTest - -def test_import(): - if sys.version_info[0] >= 3: - raise SkipTest("plot_directive not ported to Python 3 (use the one from Matplotlib instead)") - import numpydoc.plot_directive - -# No tests at the moment... diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/tests/test_traitsdoc.py python-numpy-1.14.5/doc/sphinxext/numpydoc/tests/test_traitsdoc.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/tests/test_traitsdoc.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/tests/test_traitsdoc.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -from nose import SkipTest - -def test_import(): - if sys.version_info[0] >= 3: - raise SkipTest("traitsdoc not ported to Python3") - import numpydoc.traitsdoc - -# No tests at the moment... diff -Nru python-numpy-1.13.3/doc/sphinxext/numpydoc/traitsdoc.py python-numpy-1.14.5/doc/sphinxext/numpydoc/traitsdoc.py --- python-numpy-1.13.3/doc/sphinxext/numpydoc/traitsdoc.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/numpydoc/traitsdoc.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,143 +0,0 @@ -""" -========= -traitsdoc -========= - -Sphinx extension that handles docstrings in the Numpy standard format, [1] -and support Traits [2]. - -This extension can be used as a replacement for ``numpydoc`` when support -for Traits is required. - -.. 
[1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard -.. [2] http://code.enthought.com/projects/traits/ - -""" -from __future__ import division, absolute_import, print_function - -import inspect -import os -import pydoc -import collections - -from . import docscrape -from . import docscrape_sphinx -from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString - -from . import numpydoc - -from . import comment_eater - -class SphinxTraitsDoc(SphinxClassDoc): - def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): - if not inspect.isclass(cls): - raise ValueError("Initialise using a class. Got %r" % cls) - self._cls = cls - - if modulename and not modulename.endswith('.'): - modulename += '.' - self._mod = modulename - self._name = cls.__name__ - self._func_doc = func_doc - - docstring = pydoc.getdoc(cls) - docstring = docstring.split('\n') - - # De-indent paragraph - try: - indent = min(len(s) - len(s.lstrip()) for s in docstring - if s.strip()) - except ValueError: - indent = 0 - - for n,line in enumerate(docstring): - docstring[n] = docstring[n][indent:] - - self._doc = docscrape.Reader(docstring) - self._parsed_data = { - 'Signature': '', - 'Summary': '', - 'Description': [], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Yields': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Traits': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'References': '', - 'Example': '', - 'Examples': '', - 'index': {} - } - - self._parse() - - def _str_summary(self): - return self['Summary'] + [''] - - def _str_extended_summary(self): - return self['Description'] + self['Extended Summary'] + [''] - - def __str__(self, indent=0, func_role="func"): - out = [] - out += self._str_signature() - out += self._str_index() + [''] - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Traits', 'Methods', - 'Returns', 'Yields', 'Raises'): - 
out += self._str_param_list(param_list) - out += self._str_see_also("obj") - out += self._str_section('Notes') - out += self._str_references() - out += self._str_section('Example') - out += self._str_section('Examples') - out = self._str_indent(out,indent) - return '\n'.join(out) - -def looks_like_issubclass(obj, classname): - """ Return True if the object has a class or superclass with the given class - name. - - Ignores old-style classes. - """ - t = obj - if t.__name__ == classname: - return True - for klass in t.__mro__: - if klass.__name__ == classname: - return True - return False - -def get_doc_object(obj, what=None, config=None): - if what is None: - if inspect.isclass(obj): - what = 'class' - elif inspect.ismodule(obj): - what = 'module' - elif isinstance(obj, collections.Callable): - what = 'function' - else: - what = 'object' - if what == 'class': - doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) - if looks_like_issubclass(obj, 'HasTraits'): - for name, trait, comment in comment_eater.get_class_traits(obj): - # Exclude private traits. 
- if not name.startswith('_'): - doc['Traits'].append((name, trait, comment.splitlines())) - return doc - elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, '', config=config) - else: - return SphinxDocString(pydoc.getdoc(obj), config=config) - -def setup(app): - # init numpydoc - numpydoc.setup(app, get_doc_object) - diff -Nru python-numpy-1.13.3/doc/sphinxext/setup.py python-numpy-1.14.5/doc/sphinxext/setup.py --- python-numpy-1.13.3/doc/sphinxext/setup.py 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/setup.py 2018-06-12 18:29:26.000000000 +0000 @@ -6,10 +6,14 @@ import setuptools from distutils.core import setup -if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 3): - raise RuntimeError("Python version 2.6, 2.7 or >= 3.3 required.") +if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[0:2] < (3, 4): + raise RuntimeError("Python version 2.7 or >= 3.4 required.") -version = "0.6.0" +with open('numpydoc/__init__.py') as fid: + for line in fid: + if line.startswith('__version__'): + version = line.strip().split()[-1][1:-1] + break setup( name="numpydoc", @@ -23,19 +27,18 @@ "Topic :: Documentation", "Programming Language :: Python", "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", - "Programming Language :: Python :: 3.5"], + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6"], keywords="sphinx numpy", author="Pauli Virtanen and others", author_email="pav@iki.fi", url="https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt", license="BSD", - requires=["sphinx (>= 1.0.1)"], - package_data={'numpydoc': ['tests/test_*.py']}, + install_requires=["sphinx >= 1.2.3", 'Jinja2>=2.3'], + package_data={'numpydoc': ['tests/test_*.py', 'templates/*.rst']}, 
test_suite = 'nose.collector', cmdclass={"sdist": sdist}, ) diff -Nru python-numpy-1.13.3/doc/sphinxext/.travis.yml python-numpy-1.14.5/doc/sphinxext/.travis.yml --- python-numpy-1.13.3/doc/sphinxext/.travis.yml 2017-09-21 00:11:40.000000000 +0000 +++ python-numpy-1.14.5/doc/sphinxext/.travis.yml 2018-06-12 18:29:26.000000000 +0000 @@ -3,7 +3,7 @@ language: python sudo: false python: - - 3.5 + - 3.6 - 2.7 env: - SPHINX_SPEC="Sphinx==1.2.3" @@ -15,4 +15,8 @@ - pip install --upgrade pip setuptools # Upgrade pip and setuptools to get ones with `wheel` support - pip install --find-links http://wheels.astropy.org/ --find-links http://wheels2.astropy.org/ --trusted-host wheels.astropy.org --trusted-host wheels2.astropy.org --use-wheel nose numpy matplotlib ${SPHINX_SPEC} script: - - python setup.py test + - | + python setup.py sdist + cd dist + pip install numpydoc* -v + - nosetests numpydoc diff -Nru python-numpy-1.13.3/LICENSE.txt python-numpy-1.14.5/LICENSE.txt --- python-numpy-1.13.3/LICENSE.txt 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/LICENSE.txt 2018-06-12 17:31:56.000000000 +0000 @@ -28,3 +28,33 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +The NumPy repository and source distributions bundle several libraries that are +compatibly licensed. We list these here. 
+ +Name: Numpydoc +Files: doc/sphinxext/numpydoc/* +License: 2-clause BSD + For details, see doc/sphinxext/LICENSE.txt + +Name: scipy-sphinx-theme +Files: doc/scipy-sphinx-theme/* +License: 3-clause BSD, PSF and Apache 2.0 + For details, see doc/sphinxext/LICENSE.txt + +Name: lapack-lite +Files: numpy/linalg/lapack_lite/* +License: 3-clause BSD + For details, see numpy/linalg/lapack_lite/LICENSE.txt + +Name: tempita +Files: tools/npy_tempita/* +License: BSD derived + For details, see tools/npy_tempita/license.txt + +Name: dragon4 +Files: numpy/core/src/multiarray/dragon4.c +License: One of a kind + For license text, see numpy/core/src/multiarray/dragon4.c diff -Nru python-numpy-1.13.3/MANIFEST.in python-numpy-1.14.5/MANIFEST.in --- python-numpy-1.13.3/MANIFEST.in 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/MANIFEST.in 2018-06-12 18:28:52.000000000 +0000 @@ -11,6 +11,7 @@ # Add build support that should go in sdist, but not go in bdist/be installed recursive-include numpy/_build_utils * recursive-include numpy/linalg/lapack_lite *.c *.h +include tox.ini # Add sdist files whose use depends on local configuration. 
include numpy/core/src/multiarray/cblasfuncs.c include numpy/core/src/multiarray/python_xerbla.c @@ -25,6 +26,5 @@ recursive-include doc/sphinxext * recursive-include tools/swig * recursive-include doc/scipy-sphinx-theme * -recursive-include doc/f2py * -global-exclude *.pyc *.pyo *.pyd +global-exclude *.pyc *.pyo *.pyd *.swp *.bak *~ diff -Nru python-numpy-1.13.3/numpy/add_newdocs.py python-numpy-1.14.5/numpy/add_newdocs.py --- python-numpy-1.13.3/numpy/add_newdocs.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/add_newdocs.py 2018-06-12 18:28:52.000000000 +0000 @@ -931,7 +931,7 @@ >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) - >>> np.zeros((5,), dtype=np.int) + >>> np.zeros((5,), dtype=int) array([0, 0, 0, 0, 0]) >>> np.zeros((2, 1)) @@ -961,7 +961,7 @@ """ fromstring(string, dtype=float, count=-1, sep='') - A new 1-D array initialized from raw binary or text data in a string. + A new 1-D array initialized from text data in a string. Parameters ---------- @@ -975,11 +975,13 @@ negative (the default), the count will be determined from the length of the data. sep : str, optional - If not provided or, equivalently, the empty string, the data will - be interpreted as binary data; otherwise, as ASCII text with - decimal numbers. Also in this latter case, this argument is - interpreted as the string separating numbers in the data; extra - whitespace between elements is also ignored. + The string separating numbers in the data; extra whitespace between + elements is also ignored. + + .. deprecated:: 1.14 + If this argument is not provided, `fromstring` falls back on the + behaviour of `frombuffer` after encoding unicode string inputs as + either utf-8 (python 3), or the default encoding (python 2). 
Returns ------- @@ -998,14 +1000,10 @@ Examples -------- - >>> np.fromstring('\\x01\\x02', dtype=np.uint8) - array([1, 2], dtype=uint8) >>> np.fromstring('1 2', dtype=int, sep=' ') array([1, 2]) >>> np.fromstring('1, 2', dtype=int, sep=',') array([1, 2]) - >>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) - array([1, 2, 3], dtype=uint8) """) @@ -1038,7 +1036,7 @@ Examples -------- >>> iterable = (x*x for x in range(5)) - >>> np.fromiter(iterable, np.float) + >>> np.fromiter(iterable, float) array([ 0., 1., 4., 9., 16.]) """) @@ -1154,11 +1152,16 @@ array(['w', 'o', 'r', 'l', 'd'], dtype='|S1') + >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) + array([1, 2], dtype=uint8) + >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) + array([1, 2, 3], dtype=uint8) + """) add_newdoc('numpy.core.multiarray', 'concatenate', """ - concatenate((a1, a2, ...), axis=0) + concatenate((a1, a2, ...), axis=0, out=None) Join a sequence of arrays along an existing axis. @@ -1169,6 +1172,10 @@ corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what concatenate would have returned if no + out argument were specified. Returns ------- @@ -1338,7 +1345,8 @@ step : number, optional Spacing between values. For any output `out`, this is the distance between two adjacent values, ``out[i+1] - out[i]``. The default - step size is 1. If `step` is specified, `start` must also be given. + step size is 1. If `step` is specified as a position argument, + `start` must also be given. dtype : dtype The type of the output array. If `dtype` is not given, infer the data type from the other input arguments. 
@@ -1504,7 +1512,7 @@ >>> ix array([[False, False, False], [ True, True, False], - [False, True, False]], dtype=bool) + [False, True, False]]) >>> np.where(ix) (array([1, 1, 2]), array([0, 1, 1])) @@ -1589,7 +1597,7 @@ add_newdoc('numpy.core.multiarray', 'can_cast', """ - can_cast(from, totype, casting = 'safe') + can_cast(from_, to, casting='safe') Returns True if cast between data types can occur according to the casting rule. If from is a scalar or array scalar, also returns @@ -1598,9 +1606,9 @@ Parameters ---------- - from : dtype, dtype specifier, scalar, or array + from_ : dtype, dtype specifier, scalar, or array Data type, scalar, or array to cast from. - totype : dtype or dtype specifier + to : dtype or dtype specifier Data type to cast to. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. @@ -1635,9 +1643,9 @@ >>> np.can_cast(np.int32, np.int64) True - >>> np.can_cast(np.float64, np.complex) + >>> np.can_cast(np.float64, complex) True - >>> np.can_cast(np.complex, np.float) + >>> np.can_cast(complex, float) False >>> np.can_cast('i8', 'f8') @@ -1920,12 +1928,22 @@ """ dot(a, b, out=None) - Dot product of two arrays. + Dot product of two arrays. Specifically, + + - If both `a` and `b` are 1-D arrays, it is inner product of vectors + (without complex conjugation). + + - If both `a` and `b` are 2-D arrays, it is matrix multiplication, + but using :func:`matmul` or ``a @ b`` is preferred. - For 2-D arrays it is equivalent to matrix multiplication, and for 1-D - arrays to inner product of vectors (without complex conjugation). For - N dimensions it is a sum product over the last axis of `a` and - the second-to-last of `b`:: + - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply` + and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred. + + - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over + the last axis of `a` and `b`. 
+ + - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a + sum product over the last axis of `a` and the second-to-last axis of `b`:: dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) @@ -2774,8 +2792,13 @@ array raises a RuntimeError exception. ALIGNED (A) The data and all elements are aligned appropriately for the hardware. + WRITEBACKIFCOPY (X) + This array is a copy of some other array. The C-API function + PyArray_ResolveWritebackIfCopy must be called before deallocating + to the base array will be updated with the contents of this array. UPDATEIFCOPY (U) - This array is a copy of some other array. When this array is + (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array. + When this array is deallocated, the base array will be updated with the contents of this array. FNC @@ -2795,13 +2818,14 @@ or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag names are only supported in dictionary access. - Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by - the user, via direct assignment to the attribute or dictionary entry, - or by calling `ndarray.setflags`. + Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be + changed by the user, via direct assignment to the attribute or dictionary + entry, or by calling `ndarray.setflags`. The array flags cannot be set arbitrarily: - UPDATEIFCOPY can only be set ``False``. + - WRITEBACKIFCOPY can only be set ``False``. - ALIGNED can only be set ``True`` if the data is truly aligned. - WRITEABLE can only be set ``True`` if the array owns its own memory or the ultimate owner of the memory exposes a writeable buffer @@ -2921,10 +2945,12 @@ """ Tuple of array dimensions. 
- Notes - ----- - May be used to "reshape" the array, as long as this would not - require a change in the total number of elements + The shape property is usually used to get the current shape of an array, + but may also be used to reshape the array in-place by assigning a tuple of + array dimensions to it. As with `numpy.reshape`, one of the new shape + dimensions can be -1, in which case its value is inferred from the size of + the array and the remaining dimensions. Reshaping an array in-place will + fail if a copy is required. Examples -------- @@ -2943,6 +2969,15 @@ Traceback (most recent call last): File "", line 1, in ValueError: total size of new array must be unchanged + >>> np.zeros((4,2))[::2].shape = (-1,) + Traceback (most recent call last): + File "", line 1, in + AttributeError: incompatible shape for a non-contiguous array + + See Also + -------- + numpy.reshape : similar function + ndarray.reshape : similar method """)) @@ -3080,25 +3115,19 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__', - """a.__copy__([order]) + """a.__copy__() - Return a copy of the array. + Used if :func:`copy.copy` is called on an array. Returns a copy of the array. - Parameters - ---------- - order : {'C', 'F', 'A'}, optional - If order is 'C' (False) then the result is contiguous (default). - If order is 'Fortran' (True) then the result has fortran order. - If order is 'Any' (None) then the result has fortran order - only if the array already is in fortran order. + Equivalent to ``a.copy(order='K')``. """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__', - """a.__deepcopy__() -> Deep copy of array. + """a.__deepcopy__(memo, /) -> Deep copy of array. - Used if copy.deepcopy is called on an array. + Used if :func:`copy.deepcopy` is called on an array. 
""")) @@ -3112,10 +3141,13 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__', - """a.__setstate__(version, shape, dtype, isfortran, rawdata) + """a.__setstate__(state, /) For unpickling. + The `state` argument must be a sequence that contains the following + elements: + Parameters ---------- version : int @@ -3292,7 +3324,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', """ - a.byteswap(inplace) + a.byteswap(inplace=False) Swap the bytes of the array elements @@ -3315,7 +3347,7 @@ >>> A = np.array([1, 256, 8755], dtype=np.int16) >>> map(hex, A) ['0x1', '0x100', '0x2233'] - >>> A.byteswap(True) + >>> A.byteswap(inplace=True) array([ 256, 1, 13090], dtype=int16) >>> map(hex, A) ['0x100', '0x1', '0x3322'] @@ -3418,7 +3450,7 @@ Controls the memory layout of the copy. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely - as possible. (Note that this function and :func:numpy.copy are very + as possible. (Note that this function and :func:`numpy.copy` are very similar, but have different default values for their order= arguments.) @@ -3764,7 +3796,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('max', """ - a.max(axis=None, out=None) + a.max(axis=None, out=None, keepdims=False) Return the maximum along a given axis. @@ -3991,7 +4023,7 @@ add_newdoc('numpy.core.multiarray', 'copyto', """ - copyto(dst, src, casting='same_kind', where=None) + copyto(dst, src, casting='same_kind', where=True) Copies values from one array to another, broadcasting as necessary. @@ -4109,6 +4141,13 @@ -------- numpy.reshape : equivalent function + Notes + ----- + Unlike the free function `numpy.reshape`, this method on `ndarray` allows + the elements of the shape parameter to be passed in as separate arguments. + For example, ``a.reshape(10, 11)`` is equivalent to + ``a.reshape((10, 11))``. 
+ """)) @@ -4289,16 +4328,17 @@ """ a.setflags(write=None, align=None, uic=None) - Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively. + Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY), + respectively. These Boolean-valued flags affect how numpy interprets the memory area used by `a` (see Notes below). The ALIGNED flag can only be set to True if the data is actually aligned according to the type. - The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE - can only be set to True if the array owns its own memory, or the - ultimate owner of the memory exposes a writeable buffer interface, - or is a string. (The exception for string is made so that unpickling - can be done without copying memory.) + The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set + to True. The flag WRITEABLE can only be set to True if the array owns its + own memory, or the ultimate owner of the memory exposes a writeable buffer + interface, or is a string. (The exception for string is made so that + unpickling can be done without copying memory.) Parameters ---------- @@ -4312,20 +4352,22 @@ Notes ----- Array flags provide information about how the memory area used - for the array is to be interpreted. There are 6 Boolean flags - in use, only three of which can be changed by the user: - UPDATEIFCOPY, WRITEABLE, and ALIGNED. + for the array is to be interpreted. There are 7 Boolean flags + in use, only four of which can be changed by the user: + WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED. WRITEABLE (W) the data area can be written to; ALIGNED (A) the data and strides are aligned appropriately for the hardware (as determined by the compiler); - UPDATEIFCOPY (U) this array is a copy of some other array (referenced - by .base). When this array is deallocated, the base array will be - updated with the contents of this array. 
+ UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY; + + WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced + by .base). When the C-API function PyArray_ResolveWritebackIfCopy is + called, the base array will be updated with the contents of this array. - All flags can be accessed using their first (upper case) letter as well + All flags can be accessed using the single (upper case) letter as well as the full name. Examples @@ -4340,6 +4382,7 @@ OWNDATA : True WRITEABLE : True ALIGNED : True + WRITEBACKIFCOPY : False UPDATEIFCOPY : False >>> y.setflags(write=0, align=0) >>> y.flags @@ -4348,11 +4391,12 @@ OWNDATA : True WRITEABLE : False ALIGNED : False + WRITEBACKIFCOPY : False UPDATEIFCOPY : False >>> y.setflags(uic=1) Traceback (most recent call last): File "", line 1, in - ValueError: cannot set UPDATEIFCOPY flag to True + ValueError: cannot set WRITEBACKIFCOPY flag to True """)) @@ -5163,7 +5207,7 @@ The input array needs to be of integer dtype, otherwise a TypeError is raised: - >>> np.bincount(np.arange(5, dtype=np.float)) + >>> np.bincount(np.arange(5, dtype=float)) Traceback (most recent call last): File "", line 1, in TypeError: array cannot be safely cast to required type @@ -5376,7 +5420,8 @@ myarray : ndarray, uint8 type Input array. axis : int, optional - Unpacks along this axis. + The dimension over which bit-unpacking is done. + ``None`` implies unpacking the flattened array. Returns ------- @@ -5618,6 +5663,36 @@ """)) +add_newdoc('numpy.core', 'ufunc', ('signature', + """ + Definition of the core elements a generalized ufunc operates on. + + The signature determines how the dimensions of each input/output array + are split into core and loop dimensions: + + 1. Each dimension in the signature is matched to a dimension of the + corresponding passed-in array, starting from the end of the shape tuple. + 2. 
Core dimensions assigned to the same label in the signature must have + exactly matching sizes, no broadcasting is performed. + 3. The core dimensions are removed from all inputs and the remaining + dimensions are broadcast together, defining the loop dimensions. + + Notes + ----- + Generalized ufuncs are used internally in many linalg functions, and in + the testing suite; the examples below are taken from these. + For ufuncs that operate on scalars, the signature is `None`, which is + equivalent to '()' for every argument. + + Examples + -------- + >>> np.core.umath_tests.matrix_multiply.signature + '(m,n),(n,p)->(m,p)' + >>> np.linalg._umath_linalg.det.signature + '(m,m)->()' + >>> np.add.signature is None + True # equivalent to '(),()->()' + """)) ############################################################################## # @@ -5719,7 +5794,7 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate', """ - accumulate(array, axis=0, dtype=None, out=None, keepdims=None) + accumulate(array, axis=0, dtype=None, out=None) Accumulate the result of applying the operator to all elements. @@ -5756,8 +5831,6 @@ .. versionchanged:: 1.13.0 Tuples are allowed for keyword argument. - keepdims : bool - Has no effect. Deprecated, and will be removed in future. Returns ------- @@ -6101,7 +6174,7 @@ Using tuples. ``int`` is a fixed type, 3 the field's shape. 
``void`` is a flexible type, here of size 10: - >>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) + >>> np.dtype([('hello',(int,3)),('world',np.void,10)]) dtype([('hello', '>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') + >>> d + array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', + '2002-10-27T07:30'], dtype='datetime64[m]') + + Setting the timezone to UTC shows the same information, but with a Z suffix + + >>> np.datetime_as_string(d, timezone='UTC') + array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z', + '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) + array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', + '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='>> np.datetime_as_string(d, unit='h') + array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'], + dtype='>> np.datetime_as_string(d, unit='s') + array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00', + '2002-10-27T07:30:00'], dtype='>> np.datetime_as_string(d, unit='h', casting='safe') + TypeError: Cannot create a datetime string as units 'h' from a NumPy + datetime with units 'm' according to the rule 'safe' + """) + +add_newdoc('numpy.core.multiarray', 'datetime_data', + """ + datetime_data(dtype, /) + + Get information about the step size of a date or time type. + + The returned tuple can be passed as the second argument of `datetime64` and + `timedelta64`. + + Parameters + ---------- + dtype : dtype + The dtype object, which must be a `datetime64` or `timedelta64` type. + + Returns + ------- + unit : str + The :ref:`datetime unit ` on which this dtype + is based. + count : int + The number of base units in a step. 
+ + Examples + -------- + >>> dt_25s = np.dtype('timedelta64[25s]') + >>> np.datetime_data(dt_25s) + ('s', 25) + >>> np.array(10, dt_25s).astype('timedelta64[s]') + array(250, dtype='timedelta64[s]') + + The result can be used to construct a datetime that uses the same units + as a timedelta:: + + >>> np.datetime64('2010', np.datetime_data(dt_25s)) + numpy.datetime64('2010-01-01T00:00:00','25s') + """) + ############################################################################## # # nd_grid instances diff -Nru python-numpy-1.13.3/numpy/conftest.py python-numpy-1.14.5/numpy/conftest.py --- python-numpy-1.13.3/numpy/conftest.py 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/conftest.py 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,54 @@ +""" +Pytest configuration and fixtures for the Numpy test suite. +""" +from __future__ import division, absolute_import, print_function + +import warnings +import pytest + +from numpy.core.multiarray_tests import get_fpu_mode + + +_old_fpu_mode = None +_collect_results = {} + + +@pytest.hookimpl() +def pytest_itemcollected(item): + """ + Check FPU precision mode was not changed during test collection. + + The clumsy way we do it here is mainly necessary because numpy + still uses yield tests, which can execute code at test collection + time. + """ + global _old_fpu_mode + + mode = get_fpu_mode() + + if _old_fpu_mode is None: + _old_fpu_mode = mode + elif mode != _old_fpu_mode: + _collect_results[item] = (_old_fpu_mode, mode) + _old_fpu_mode = mode + + +@pytest.fixture(scope="function", autouse=True) +def check_fpu_mode(request): + """ + Check FPU precision mode was not changed during the test. 
+ """ + old_mode = get_fpu_mode() + yield + new_mode = get_fpu_mode() + + if old_mode != new_mode: + raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" + " during the test".format(old_mode, new_mode)) + + collect_result = _collect_results.get(request.node) + if collect_result is not None: + old_mode, new_mode = collect_result + raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" + " when collecting the test".format(old_mode, + new_mode)) diff -Nru python-numpy-1.13.3/numpy/core/arrayprint.py python-numpy-1.14.5/numpy/core/arrayprint.py --- python-numpy-1.13.3/numpy/core/arrayprint.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/arrayprint.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,7 +5,9 @@ """ from __future__ import division, absolute_import, print_function -__all__ = ["array2string", "set_printoptions", "get_printoptions"] +__all__ = ["array2string", "array_str", "array_repr", "set_string_function", + "set_printoptions", "get_printoptions", "format_float_positional", + "format_float_scientific"] __docformat__ = 'restructuredtext' # @@ -15,6 +17,13 @@ # and by Perry Greenfield 2000-4-1 for numarray # and by Travis Oliphant 2005-8-22 for numpy + +# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy +# scalars but for different purposes. scalartypes.c.src has str/reprs for when +# the scalar is printed on its own, while arrayprint.py has strs for when +# scalars are printed inside an ndarray. Only the latter strs are currently +# user-customizable. + import sys import functools if sys.version_info[0] >= 3: @@ -28,38 +37,60 @@ except ImportError: from dummy_thread import get_ident +import numpy as np from . 
import numerictypes as _nt -from .umath import maximum, minimum, absolute, not_equal, isnan, isinf -from .multiarray import (array, format_longfloat, datetime_as_string, - datetime_data, dtype) -from .fromnumeric import ravel -from .numeric import asarray +from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat +from . import multiarray +from .multiarray import (array, dragon4_positional, dragon4_scientific, + datetime_as_string, datetime_data, dtype, ndarray, + set_legacy_print_mode) +from .fromnumeric import ravel, any +from .numeric import concatenate, asarray, errstate +from .numerictypes import (longlong, intc, int_, float_, complex_, bool_, + flexible) +import warnings + + +_format_options = { + 'edgeitems': 3, # repr N leading and trailing items of each dimension + 'threshold': 1000, # total items > triggers array summarization + 'floatmode': 'maxprec', + 'precision': 8, # precision of floating point representations + 'suppress': False, # suppress printing small floating values in exp format + 'linewidth': 75, + 'nanstr': 'nan', + 'infstr': 'inf', + 'sign': '-', + 'formatter': None, + 'legacy': False} + +def _make_options_dict(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, infstr=None, + sign=None, formatter=None, floatmode=None, legacy=None): + """ make a dictionary out of the non-None arguments, plus sanity checks """ -if sys.version_info[0] >= 3: - _MAXINT = sys.maxsize - _MININT = -sys.maxsize - 1 -else: - _MAXINT = sys.maxint - _MININT = -sys.maxint - 1 + options = {k: v for k, v in locals().items() if v is not None} -def product(x, y): - return x*y + if suppress is not None: + options['suppress'] = bool(suppress) -_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension -_summaryThreshold = 1000 # total items > triggers array summarization + modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] + if floatmode not in modes + [None]: + raise ValueError("floatmode option 
must be one of " + + ", ".join('"{}"'.format(m) for m in modes)) -_float_output_precision = 8 -_float_output_suppress_small = False -_line_width = 75 -_nan_str = 'nan' -_inf_str = 'inf' -_formatter = None # formatting function for array elements + if sign not in [None, '-', '+', ' ']: + raise ValueError("sign option must be one of ' ', '+', or '-'") + if legacy not in [None, False, '1.13']: + warnings.warn("legacy printing option can currently only be '1.13' or " + "`False`", stacklevel=3) + + return options def set_printoptions(precision=None, threshold=None, edgeitems=None, - linewidth=None, suppress=None, - nanstr=None, infstr=None, - formatter=None): + linewidth=None, suppress=None, nanstr=None, infstr=None, + formatter=None, sign=None, floatmode=None, **kwarg): """ Set printing options. @@ -68,8 +99,10 @@ Parameters ---------- - precision : int, optional + precision : int or None, optional Number of digits of precision for floating point output (default 8). + May be `None` if `floatmode` is not `fixed`, to print as many digits as + necessary to uniquely specify the value. threshold : int, optional Total number of array elements which trigger summarization rather than full repr (default 1000). @@ -80,12 +113,20 @@ The number of characters per line for the purpose of inserting line breaks (default 75). suppress : bool, optional - Whether or not suppress printing of small floating point values - using scientific notation (default False). + If True, always print floating point numbers using fixed point + notation, in which case numbers equal to zero in the current precision + will print as zero. If False, then scientific notation is used when + absolute value of the smallest number is < 1e-4 or the ratio of the + maximum absolute value to the minimum is > 1e3. The default is False. nanstr : str, optional String representation of floating point not-a-number (default nan). infstr : str, optional String representation of floating point infinity (default inf). 
+ sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. (default '-') formatter : dict of callables, optional If not None, the keys should indicate the type(s) that the respective formatting function applies to. Callables should return a string. @@ -112,6 +153,31 @@ - 'float_kind' : sets 'float' and 'longfloat' - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - 'str_kind' : sets 'str' and 'numpystr' + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. Can take the following values: + - 'fixed' : Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + - 'unique : Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + - 'maxprec' : Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + - 'maxprec_equal' : Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string `'1.13'` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + .. 
versionadded:: 1.14.0 See Also -------- @@ -161,26 +227,26 @@ ... linewidth=75, nanstr='nan', precision=8, ... suppress=False, threshold=1000, formatter=None) """ + legacy = kwarg.pop('legacy', None) + if kwarg: + msg = "set_printoptions() got unexpected keyword argument '{}'" + raise TypeError(msg.format(kwarg.popitem()[0])) + + opt = _make_options_dict(precision, threshold, edgeitems, linewidth, + suppress, nanstr, infstr, sign, formatter, + floatmode, legacy) + # formatter is always reset + opt['formatter'] = formatter + _format_options.update(opt) + + # set the C variable for legacy mode + if _format_options['legacy'] == '1.13': + set_legacy_print_mode(113) + # reset the sign option in legacy mode to avoid confusion + _format_options['sign'] = '-' + elif _format_options['legacy'] is False: + set_legacy_print_mode(0) - global _summaryThreshold, _summaryEdgeItems, _float_output_precision - global _line_width, _float_output_suppress_small, _nan_str, _inf_str - global _formatter - - if linewidth is not None: - _line_width = linewidth - if threshold is not None: - _summaryThreshold = threshold - if edgeitems is not None: - _summaryEdgeItems = edgeitems - if precision is not None: - _float_output_precision = precision - if suppress is not None: - _float_output_suppress_small = not not suppress - if nanstr is not None: - _nan_str = nanstr - if infstr is not None: - _inf_str = infstr - _formatter = formatter def get_printoptions(): """ @@ -199,6 +265,7 @@ - nanstr : str - infstr : str - formatter : dict of callables + - sign : str For a full description of these options, see `set_printoptions`. @@ -207,40 +274,28 @@ set_printoptions, set_string_function """ - d = dict(precision=_float_output_precision, - threshold=_summaryThreshold, - edgeitems=_summaryEdgeItems, - linewidth=_line_width, - suppress=_float_output_suppress_small, - nanstr=_nan_str, - infstr=_inf_str, - formatter=_formatter) - return d - -def _leading_trailing(a): - from . 
import numeric as _nc - if a.ndim == 1: - if len(a) > 2*_summaryEdgeItems: - b = _nc.concatenate((a[:_summaryEdgeItems], - a[-_summaryEdgeItems:])) - else: - b = a - else: - if len(a) > 2*_summaryEdgeItems: - l = [_leading_trailing(a[i]) for i in range( - min(len(a), _summaryEdgeItems))] - l.extend([_leading_trailing(a[-i]) for i in range( - min(len(a), _summaryEdgeItems), 0, -1)]) - else: - l = [_leading_trailing(a[i]) for i in range(0, len(a))] - b = _nc.concatenate(tuple(l)) - return b - -def _boolFormatter(x): - if x: - return ' True' + return _format_options.copy() + + +def _leading_trailing(a, edgeitems, index=()): + """ + Keep only the N-D corners (leading and trailing edges) of an array. + + Should be passed a base-class ndarray, since it makes no guarantees about + preserving subclasses. + """ + axis = len(index) + if axis == a.ndim: + return a[index] + + if a.shape[axis] > 2*edgeitems: + return concatenate(( + _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]), + _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) + ), axis=axis) else: - return 'False' + return _leading_trailing(a, edgeitems, index + np.index_exp[:]) + def _object_format(o): """ Object arrays containing lists should be printed unambiguously """ @@ -253,26 +308,39 @@ def repr_format(x): return repr(x) -def _get_formatdict(data, precision, suppress_small, formatter): +def str_format(x): + return str(x) + +def _get_formatdict(data, **opt): + prec, fmode = opt['precision'], opt['floatmode'] + supp, sign = opt['suppress'], opt['sign'] + legacy = opt['legacy'] + # wrapped in lambdas to avoid taking a code path with the wrong type of data - formatdict = {'bool': lambda: _boolFormatter, - 'int': lambda: IntegerFormat(data), - 'float': lambda: FloatFormat(data, precision, suppress_small), - 'longfloat': lambda: LongFloatFormat(precision), - 'complexfloat': lambda: ComplexFormat(data, precision, - suppress_small), - 'longcomplexfloat': lambda: 
LongComplexFormat(precision), - 'datetime': lambda: DatetimeFormat(data), - 'timedelta': lambda: TimedeltaFormat(data), - 'object': lambda: _object_format, - 'numpystr': lambda: repr_format, - 'str': lambda: str} + formatdict = { + 'bool': lambda: BoolFormat(data), + 'int': lambda: IntegerFormat(data), + 'float': lambda: + FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'longfloat': lambda: + FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'complexfloat': lambda: + ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'longcomplexfloat': lambda: + ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy), + 'datetime': lambda: DatetimeFormat(data, legacy=legacy), + 'timedelta': lambda: TimedeltaFormat(data), + 'object': lambda: _object_format, + 'void': lambda: str_format, + 'numpystr': lambda: repr_format, + 'str': lambda: str} # we need to wrap values in `formatter` in a lambda, so that the interface # is the same as the above values. 
def indirect(x): return lambda: x + formatter = opt['formatter'] if formatter is not None: fkeys = [k for k in formatter.keys() if formatter[k] is not None] if 'all' in fkeys: @@ -296,24 +364,13 @@ return formatdict -def _get_format_function(data, precision, suppress_small, formatter): +def _get_format_function(data, **options): """ find the right formatting function for the dtype_ """ dtype_ = data.dtype - if dtype_.fields is not None: - format_functions = [] - for field_name in dtype_.names: - field_values = data[field_name] - format_function = _get_format_function( - ravel(field_values), precision, suppress_small, formatter) - if dtype_[field_name].shape != (): - format_function = SubArrayFormat(format_function) - format_functions.append(format_function) - return StructureFormat(format_functions) - dtypeobj = dtype_.type - formatdict = _get_formatdict(data, precision, suppress_small, formatter) + formatdict = _get_formatdict(data, **options) if issubclass(dtypeobj, _nt.bool_): return formatdict['bool']() elif issubclass(dtypeobj, _nt.integer): @@ -337,33 +394,14 @@ return formatdict['datetime']() elif issubclass(dtypeobj, _nt.object_): return formatdict['object']() + elif issubclass(dtypeobj, _nt.void): + if dtype_.names is not None: + return StructuredVoidFormat.from_data(data, **options) + else: + return formatdict['void']() else: return formatdict['numpystr']() -def _array2string(a, max_line_width, precision, suppress_small, separator=' ', - prefix="", formatter=None): - - if a.size > _summaryThreshold: - summary_insert = "..., " - data = _leading_trailing(a) - else: - summary_insert = "" - data = ravel(asarray(a)) - - # find the right formatting function for the array - format_function = _get_format_function(data, precision, - suppress_small, formatter) - - # skip over "[" - next_line_prefix = " " - # skip over array( - next_line_prefix += " "*len(prefix) - - lst = _formatArray(a, format_function, a.ndim, max_line_width, - next_line_prefix, separator, - 
_summaryEdgeItems, summary_insert)[:-1] - return lst - def _recursive_guard(fillvalue='...'): """ @@ -394,23 +432,52 @@ return decorating_function -# gracefully handle recursive calls - this comes up when object arrays contain -# themselves +# gracefully handle recursive calls, when object arrays contain themselves @_recursive_guard() +def _array2string(a, options, separator=' ', prefix=""): + # The formatter __init__s in _get_format_function cannot deal with + # subclasses yet, and we also need to avoid recursion issues in + # _formatArray with subclasses which return 0d arrays in place of scalars + data = asarray(a) + if a.shape == (): + a = data + + if a.size > options['threshold']: + summary_insert = "..." + data = _leading_trailing(data, options['edgeitems']) + else: + summary_insert = "" + + # find the right formatting function for the array + format_function = _get_format_function(data, **options) + + # skip over "[" + next_line_prefix = " " + # skip over array( + next_line_prefix += " "*len(prefix) + + lst = _formatArray(a, format_function, options['linewidth'], + next_line_prefix, separator, options['edgeitems'], + summary_insert, options['legacy']) + return lst + + def array2string(a, max_line_width=None, precision=None, suppress_small=None, separator=' ', prefix="", - style=repr, formatter=None): + style=np._NoValue, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix="", + **kwarg): """ Return a string representation of an array. Parameters ---------- - a : ndarray + a : array_like Input array. max_line_width : int, optional The maximum number of columns the string should span. Newline characters splits the string appropriately after array elements. - precision : int, optional + precision : int or None, optional Floating point precision. Default is the current printing precision (usually 8), which can be altered using `set_printoptions`. 
suppress_small : bool, optional @@ -419,15 +486,18 @@ separator : str, optional Inserted between elements. prefix : str, optional - An array is typically printed as:: + suffix: str, optional + The length of the prefix and suffix strings are used to respectively + align and wrap the output. An array is typically printed as:: + + prefix + array2string(a) + suffix + + The output is left-padded by the length of the prefix string, and + wrapping is forced at the column ``max_line_width - len(suffix)``. + style : _NoValue, optional + Has no effect, do not use. - 'prefix(' + array2string(a) + ')' - - The length of the prefix string is used to align the - output correctly. - style : function, optional - A function that accepts an ndarray and returns a string. Used only - when the shape of `a` is equal to ``()``, i.e. for 0-D arrays. + .. deprecated:: 1.14.0 formatter : dict of callables, optional If not None, the keys should indicate the type(s) that the respective formatting function applies to. Callables should return a string. @@ -443,6 +513,7 @@ - 'longfloat' : 128-bit floats - 'complexfloat' - 'longcomplexfloat' : composed of two 128-bit floats + - 'void' : type `numpy.void` - 'numpystr' : types `numpy.string_` and `numpy.unicode_` - 'str' : all other strings @@ -453,6 +524,42 @@ - 'float_kind' : sets 'float' and 'longfloat' - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' - 'str_kind' : sets 'str' and 'numpystr' + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr. + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension. + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. 
+ floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. Can take the following values: + - 'fixed' : Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + - 'unique : Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + - 'maxprec' : Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + - 'maxprec_equal' : Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string `'1.13'` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + .. 
versionadded:: 1.14.0 Returns ------- @@ -493,47 +600,55 @@ '[0x0L 0x1L 0x2L]' """ + legacy = kwarg.pop('legacy', None) + if kwarg: + msg = "array2string() got unexpected keyword argument '{}'" + raise TypeError(msg.format(kwarg.popitem()[0])) + + overrides = _make_options_dict(precision, threshold, edgeitems, + max_line_width, suppress_small, None, None, + sign, formatter, floatmode, legacy) + options = _format_options.copy() + options.update(overrides) + + if options['legacy'] == '1.13': + if style is np._NoValue: + style = repr + + if a.shape == () and not a.dtype.names: + return style(a.item()) + elif style is not np._NoValue: + # Deprecation 11-9-2017 v1.14 + warnings.warn("'style' argument is deprecated and no longer functional" + " except in 1.13 'legacy' mode", + DeprecationWarning, stacklevel=3) + + if options['legacy'] != '1.13': + options['linewidth'] -= len(suffix) + + # treat as a null array if any of shape elements == 0 + if a.size == 0: + return "[]" + + return _array2string(a, options, separator, prefix) + + +def _extendLine(s, line, word, line_width, next_line_prefix, legacy): + needs_wrap = len(line) + len(word) > line_width + if legacy != '1.13': + s# don't wrap lines if it won't help + if len(line) <= len(next_line_prefix): + needs_wrap = False - if max_line_width is None: - max_line_width = _line_width - - if precision is None: - precision = _float_output_precision - - if suppress_small is None: - suppress_small = _float_output_suppress_small - - if formatter is None: - formatter = _formatter - - if a.shape == (): - x = a.item() - if a.dtype.fields is not None: - arr = array([x], dtype=a.dtype) - format_function = _get_format_function( - arr, precision, suppress_small, formatter) - lst = format_function(arr[0]) - else: - lst = style(x) - elif functools.reduce(product, a.shape) == 0: - # treat as a null array if any of shape elements == 0 - lst = "[]" - else: - lst = _array2string(a, max_line_width, precision, suppress_small, - separator, 
prefix, formatter=formatter) - return lst - - -def _extendLine(s, line, word, max_line_len, next_line_prefix): - if len(line.rstrip()) + len(word.rstrip()) >= max_line_len: + if needs_wrap: s += line.rstrip() + "\n" line = next_line_prefix line += word return s, line -def _formatArray(a, format_function, rank, max_line_len, - next_line_prefix, separator, edge_items, summary_insert): +def _formatArray(a, format_function, line_width, next_line_prefix, + separator, edge_items, summary_insert, legacy): """formatArray is designed for two modes of operation: 1. Full output @@ -541,253 +656,515 @@ 2. Summarized output """ - if rank == 0: - raise ValueError("rank shouldn't be zero.") + def recurser(index, hanging_indent, curr_width): + """ + By using this local function, we don't need to recurse with all the + arguments. Since this function is not created recursively, the cost is + not significant + """ + axis = len(index) + axes_left = a.ndim - axis + + if axes_left == 0: + return format_function(a[index]) + + # when recursing, add a space to align with the [ added, and reduce the + # length of the line by 1 + next_hanging_indent = hanging_indent + ' ' + if legacy == '1.13': + next_width = curr_width + else: + next_width = curr_width - len(']') - if summary_insert and 2*edge_items < len(a): - leading_items = edge_items - trailing_items = edge_items - summary_insert1 = summary_insert - else: - leading_items = 0 - trailing_items = len(a) - summary_insert1 = "" + a_len = a.shape[axis] + show_summary = summary_insert and 2*edge_items < a_len + if show_summary: + leading_items = edge_items + trailing_items = edge_items + else: + leading_items = 0 + trailing_items = a_len + + # stringify the array with the hanging indent on the first line too + s = '' + + # last axis (rows) - wrap elements if they would not fit on one line + if axes_left == 1: + # the length up until the beginning of the separator / bracket + if legacy == '1.13': + elem_width = curr_width - 
len(separator.rstrip()) + else: + elem_width = curr_width - max(len(separator.rstrip()), len(']')) + + line = hanging_indent + for i in range(leading_items): + word = recurser(index + (i,), next_hanging_indent, next_width) + s, line = _extendLine( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if show_summary: + s, line = _extendLine( + s, line, summary_insert, elem_width, hanging_indent, legacy) + if legacy == '1.13': + line += ", " + else: + line += separator + + for i in range(trailing_items, 1, -1): + word = recurser(index + (-i,), next_hanging_indent, next_width) + s, line = _extendLine( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if legacy == '1.13': + # width of the seperator is not considered on 1.13 + elem_width = curr_width + word = recurser(index + (-1,), next_hanging_indent, next_width) + s, line = _extendLine( + s, line, word, elem_width, hanging_indent, legacy) + + s += line + + # other axes - insert newlines between rows + else: + s = '' + line_sep = separator.rstrip() + '\n'*(axes_left - 1) + + for i in range(leading_items): + nested = recurser(index + (i,), next_hanging_indent, next_width) + s += hanging_indent + nested + line_sep + + if show_summary: + if legacy == '1.13': + # trailing space, fixed nbr of newlines, and fixed separator + s += hanging_indent + summary_insert + ", \n" + else: + s += hanging_indent + summary_insert + line_sep + + for i in range(trailing_items, 1, -1): + nested = recurser(index + (-i,), next_hanging_indent, + next_width) + s += hanging_indent + nested + line_sep + + nested = recurser(index + (-1,), next_hanging_indent, next_width) + s += hanging_indent + nested + + # remove the hanging indent, and wrap in [] + s = '[' + s[len(hanging_indent):] + ']' + return s + + try: + # invoke the recursive part with an initial index and prefix + return recurser(index=(), + hanging_indent=next_line_prefix, + curr_width=line_width) + finally: + # recursive closures have a 
cyclic reference to themselves, which + # requires gc to collect (gh-10620). To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + recurser = None + +def _none_or_positive_arg(x, name): + if x is None: + return -1 + if x < 0: + raise ValueError("{} must be >= 0".format(name)) + return x + +class FloatingFormat(object): + """ Formatter for subtypes of np.floating """ + def __init__(self, data, precision, floatmode, suppress_small, sign=False, + **kwarg): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + self._legacy = kwarg.get('legacy', False) + if self._legacy == '1.13': + # when not 0d, legacy does not support '-' + if data.shape != () and sign == '-': + sign = ' ' + + self.floatmode = floatmode + if floatmode == 'unique': + self.precision = None + else: + self.precision = precision + + self.precision = _none_or_positive_arg(self.precision, 'precision') - if rank == 1: - s = "" - line = next_line_prefix - for i in range(leading_items): - word = format_function(a[i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - if summary_insert1: - s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix) - - for i in range(trailing_items, 1, -1): - word = format_function(a[-i]) + separator - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - - word = format_function(a[-1]) - s, line = _extendLine(s, line, word, max_line_len, next_line_prefix) - s += line + "]\n" - s = '[' + s[len(next_line_prefix):] - else: - s = '[' - sep = separator.rstrip() - for i in range(leading_items): - if i > 0: - s += next_line_prefix - s += _formatArray(a[i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1) - - if summary_insert1: - s += next_line_prefix + summary_insert1 + "\n" - - for i in range(trailing_items, 1, -1): - 
if leading_items or i != trailing_items: - s += next_line_prefix - s += _formatArray(a[-i], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert) - s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1) - if leading_items or trailing_items > 1: - s += next_line_prefix - s += _formatArray(a[-1], format_function, rank-1, max_line_len, - " " + next_line_prefix, separator, edge_items, - summary_insert).rstrip()+']\n' - return s - -class FloatFormat(object): - def __init__(self, data, precision, suppress_small, sign=False): - self.precision = precision self.suppress_small = suppress_small self.sign = sign self.exp_format = False self.large_exponent = False - self.max_str_len = 0 - try: - self.fillFormat(data) - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. - pass + + self.fillFormat(data) def fillFormat(self, data): - from . import numeric as _nc + # only the finite values are used to compute the number of digits + finite_vals = data[isfinite(data)] - with _nc.errstate(all='ignore'): - special = isnan(data) | isinf(data) - valid = not_equal(data, 0) & ~special - non_zero = absolute(data.compress(valid)) - if len(non_zero) == 0: - max_val = 0. - min_val = 0. 
- else: - max_val = maximum.reduce(non_zero) - min_val = minimum.reduce(non_zero) - if max_val >= 1.e8: - self.exp_format = True - if not self.suppress_small and (min_val < 0.0001 - or max_val/min_val > 1000.): + # choose exponential mode based on the non-zero finite values: + abs_non_zero = absolute(finite_vals[finite_vals != 0]) + if len(abs_non_zero) != 0: + max_val = np.max(abs_non_zero) + min_val = np.min(abs_non_zero) + with errstate(over='ignore'): # division can overflow + if max_val >= 1.e8 or (not self.suppress_small and + (min_val < 0.0001 or max_val/min_val > 1000.)): self.exp_format = True - if self.exp_format: - self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100 - self.max_str_len = 8 + self.precision - if self.large_exponent: - self.max_str_len += 1 - if self.sign: - format = '%+' + # do a first pass of printing all the numbers, to determine sizes + if len(finite_vals) == 0: + self.pad_left = 0 + self.pad_right = 0 + self.trim = '.' + self.exp_size = -1 + self.unique = True + elif self.exp_format: + trim, unique = '.', True + if self.floatmode == 'fixed' or self._legacy == '1.13': + trim, unique = 'k', False + strs = (dragon4_scientific(x, precision=self.precision, + unique=unique, trim=trim, sign=self.sign == '+') + for x in finite_vals) + frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs)) + int_part, frac_part = zip(*(s.split('.') for s in frac_strs)) + self.exp_size = max(len(s) for s in exp_strs) - 1 + + self.trim = 'k' + self.precision = max(len(s) for s in frac_part) + + # for back-compat with np 1.13, use 2 spaces & sign and full prec + if self._legacy == '1.13': + self.pad_left = 3 else: - format = '%' - format = format + '%d.%de' % (self.max_str_len, self.precision) + # this should be only 1 or 2. Can be calculated from sign. 
+ self.pad_left = max(len(s) for s in int_part) + # pad_right is only needed for nan length calculation + self.pad_right = self.exp_size + 2 + self.precision + + self.unique = False else: - format = '%%.%df' % (self.precision,) - if len(non_zero): - precision = max([_digits(x, self.precision, format) - for x in non_zero]) + # first pass printing to determine sizes + trim, unique = '.', True + if self.floatmode == 'fixed': + trim, unique = 'k', False + strs = (dragon4_positional(x, precision=self.precision, + fractional=True, + unique=unique, trim=trim, + sign=self.sign == '+') + for x in finite_vals) + int_part, frac_part = zip(*(s.split('.') for s in strs)) + if self._legacy == '1.13': + self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) else: - precision = 0 - precision = min(self.precision, precision) - self.max_str_len = len(str(int(max_val))) + precision + 2 - if _nc.any(special): - self.max_str_len = max(self.max_str_len, - len(_nan_str), - len(_inf_str)+1) - if self.sign: - format = '%#+' + self.pad_left = max(len(s) for s in int_part) + self.pad_right = max(len(s) for s in frac_part) + self.exp_size = -1 + + if self.floatmode in ['fixed', 'maxprec_equal']: + self.precision = self.pad_right + self.unique = False + self.trim = 'k' else: - format = '%#' - format = format + '%d.%df' % (self.max_str_len, precision) + self.unique = True + self.trim = '.' 
- self.special_fmt = '%%%ds' % (self.max_str_len,) - self.format = format + if self._legacy != '1.13': + # account for sign = ' ' by adding one to pad_left + if self.sign == ' ' and not any(np.signbit(finite_vals)): + self.pad_left += 1 + + # if there are non-finite values, may need to increase pad_left + if data.size != finite_vals.size: + neginf = self.sign != '-' or any(data[isinf(data)] < 0) + nanlen = len(_format_options['nanstr']) + inflen = len(_format_options['infstr']) + neginf + offset = self.pad_right + 1 # +1 for decimal pt + self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset) - def __call__(self, x, strip_zeros=True): - from . import numeric as _nc + def __call__(self, x): + if not np.isfinite(x): + with errstate(invalid='ignore'): + if np.isnan(x): + sign = '+' if self.sign == '+' else '' + ret = sign + _format_options['nanstr'] + else: # isinf + sign = '-' if x < 0 else '+' if self.sign == '+' else '' + ret = sign + _format_options['infstr'] + return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret - with _nc.errstate(invalid='ignore'): - if isnan(x): - if self.sign: - return self.special_fmt % ('+' + _nan_str,) - else: - return self.special_fmt % (_nan_str,) - elif isinf(x): - if x > 0: - if self.sign: - return self.special_fmt % ('+' + _inf_str,) - else: - return self.special_fmt % (_inf_str,) - else: - return self.special_fmt % ('-' + _inf_str,) + if self.exp_format: + return dragon4_scientific(x, + precision=self.precision, + unique=self.unique, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + exp_digits=self.exp_size) + else: + return dragon4_positional(x, + precision=self.precision, + unique=self.unique, + fractional=True, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + pad_right=self.pad_right) + +# for back-compatibility, we keep the classes for each float type too +class FloatFormat(FloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn("FloatFormat has 
been replaced by FloatingFormat", + DeprecationWarning, stacklevel=2) + super(FloatFormat, self).__init__(*args, **kwargs) + + +class LongFloatFormat(FloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn("LongFloatFormat has been replaced by FloatingFormat", + DeprecationWarning, stacklevel=2) + super(LongFloatFormat, self).__init__(*args, **kwargs) - s = self.format % x - if self.large_exponent: - # 3-digit exponent - expsign = s[-3] - if expsign == '+' or expsign == '-': - s = s[1:-2] + '0' + s[-2:] - elif self.exp_format: - # 2-digit exponent - if s[-3] == '0': - s = ' ' + s[:-3] + s[-2:] - elif strip_zeros: - z = s.rstrip('0') - s = z + ' '*(len(s)-len(z)) - return s +def format_float_scientific(x, precision=None, unique=True, trim='k', + sign=False, pad_left=None, exp_digits=None): + """ + Format a floating-point scalar as a decimal string in scientific notation. + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. -def _digits(x, precision, format): - if precision > 0: - s = format % x - z = s.rstrip('0') - return precision - len(s) + len(z) - else: - return 0 + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + was omitted, print all necessary digits, otherwise digit generation is + cut off after `precision` digits and the remaining value is rounded. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value. 
+ trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + k : keep trailing zeros, keep decimal point (no trimming) + . : trim all trailing zeros, leave decimal point + 0 : trim all but the zero before the decimal point. Insert the + zero if it is missing. + - : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + exp_digits : non-negative integer, optional + Pad the exponent with zeros until it contains at least this many digits. + If omitted, the exponent will be at least 2 digits. + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_positional + + Examples + -------- + >>> np.format_float_scientific(np.float32(np.pi)) + '3.1415927e+00' + >>> s = np.float32(1.23e24) + >>> np.format_float_scientific(s, unique=False, precision=15) + '1.230000071797338e+24' + >>> np.format_float_scientific(s, exp_digits=4) + '1.23e+0024' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') + return dragon4_scientific(x, precision=precision, unique=unique, + trim=trim, sign=sign, pad_left=pad_left, + exp_digits=exp_digits) + +def format_float_positional(x, precision=None, unique=True, + fractional=True, trim='k', sign=False, + pad_left=None, pad_right=None): + """ + Format a floating-point scalar as a decimal string in positional notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. 
+ precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + was omitted, print out all necessary digits, otherwise digit generation + is cut off after `precision` digits and the remaining value is rounded. + If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value. + fractional : boolean, optional + If `True`, the cutoff of `precision` digits refers to the total number + of digits after the decimal point, including leading zeros. + If `False`, `precision` refers to the total number of significant + digits, before or after the decimal point, ignoring leading zeros. + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + k : keep trailing zeros, keep decimal point (no trimming) + . : trim all trailing zeros, leave decimal point + 0 : trim all but the zero before the decimal point. Insert the + zero if it is missing. + - : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + pad_right : non-negative integer, optional + Pad the right side of the string with whitespace until at least that + many characters are to the right of the decimal point. 
+ + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_scientific + + Examples + -------- + >>> np.format_float_scientific(np.float32(np.pi)) + '3.1415927' + >>> np.format_float_positional(np.float16(np.pi)) + '3.14' + >>> np.format_float_positional(np.float16(0.3)) + '0.3' + >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) + '0.3000488281' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + pad_right = _none_or_positive_arg(pad_right, 'pad_right') + return dragon4_positional(x, precision=precision, unique=unique, + fractional=fractional, trim=trim, + sign=sign, pad_left=pad_left, + pad_right=pad_right) class IntegerFormat(object): def __init__(self, data): - try: - max_str_len = max(len(str(maximum.reduce(data))), - len(str(minimum.reduce(data)))) - self.format = '%' + str(max_str_len) + 'd' - except (TypeError, NotImplementedError): - # if reduce(data) fails, this instance will not be called, just - # instantiated in formatdict. 
- pass - except ValueError: - # this occurs when everything is NA - pass + if data.size > 0: + max_str_len = max(len(str(np.max(data))), + len(str(np.min(data)))) + else: + max_str_len = 0 + self.format = '%{}d'.format(max_str_len) def __call__(self, x): - if _MININT < x < _MAXINT: - return self.format % x - else: - return "%s" % x + return self.format % x -class LongFloatFormat(object): - # XXX Have to add something to determine the width to use a la FloatFormat - # Right now, things won't line up properly - def __init__(self, precision, sign=False): - self.precision = precision - self.sign = sign + +class BoolFormat(object): + def __init__(self, data, **kwargs): + # add an extra space so " True" and "False" have the same length and + # array elements align nicely when printed, except in 0d arrays + self.truestr = ' True' if data.shape != () else 'True' def __call__(self, x): - if isnan(x): - if self.sign: - return '+' + _nan_str - else: - return ' ' + _nan_str - elif isinf(x): - if x > 0: - if self.sign: - return '+' + _inf_str - else: - return ' ' + _inf_str - else: - return '-' + _inf_str - elif x >= 0: - if self.sign: - return '+' + format_longfloat(x, self.precision) - else: - return ' ' + format_longfloat(x, self.precision) - else: - return format_longfloat(x, self.precision) + return self.truestr if x else "False" -class LongComplexFormat(object): - def __init__(self, precision): - self.real_format = LongFloatFormat(precision) - self.imag_format = LongFloatFormat(precision, sign=True) +class ComplexFloatingFormat(object): + """ Formatter for subtypes of np.complexfloating """ + def __init__(self, x, precision, floatmode, suppress_small, + sign=False, **kwarg): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + floatmode_real = floatmode_imag = floatmode + if kwarg.get('legacy', False) == '1.13': + floatmode_real = 'maxprec_equal' + floatmode_imag = 'maxprec' + + self.real_format = FloatingFormat(x.real, 
precision, floatmode_real, + suppress_small, sign=sign, **kwarg) + self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag, + suppress_small, sign='+', **kwarg) def __call__(self, x): r = self.real_format(x.real) i = self.imag_format(x.imag) - return r + i + 'j' + # add the 'j' before the terminal whitespace in i + sp = len(i.rstrip()) + i = i[:sp] + 'j' + i[sp:] + + return r + i + +# for back-compatibility, we keep the classes for each complex type too +class ComplexFormat(ComplexFloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn( + "ComplexFormat has been replaced by ComplexFloatingFormat", + DeprecationWarning, stacklevel=2) + super(ComplexFormat, self).__init__(*args, **kwargs) + +class LongComplexFormat(ComplexFloatingFormat): + def __init__(self, *args, **kwargs): + warnings.warn( + "LongComplexFormat has been replaced by ComplexFloatingFormat", + DeprecationWarning, stacklevel=2) + super(LongComplexFormat, self).__init__(*args, **kwargs) -class ComplexFormat(object): - def __init__(self, x, precision, suppress_small): - self.real_format = FloatFormat(x.real, precision, suppress_small) - self.imag_format = FloatFormat(x.imag, precision, suppress_small, - sign=True) + +class _TimelikeFormat(object): + def __init__(self, data): + non_nat = data[~isnat(data)] + if len(non_nat) > 0: + # Max str length of non-NaT elements + max_str_len = max(len(self._format_non_nat(np.max(non_nat))), + len(self._format_non_nat(np.min(non_nat)))) + else: + max_str_len = 0 + if len(non_nat) < data.size: + # data contains a NaT + max_str_len = max(max_str_len, 5) + self._format = '%{}s'.format(max_str_len) + self._nat = "'NaT'".rjust(max_str_len) + + def _format_non_nat(self, x): + # override in subclass + raise NotImplementedError def __call__(self, x): - r = self.real_format(x.real, strip_zeros=False) - i = self.imag_format(x.imag, strip_zeros=False) - if not self.imag_format.exp_format: - z = i.rstrip('0') - i = z + 'j' + ' '*(len(i)-len(z)) + if 
isnat(x): + return self._nat else: - i = i + 'j' - return r + i + return self._format % self._format_non_nat(x) -class DatetimeFormat(object): - def __init__(self, x, unit=None, timezone=None, casting='same_kind'): +class DatetimeFormat(_TimelikeFormat): + def __init__(self, x, unit=None, timezone=None, casting='same_kind', + legacy=False): # Get the unit from the dtype if unit is None: if x.dtype.kind == 'M': @@ -800,38 +1177,26 @@ self.timezone = timezone self.unit = unit self.casting = casting + self.legacy = legacy + + # must be called after the above are configured + super(DatetimeFormat, self).__init__(x) def __call__(self, x): + if self.legacy == '1.13': + return self._format_non_nat(x) + return super(DatetimeFormat, self).__call__(x) + + def _format_non_nat(self, x): return "'%s'" % datetime_as_string(x, unit=self.unit, timezone=self.timezone, casting=self.casting) -class TimedeltaFormat(object): - def __init__(self, data): - if data.dtype.kind == 'm': - nat_value = array(['NaT'], dtype=data.dtype)[0] - int_dtype = dtype(data.dtype.byteorder + 'i8') - int_view = data.view(int_dtype) - v = int_view[not_equal(int_view, nat_value.view(int_dtype))] - if len(v) > 0: - # Max str length of non-NaT elements - max_str_len = max(len(str(maximum.reduce(v))), - len(str(minimum.reduce(v)))) - else: - max_str_len = 0 - if len(v) < len(data): - # data contains a NaT - max_str_len = max(max_str_len, 5) - self.format = '%' + str(max_str_len) + 'd' - self._nat = "'NaT'".rjust(max_str_len) - def __call__(self, x): - # TODO: After NAT == NAT deprecation should be simplified: - if (x + 1).view('i8') == x.view('i8'): - return self._nat - else: - return self.format % x.astype('i8') +class TimedeltaFormat(_TimelikeFormat): + def _format_non_nat(self, x): + return str(x.astype('i8')) class SubArrayFormat(object): @@ -844,13 +1209,321 @@ return "[" + ", ".join(self.__call__(a) for a in arr) + "]" -class StructureFormat(object): +class StructuredVoidFormat(object): + """ + Formatter 
for structured np.void objects. + + This does not work on structured alias types like np.dtype(('i4', 'i2,i2')), + as alias scalars lose their field information, and the implementation + relies upon np.void.__getitem__. + """ def __init__(self, format_functions): self.format_functions = format_functions - self.num_fields = len(format_functions) + + @classmethod + def from_data(cls, data, **options): + """ + This is a second way to initialize StructuredVoidFormat, using the raw data + as input. Added to avoid changing the signature of __init__. + """ + format_functions = [] + for field_name in data.dtype.names: + format_function = _get_format_function(data[field_name], **options) + if data.dtype[field_name].shape != (): + format_function = SubArrayFormat(format_function) + format_functions.append(format_function) + return cls(format_functions) def __call__(self, x): - s = "(" - for field, format_function in zip(x, self.format_functions): - s += format_function(field) + ", " - return (s[:-2] if 1 < self.num_fields else s[:-1]) + ")" + str_fields = [ + format_function(field) + for field, format_function in zip(x, self.format_functions) + ] + if len(str_fields) == 1: + return "({},)".format(str_fields[0]) + else: + return "({})".format(", ".join(str_fields)) + + +# for backwards compatibility +class StructureFormat(StructuredVoidFormat): + def __init__(self, *args, **kwargs): + # NumPy 1.14, 2018-02-14 + warnings.warn( + "StructureFormat has been replaced by StructuredVoidFormat", + DeprecationWarning, stacklevel=2) + super(StructureFormat, self).__init__(*args, **kwargs) + + +def _void_scalar_repr(x): + """ + Implements the repr for structured-void scalars. It is called from the + scalartypes.c.src code, and is placed here because it uses the elementwise + formatters defined above. 
+ """ + return StructuredVoidFormat.from_data(array(x), **_format_options)(x) + + +_typelessdata = [int_, float_, complex_, bool_] +if issubclass(intc, int): + _typelessdata.append(intc) +if issubclass(longlong, int): + _typelessdata.append(longlong) + + +def dtype_is_implied(dtype): + """ + Determine if the given dtype is implied by the representation of its values. + + Parameters + ---------- + dtype : dtype + Data type + + Returns + ------- + implied : bool + True if the dtype is implied by the representation of its values. + + Examples + -------- + >>> np.core.arrayprint.dtype_is_implied(int) + True + >>> np.array([1, 2, 3], int) + array([1, 2, 3]) + >>> np.core.arrayprint.dtype_is_implied(np.int8) + False + >>> np.array([1, 2, 3], np.int8) + array([1, 2, 3], dtype=np.int8) + """ + dtype = np.dtype(dtype) + if _format_options['legacy'] == '1.13' and dtype.type == bool_: + return False + + # not just void types can be structured, and names are not part of the repr + if dtype.names is not None: + return False + + return dtype.type in _typelessdata + + +def dtype_short_repr(dtype): + """ + Convert a dtype to a short form which evaluates to the same dtype. + + The intent is roughly that the following holds + + >>> from numpy import * + >>> assert eval(dtype_short_repr(dt)) == dt + """ + if dtype.names is not None: + # structured dtypes give a list or tuple repr + return str(dtype) + elif issubclass(dtype.type, flexible): + # handle these separately so they don't give garbage like str256 + return "'%s'" % str(dtype) + + typename = dtype.name + # quote typenames which can't be represented as python variable names + if typename and not (typename[0].isalpha() and typename.isalnum()): + typename = repr(typename) + + return typename + + +def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): + """ + Return the string representation of an array. + + Parameters + ---------- + arr : ndarray + Input array. 
+ max_line_width : int, optional + The maximum number of columns the string should span. Newline + characters split the string appropriately after array elements. + precision : int, optional + Floating point precision. Default is the current printing precision + (usually 8), which can be altered using `set_printoptions`. + suppress_small : bool, optional + Represent very small numbers as zero, default is False. Very small + is defined by `precision`, if the precision is 8 then + numbers smaller than 5e-9 are represented as zero. + + Returns + ------- + string : str + The string representation of an array. + + See Also + -------- + array_str, array2string, set_printoptions + + Examples + -------- + >>> np.array_repr(np.array([1,2])) + 'array([1, 2])' + >>> np.array_repr(np.ma.array([0.])) + 'MaskedArray([ 0.])' + >>> np.array_repr(np.array([], np.int32)) + 'array([], dtype=int32)' + + >>> x = np.array([1e-6, 4e-7, 2, 3]) + >>> np.array_repr(x, precision=6, suppress_small=True) + 'array([ 0.000001, 0. , 2. , 3. ])' + + """ + if max_line_width is None: + max_line_width = _format_options['linewidth'] + + if type(arr) is not ndarray: + class_name = type(arr).__name__ + else: + class_name = "array" + + skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0 + + prefix = class_name + "(" + suffix = ")" if skipdtype else "," + + if (_format_options['legacy'] == '1.13' and + arr.shape == () and not arr.dtype.names): + lst = repr(arr.item()) + elif arr.size > 0 or arr.shape == (0,): + lst = array2string(arr, max_line_width, precision, suppress_small, + ', ', prefix, suffix=suffix) + else: # show zero-length shape unless it is (0,) + lst = "[], shape=%s" % (repr(arr.shape),) + + arr_str = prefix + lst + suffix + + if skipdtype: + return arr_str + + dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype)) + + # compute whether we should put dtype on a new line: Do so if adding the + # dtype would extend the last line past max_line_width. 
+ # Note: This line gives the correct result even when rfind returns -1. + last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) + spacer = " " + if _format_options['legacy'] == '1.13': + if issubclass(arr.dtype.type, flexible): + spacer = '\n' + ' '*len(class_name + "(") + elif last_line_len + len(dtype_str) + 1 > max_line_width: + spacer = '\n' + ' '*len(class_name + "(") + + return arr_str + spacer + dtype_str + +_guarded_str = _recursive_guard()(str) + +def array_str(a, max_line_width=None, precision=None, suppress_small=None): + """ + Return a string representation of the data in an array. + + The data in the array is returned as a single string. This function is + similar to `array_repr`, the difference being that `array_repr` also + returns information on the kind of array and its data type. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. The + default is, indirectly, 75. + precision : int, optional + Floating point precision. Default is the current printing precision + (usually 8), which can be altered using `set_printoptions`. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + + See Also + -------- + array2string, array_repr, set_printoptions + + Examples + -------- + >>> np.array_str(np.arange(3)) + '[0 1 2]' + + """ + if (_format_options['legacy'] == '1.13' and + a.shape == () and not a.dtype.names): + return str(a.item()) + + # the str of 0d arrays is a special case: It should appear like a scalar, + # so floats are not truncated by `precision`, and strings are not wrapped + # in quotes. So we return the str of the scalar value. 
+ if a.shape == (): + # obtain a scalar and call str on it, avoiding problems for subclasses + # for which indexing with () returns a 0d instead of a scalar by using + # ndarray's getindex. Also guard against recursive 0d object arrays. + return _guarded_str(np.ndarray.__getitem__(a, ())) + + return array2string(a, max_line_width, precision, suppress_small, ' ', "") + +def set_string_function(f, repr=True): + """ + Set a Python function to be used when pretty printing arrays. + + Parameters + ---------- + f : function or None + Function to be used to pretty print arrays. The function should expect + a single array argument and return a string of the representation of + the array. If None, the function is reset to the default NumPy function + to print arrays. + repr : bool, optional + If True (default), the function for pretty printing (``__repr__``) + is set, if False the function that returns the default string + representation (``__str__``) is set. + + See Also + -------- + set_printoptions, get_printoptions + + Examples + -------- + >>> def pprint(arr): + ... return 'HA! - What are you going to do now?' + ... + >>> np.set_string_function(pprint) + >>> a = np.arange(10) + >>> a + HA! - What are you going to do now? + >>> print(a) + [0 1 2 3 4 5 6 7 8 9] + + We can reset the function to the default: + + >>> np.set_string_function(None) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + `repr` affects either pretty printing or normal string representation. + Note that ``__repr__`` is still affected by setting ``__str__`` + because the width of each array element in the returned string becomes + equal to the length of the result of ``__str__()``. 
+ + >>> x = np.arange(4) + >>> np.set_string_function(lambda x:'random', repr=False) + >>> x.__str__() + 'random' + >>> x.__repr__() + 'array([ 0, 1, 2, 3])' + + """ + if f is None: + if repr: + return multiarray.set_string_function(array_repr, 1) + else: + return multiarray.set_string_function(array_str, 0) + else: + return multiarray.set_string_function(f, repr) + +set_string_function(array_str, 0) +set_string_function(array_repr, 1) diff -Nru python-numpy-1.13.3/numpy/core/code_generators/cversions.txt python-numpy-1.14.5/numpy/core/code_generators/cversions.txt --- python-numpy-1.13.3/numpy/core/code_generators/cversions.txt 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/code_generators/cversions.txt 2018-06-12 18:28:52.000000000 +0000 @@ -37,3 +37,7 @@ # Version 11 (NumPy 1.13) Added PyArray_MapIterArrayCopyIfOverlap 0x0000000b = edb1ba83730c650fd9bc5772a919cda7 + +# Version 12 (NumPy 1.14) Added PyArray_ResolveWritebackIfCopy, +# PyArray_SetWritebackIfCopyBase and deprecated PyArray_SetUpdateIfCopyBase. 
+0x0000000c = a1bc756c5782853ec2e3616cf66869d8 diff -Nru python-numpy-1.13.3/numpy/core/code_generators/genapi.py python-numpy-1.14.5/numpy/core/code_generators/genapi.py --- python-numpy-1.13.3/numpy/core/code_generators/genapi.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/code_generators/genapi.py 2018-06-12 17:31:56.000000000 +0000 @@ -52,6 +52,7 @@ join('multiarray', 'scalarapi.c'), join('multiarray', 'sequence.c'), join('multiarray', 'shape.c'), + join('multiarray', 'strfuncs.c'), join('multiarray', 'usertypes.c'), join('umath', 'loops.c.src'), join('umath', 'ufunc_object.c'), @@ -71,7 +72,7 @@ return str.replace('Bool', 'npy_bool') -class StealRef: +class StealRef(object): def __init__(self, arg): self.arg = arg # counting from 1 @@ -82,7 +83,7 @@ return 'NPY_STEALS_REF_TO_ARG(%d)' % self.arg -class NonNull: +class NonNull(object): def __init__(self, arg): self.arg = arg # counting from 1 @@ -271,7 +272,7 @@ state = SCANNING else: function_args.append(line) - except: + except Exception: print(filename, lineno + 1) raise fo.close() diff -Nru python-numpy-1.13.3/numpy/core/code_generators/generate_numpy_api.py python-numpy-1.14.5/numpy/core/code_generators/generate_numpy_api.py --- python-numpy-1.13.3/numpy/core/code_generators/generate_numpy_api.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/code_generators/generate_numpy_api.py 2018-06-12 17:31:56.000000000 +0000 @@ -220,8 +220,13 @@ multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) if len(multiarray_api_dict) != len(multiarray_api_index): - raise AssertionError("Multiarray API size mismatch %d %d" % - (len(multiarray_api_dict), len(multiarray_api_index))) + keys_dict = set(multiarray_api_dict.keys()) + keys_index = set(multiarray_api_index.keys()) + raise AssertionError( + "Multiarray API size mismatch - " + "index has extra keys {}, dict has extra keys {}" + .format(keys_index - keys_dict, keys_dict - keys_index) + ) 
extension_list = [] for name, index in genapi.order_dict(multiarray_api_index): diff -Nru python-numpy-1.13.3/numpy/core/code_generators/numpy_api.py python-numpy-1.14.5/numpy/core/code_generators/numpy_api.py --- python-numpy-1.13.3/numpy/core/code_generators/numpy_api.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/code_generators/numpy_api.py 2018-06-12 18:28:52.000000000 +0000 @@ -346,6 +346,9 @@ # End 1.10 API 'PyArray_MapIterArrayCopyIfOverlap': (301,), # End 1.13 API + 'PyArray_ResolveWritebackIfCopy': (302,), + 'PyArray_SetWritebackIfCopyBase': (303,), + # End 1.14 API } ufunc_types_api = { diff -Nru python-numpy-1.13.3/numpy/core/code_generators/ufunc_docstrings.py python-numpy-1.14.5/numpy/core/code_generators/ufunc_docstrings.py --- python-numpy-1.13.3/numpy/core/code_generators/ufunc_docstrings.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/code_generators/ufunc_docstrings.py 2018-06-12 18:28:52.000000000 +0000 @@ -573,7 +573,7 @@ >>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16])) array([ 2, 4, 16]) >>> np.bitwise_and([True, True], [False, True]) - array([False, True], dtype=bool) + array([False, True]) """) @@ -630,7 +630,7 @@ ... np.array([4, 4, 4, 2147483647L], dtype=np.int32)) array([ 6, 5, 255, 2147483647]) >>> np.bitwise_or([True, True], [False, True]) - array([ True, True], dtype=bool) + array([ True, True]) """) @@ -680,7 +680,7 @@ >>> np.bitwise_xor([31,3], [5,6]) array([26, 5]) >>> np.bitwise_xor([True, True], [False, True]) - array([ True, False], dtype=bool) + array([ True, False]) """) @@ -1057,13 +1057,13 @@ Examples -------- >>> np.equal([0, 1, 3], np.arange(3)) - array([ True, True, False], dtype=bool) + array([ True, True, False]) What is compared are values, not types. 
So an int (1) and an array of length one can evaluate as True: >>> np.equal(1, np.ones(1)) - array([ True], dtype=bool) + array([ True]) """) @@ -1389,14 +1389,14 @@ Examples -------- >>> np.greater([4,2],[2,2]) - array([ True, False], dtype=bool) + array([ True, False]) If the inputs are ndarrays, then np.greater is equivalent to '>'. >>> a = np.array([4,2]) >>> b = np.array([2,2]) >>> a > b - array([ True, False], dtype=bool) + array([ True, False]) """) @@ -1424,7 +1424,7 @@ Examples -------- >>> np.greater_equal([4, 2, 1], [2, 2, 2]) - array([ True, True, False], dtype=bool) + array([ True, True, False]) """) @@ -1541,7 +1541,7 @@ Booleans are accepted as well: >>> np.invert(array([True, False])) - array([False, True], dtype=bool) + array([False, True]) """) @@ -1599,7 +1599,7 @@ >>> np.isfinite(np.NINF) False >>> np.isfinite([np.log(-1.),1.,np.log(0)]) - array([False, True, False], dtype=bool) + array([False, True, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) @@ -1661,7 +1661,7 @@ >>> np.isinf(np.NINF) True >>> np.isinf([np.inf, -np.inf, 1.0, np.nan]) - array([ True, True, False, False], dtype=bool) + array([ True, True, False, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) @@ -1709,7 +1709,7 @@ >>> np.isnan(np.inf) False >>> np.isnan([np.log(-1.),1.,np.log(0)]) - array([ True, False, False], dtype=bool) + array([ True, False, False]) """) @@ -1745,7 +1745,7 @@ >>> np.isnat(np.datetime64("2016-01-01")) False >>> np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]")) - array([ True, False], dtype=bool) + array([ True, False]) """) @@ -1814,7 +1814,7 @@ Examples -------- >>> np.less([1, 2], [2, 2]) - array([ True, False], dtype=bool) + array([ True, False]) """) @@ -1842,7 +1842,7 @@ Examples -------- >>> np.less_equal([4, 2, 1], [2, 2, 2]) - array([False, True, True], dtype=bool) + array([False, True, True]) """) @@ -2155,11 +2155,11 @@ >>> np.logical_and(True, False) False >>> 
np.logical_and([True, False], [False, False]) - array([False, False], dtype=bool) + array([False, False]) >>> x = np.arange(5) >>> np.logical_and(x>1, x<4) - array([False, False, True, True, False], dtype=bool) + array([False, False, True, True, False]) """) @@ -2188,11 +2188,11 @@ >>> np.logical_not(3) False >>> np.logical_not([True, False, 0, 1]) - array([False, True, True, False], dtype=bool) + array([False, True, True, False]) >>> x = np.arange(5) >>> np.logical_not(x<3) - array([False, False, False, True, True], dtype=bool) + array([False, False, False, True, True]) """) @@ -2223,11 +2223,11 @@ >>> np.logical_or(True, False) True >>> np.logical_or([True, False], [False, False]) - array([ True, False], dtype=bool) + array([ True, False]) >>> x = np.arange(5) >>> np.logical_or(x < 1, x > 3) - array([ True, False, False, False, True], dtype=bool) + array([ True, False, False, False, True]) """) @@ -2258,17 +2258,17 @@ >>> np.logical_xor(True, False) True >>> np.logical_xor([True, True, False, False], [True, False, True, False]) - array([False, True, True, False], dtype=bool) + array([False, True, True, False]) >>> x = np.arange(5) >>> np.logical_xor(x < 1, x > 3) - array([ True, False, False, False, True], dtype=bool) + array([ True, False, False, False, True]) Simple example showing support of broadcasting >>> np.logical_xor(0, np.eye(2)) array([[ True, False], - [False, True]], dtype=bool) + [False, True]]) """) @@ -2647,10 +2647,10 @@ Examples -------- >>> np.not_equal([1.,2.], [1., 3.]) - array([False, True], dtype=bool) + array([False, True]) >>> np.not_equal([1, 2], [[1, 3],[1, 4]]) array([[False, True], - [False, True]], dtype=bool) + [False, True]]) """) @@ -2887,8 +2887,18 @@ Computes the remainder complementary to the `floor_divide` function. It is equivalent to the Python modulus operator``x1 % x2`` and has the same sign - as the divisor `x2`. It should not be confused with the Matlab(TM) ``rem`` - function. + as the divisor `x2`. 
The MATLAB function equivalent to ``np.remainder`` + is ``mod``. + + .. warning:: + + This should not be confused with: + + * Python 3.7's `math.remainder` and C's ``remainder``, which + computes the IEEE remainder, which are the complement to + ``round(x1 / x2)``. + * The MATLAB ``rem`` function and or the C ``%`` operator which is the + complement to ``int(x1 / x2)``. Parameters ---------- @@ -2908,7 +2918,7 @@ -------- floor_divide : Equivalent of Python ``//`` operator. divmod : Simultaneous floor division and remainder. - fmod : Equivalent of the Matlab(TM) ``rem`` function. + fmod : Equivalent of the MATLAB ``rem`` function. divide, floor Notes @@ -3092,7 +3102,7 @@ >>> np.signbit(-1.2) True >>> np.signbit(np.array([1, -2.3, 2.1])) - array([False, True, False], dtype=bool) + array([False, True, False]) """) @@ -3156,7 +3166,7 @@ >>> np.nextafter(1, 2) == eps + 1 True >>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps] - array([ True, True], dtype=bool) + array([ True, True]) """) diff -Nru python-numpy-1.13.3/numpy/core/defchararray.py python-numpy-1.14.5/numpy/core/defchararray.py --- python-numpy-1.13.3/numpy/core/defchararray.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/defchararray.py 2018-06-12 17:31:56.000000000 +0000 @@ -575,9 +575,9 @@ array(['foo', 'bar'], dtype='|S3') >>> np.char.endswith(s, 'ar') - array([False, True], dtype=bool) + array([False, True]) >>> np.char.endswith(s, 'a', start=1, end=2) - array([False, True], dtype=bool) + array([False, True]) """ return _vec_string( diff -Nru python-numpy-1.13.3/numpy/core/einsumfunc.py python-numpy-1.14.5/numpy/core/einsumfunc.py --- python-numpy-1.13.3/numpy/core/einsumfunc.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/einsumfunc.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,8 +4,9 @@ """ from __future__ import division, absolute_import, print_function +from numpy.compat import basestring from numpy.core.multiarray import c_einsum -from 
numpy.core.numeric import asarray, asanyarray, result_type +from numpy.core.numeric import asarray, asanyarray, result_type, tensordot, dot __all__ = ['einsum', 'einsum_path'] @@ -166,8 +167,14 @@ new_pos = positions + [con] iter_results.append((new_cost, new_pos, new_input_sets)) - # Update list to iterate over - full_results = iter_results + # Update combinatorial list, if we did not find anything return best + # path + remaining contractions + if iter_results: + full_results = iter_results + else: + path = min(full_results, key=lambda x: x[0])[1] + path += [tuple(range(len(input_sets) - iteration))] + return path # If we have not found anything return single einsum contraction if len(full_results) == 0: @@ -256,6 +263,114 @@ return path +def _can_dot(inputs, result, idx_removed): + """ + Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. + + Parameters + ---------- + inputs : list of str + Specifies the subscripts for summation. + result : str + Resulting summation. + idx_removed : set + Indices that are removed in the summation + + + Returns + ------- + type : bool + Returns true if BLAS should and can be used, else False + + Notes + ----- + If the operations is BLAS level 1 or 2 and is not already aligned + we default back to einsum as the memory movement to copy is more + costly than the operation itself. 
+ + + Examples + -------- + + # Standard GEMM operation + >>> _can_dot(['ij', 'jk'], 'ik', set('j')) + True + + # Can use the standard BLAS, but requires odd data movement + >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) + False + + # DDOT where the memory is not aligned + >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) + False + + """ + + # All `dot` calls remove indices + if len(idx_removed) == 0: + return False + + # BLAS can only handle two operands + if len(inputs) != 2: + return False + + # Build a few temporaries + input_left, input_right = inputs + set_left = set(input_left) + set_right = set(input_right) + keep_left = set_left - idx_removed + keep_right = set_right - idx_removed + rs = len(idx_removed) + + # Indices must overlap between the two operands + if not len(set_left & set_right): + return False + + # We cannot have duplicate indices ("ijj, jk -> ik") + if (len(set_left) != len(input_left)) or (len(set_right) != len(input_right)): + return False + + # Cannot handle partial inner ("ij, ji -> i") + if len(keep_left & keep_right): + return False + + # At this point we are a DOT, GEMV, or GEMM operation + + # Handle inner products + + # DDOT with aligned data + if input_left == input_right: + return True + + # DDOT without aligned data (better to use einsum) + if set_left == set_right: + return False + + # Handle the 4 possible (aligned) GEMV or GEMM cases + + # GEMM or GEMV no transpose + if input_left[-rs:] == input_right[:rs]: + return True + + # GEMM or GEMV transpose both + if input_left[:rs] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose right + if input_left[-rs:] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose left + if input_left[:rs] == input_right[:rs]: + return True + + # Einsum is faster than GEMV if we have to copy data + if not keep_left or not keep_right: + return False + + # We are a matrix-matrix product, but we need to copy data + return True + def _parse_einsum_input(operands): """ A reproduction of 
einsum c side einsum parsing in python. @@ -285,7 +400,7 @@ if len(operands) == 0: raise ValueError("No input operands") - if isinstance(operands[0], str): + if isinstance(operands[0], basestring): subscripts = operands[0].replace(" ", "") operands = [asanyarray(v) for v in operands[1:]] @@ -542,7 +657,7 @@ " %s" % unknown_kwargs) # Figure out what the path really is - path_type = kwargs.pop('optimize', False) + path_type = kwargs.pop('optimize', True) if path_type is True: path_type = 'greedy' if path_type is None: @@ -551,7 +666,7 @@ memory_limit = None # No optimization or a named path algorithm - if (path_type is False) or isinstance(path_type, str): + if (path_type is False) or isinstance(path_type, basestring): pass # Given an explicit path @@ -559,7 +674,7 @@ pass # Path tuple with memory limit - elif ((len(path_type) == 2) and isinstance(path_type[0], str) and + elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and isinstance(path_type[1], (int, float))): memory_limit = int(path_type[1]) path_type = path_type[0] @@ -586,14 +701,18 @@ sh = operands[tnum].shape if len(sh) != len(term): raise ValueError("Einstein sum subscript %s does not contain the " - "correct number of indices for operand %d.", - input_subscripts[tnum], tnum) + "correct number of indices for operand %d." + % (input_subscripts[tnum], tnum)) for cnum, char in enumerate(term): dim = sh[cnum] if char in dimension_dict.keys(): - if dimension_dict[char] != dim: - raise ValueError("Size of label '%s' for operand %d does " - "not match previous terms.", char, tnum) + # For broadcasting cases we always want the largest dim size + if dimension_dict[char] == 1: + dimension_dict[char] = dim + elif dim not in (1, dimension_dict[char]): + raise ValueError("Size of label '%s' for operand %d (%d) " + "does not match previous terms (%d)." 
+ % (char, tnum, dimension_dict[char], dim)) else: dimension_dict[char] = dim @@ -653,6 +772,8 @@ for x in contract_inds: tmp_inputs.append(input_list.pop(x)) + do_blas = _can_dot(tmp_inputs, out_inds, idx_removed) + # Last contraction if (cnum - len(path)) == -1: idx_result = output_subscript @@ -663,7 +784,7 @@ input_list.append(idx_result) einsum_str = ",".join(tmp_inputs) + "->" + idx_result - contraction = (contract_inds, idx_removed, einsum_str, input_list[:]) + contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas) contraction_list.append(contraction) opt_cost = sum(cost_list) + 1 @@ -690,7 +811,7 @@ path_print += "-" * 74 for n, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining = contraction + inds, idx_rm, einsum_str, remaining, blas = contraction remaining_str = ",".join(remaining) + "->" + output_subscript path_run = (scale_list[n], einsum_str, remaining_str) path_print += "\n%4d %24s %40s" % path_run @@ -969,19 +1090,63 @@ # Build the contraction list and operand operands, contraction_list = einsum_path(*operands, optimize=optimize_arg, einsum_call=True) + + handle_out = False + # Start contraction loop for num, contraction in enumerate(contraction_list): - inds, idx_rm, einsum_str, remaining = contraction + inds, idx_rm, einsum_str, remaining, blas = contraction tmp_operands = [] for x in inds: tmp_operands.append(operands.pop(x)) - # If out was specified + # Do we need to deal with the output? 
if specified_out and ((num + 1) == len(contraction_list)): - einsum_kwargs["out"] = out_array + handle_out = True + + # Handle broadcasting vs BLAS cases + if blas: + # Checks have already been handled + input_str, results_index = einsum_str.split('->') + input_left, input_right = input_str.split(',') + if 1 in tmp_operands[0].shape or 1 in tmp_operands[1].shape: + left_dims = {dim: size for dim, size in + zip(input_left, tmp_operands[0].shape)} + right_dims = {dim: size for dim, size in + zip(input_right, tmp_operands[1].shape)} + # If dims do not match we are broadcasting, BLAS off + if any(left_dims[ind] != right_dims[ind] for ind in idx_rm): + blas = False + + # Call tensordot if still possible + if blas: + tensor_result = input_left + input_right + for s in idx_rm: + tensor_result = tensor_result.replace(s, "") + + # Find indices to contract over + left_pos, right_pos = [], [] + for s in idx_rm: + left_pos.append(input_left.find(s)) + right_pos.append(input_right.find(s)) + + # Contract! 
+ new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos))) + + # Build a new view if needed + if (tensor_result != results_index) or handle_out: + if handle_out: + einsum_kwargs["out"] = out_array + new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs) + + # Call einsum + else: + # If out was specified + if handle_out: + einsum_kwargs["out"] = out_array - # Do the contraction - new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs) + # Do the contraction + new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs) # Append new items and derefernce what we can operands.append(new_view) diff -Nru python-numpy-1.13.3/numpy/core/fromnumeric.py python-numpy-1.14.5/numpy/core/fromnumeric.py --- python-numpy-1.13.3/numpy/core/fromnumeric.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/fromnumeric.py 2018-06-12 18:28:52.000000000 +0000 @@ -28,12 +28,7 @@ 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', ] - -try: - _gentype = types.GeneratorType -except AttributeError: - _gentype = type(None) - +_gentype = types.GeneratorType # save away Python sum _sum_ = sum @@ -71,15 +66,28 @@ """ Take elements from an array along an axis. - This function does the same thing as "fancy" indexing (indexing arrays - using arrays); however, it can be easier to use if you need elements - along a given axis. + When axis is not None, this function does the same thing as "fancy" + indexing (indexing arrays using arrays); however, it can be easier to use + if you need elements along a given axis. A call such as + ``np.take(arr, indices, axis=3)`` is equivalent to + ``arr[:,:,:,indices,...]``. 
+ + Explained without fancy indexing, this is equivalent to the following use + of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of + indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + Nj = indices.shape + for ii in ndindex(Ni): + for jj in ndindex(Nj): + for kk in ndindex(Nk): + out[ii + jj + kk] = a[ii + (indices[jj],) + kk] Parameters ---------- - a : array_like + a : array_like (Ni..., M, Nk...) The source array. - indices : array_like + indices : array_like (Nj...) The indices of the values to extract. .. versionadded:: 1.8.0 @@ -88,7 +96,7 @@ axis : int, optional The axis over which to select values. By default, the flattened input array is used. - out : ndarray, optional + out : ndarray, optional (Ni..., Nj..., Nk...) If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. mode : {'raise', 'wrap', 'clip'}, optional @@ -104,7 +112,7 @@ Returns ------- - subarray : ndarray + out : ndarray (Ni..., Nj..., Nk...) The returned array has the same type as `a`. See Also @@ -112,6 +120,23 @@ compress : Take elements using a boolean mask ndarray.take : equivalent method + Notes + ----- + + By eliminating the inner loop in the description above, and using `s_` to + build simple slice objects, `take` can be expressed in terms of applying + fancy indexing to each 1-d slice:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nj): + out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] + + For this reason, it is equivalent to (but faster than) the following use + of `apply_along_axis`:: + + out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a) + Examples -------- >>> a = [4, 3, 5, 7, 6, 8] @@ -176,11 +201,11 @@ Notes ----- It is not always possible to change the shape of an array without - copying the data. If you want an error to be raise if the data is copied, + copying the data. 
If you want an error to be raised when the data is copied, you should assign the new shape to the shape attribute of the array:: >>> a = np.zeros((10, 2)) - # A transpose make the array non-contiguous + # A transpose makes the array non-contiguous >>> b = a.T # Taking a view makes it possible to modify the shape without modifying # the initial object. @@ -882,14 +907,22 @@ array([[0, 3], [2, 2]]) - >>> np.argsort(x, axis=0) + >>> np.argsort(x, axis=0) # sorts along first axis (down) array([[0, 1], [1, 0]]) - >>> np.argsort(x, axis=1) + >>> np.argsort(x, axis=1) # sorts along last axis (across) array([[0, 1], [0, 1]]) + Indices of the sorted elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape) + >>> ind + (array([0, 1, 1, 0]), array([0, 0, 1, 1])) + >>> x[ind] # same as np.sort(x, axis=None) + array([0, 2, 2, 3]) + Sorting with keys: >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '>> np.argmax(a, axis=1) array([2, 2]) + Indexes of the maximal elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape) + >>> ind + (1, 2) + >>> a[ind] + 5 + >>> b = np.arange(6) >>> b[1] = 5 >>> b array([0, 5, 2, 3, 4, 5]) - >>> np.argmax(b) # Only the first occurrence is returned. + >>> np.argmax(b) # Only the first occurrence is returned. 1 """ @@ -1008,11 +1049,19 @@ >>> np.argmin(a, axis=1) array([0, 0]) + Indices of the minimum elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) + >>> ind + (0, 0) + >>> a[ind] + 0 + >>> b = np.arange(6) >>> b[4] = 0 >>> b array([0, 1, 2, 3, 0, 5]) - >>> np.argmin(b) # Only the first occurrence is returned. + >>> np.argmin(b) # Only the first occurrence is returned. 
0 """ @@ -1120,18 +1169,16 @@ new_shape = (new_shape,) a = ravel(a) Na = len(a) - if not Na: - return mu.zeros(new_shape, a.dtype) total_size = um.multiply.reduce(new_shape) + if Na == 0 or total_size == 0: + return mu.zeros(new_shape, a.dtype) + n_copies = int(total_size / Na) extra = total_size % Na - if total_size == 0: - return a[:0] - if extra != 0: - n_copies = n_copies+1 - extra = Na-extra + n_copies = n_copies + 1 + extra = Na - extra a = concatenate((a,)*n_copies) if extra > 0: @@ -1252,13 +1299,13 @@ Returns ------- array_of_diagonals : ndarray - If `a` is 2-D and not a matrix, a 1-D array of the same type as `a` - containing the diagonal is returned. If `a` is a matrix, a 1-D + If `a` is 2-D and not a `matrix`, a 1-D array of the same type as `a` + containing the diagonal is returned. If `a` is a `matrix`, a 1-D array containing the diagonal is returned in order to maintain - backward compatibility. If the dimension of `a` is greater than - two, then an array of diagonals is returned, "packed" from - left-most dimension to right-most (e.g., if `a` is 3-D, then the - diagonals are "packed" along rows). + backward compatibility. + If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` + are removed, and a new axis inserted at the end corresponding to the + diagonal. Raises ------ @@ -1531,14 +1578,15 @@ [0, 2, 0], [1, 1, 0]]) >>> np.nonzero(x) - (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64)) + (array([0, 1, 2, 2]), array([0, 1, 0, 1])) >>> x[np.nonzero(x)] - array([ 1., 1., 1.]) + array([1, 2, 1, 1]) >>> np.transpose(np.nonzero(x)) array([[0, 0], [1, 1], - [2, 2]]) + [2, 0], + [2, 1]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. 
Given an array `a`, the condition `a` > 3 is a @@ -1549,7 +1597,7 @@ >>> a > 3 array([[False, False, False], [ True, True, True], - [ True, True, True]], dtype=bool) + [ True, True, True]]) >>> np.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) @@ -1944,7 +1992,7 @@ True >>> np.any([[True, False], [False, False]], axis=0) - array([ True, False], dtype=bool) + array([ True, False]) >>> np.any([-1, 0, 5]) True @@ -1955,7 +2003,7 @@ >>> o=np.array([False]) >>> z=np.any([-1, 4, 5], out=o) >>> z, o - (array([ True], dtype=bool), array([ True], dtype=bool)) + (array([ True]), array([ True])) >>> # Check now that z is a reference to o >>> z is o True @@ -2029,7 +2077,7 @@ False >>> np.all([[True,False],[True,True]], axis=0) - array([ True, False], dtype=bool) + array([ True, False]) >>> np.all([-1, 4, 5]) True @@ -2040,7 +2088,7 @@ >>> o=np.array([False]) >>> z=np.all([-1, 4, 5], out=o) >>> id(z), id(o), z # doctest: +SKIP - (28293632, 28293632, array([ True], dtype=bool)) + (28293632, 28293632, array([ True])) """ arr = asanyarray(a) @@ -2248,7 +2296,7 @@ >>> np.amax(a, axis=1) # Maxima along the second axis array([1, 3]) - >>> b = np.arange(5, dtype=np.float) + >>> b = np.arange(5, dtype=float) >>> b[2] = np.NaN >>> np.amax(b) nan @@ -2349,7 +2397,7 @@ >>> np.amin(a, axis=1) # Minima along the second axis array([0, 2]) - >>> b = np.arange(5, dtype=np.float) + >>> b = np.arange(5, dtype=float) >>> b[2] = np.NaN >>> np.amin(b) nan @@ -2463,7 +2511,7 @@ raised on overflow. 
That means that, on a 32-bit platform: >>> x = np.array([536870910, 536870910, 536870910, 536870910]) - >>> np.prod(x) #random + >>> np.prod(x) # random 16 The product of an empty array is the neutral element 1: @@ -2499,7 +2547,7 @@ is the default platform integer: >>> x = np.array([1, 2, 3], dtype=np.int8) - >>> np.prod(x).dtype == np.int + >>> np.prod(x).dtype == int True """ diff -Nru python-numpy-1.13.3/numpy/core/function_base.py python-numpy-1.14.5/numpy/core/function_base.py --- python-numpy-1.13.3/numpy/core/function_base.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/function_base.py 2018-06-12 17:31:56.000000000 +0000 @@ -115,17 +115,24 @@ y = _nx.arange(0, num, dtype=dt) delta = stop - start + # In-place multiplication y *= delta/div is faster, but prevents the multiplicant + # from overriding what class is produced, and thus prevents, e.g. use of Quantities, + # see gh-7142. Hence, we multiply in place only for standard scalar types. + _mult_inplace = _nx.isscalar(delta) if num > 1: step = delta / div if step == 0: # Special handling for denormal numbers, gh-5437 y /= div - y = y * delta + if _mult_inplace: + y *= delta + else: + y = y * delta else: - # One might be tempted to use faster, in-place multiplication here, - # but this prevents step from overriding what class is produced, - # and thus prevents, e.g., use of Quantities; see gh-7142. 
- y = y * step + if _mult_inplace: + y *= step + else: + y = y * step else: # 0 and 1 item long sequences have an undefined step step = NaN @@ -339,7 +346,7 @@ # complex and another is negative and log would produce NaN otherwise start = start + (stop - stop) stop = stop + (start - start) - if _nx.issubdtype(dtype, complex): + if _nx.issubdtype(dtype, _nx.complexfloating): start = start + 0j stop = stop + 0j diff -Nru python-numpy-1.13.3/numpy/core/include/numpy/ndarrayobject.h python-numpy-1.14.5/numpy/core/include/numpy/ndarrayobject.h --- python-numpy-1.13.3/numpy/core/include/numpy/ndarrayobject.h 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/include/numpy/ndarrayobject.h 2018-06-12 17:31:56.000000000 +0000 @@ -170,16 +170,20 @@ (k)*PyArray_STRIDES(obj)[2] + \ (l)*PyArray_STRIDES(obj)[3])) +/* Move to arrayobject.c once PyArray_XDECREF_ERR is removed */ static NPY_INLINE void -PyArray_XDECREF_ERR(PyArrayObject *arr) +PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) { - if (arr != NULL) { - if (PyArray_FLAGS(arr) & NPY_ARRAY_UPDATEIFCOPY) { - PyArrayObject *base = (PyArrayObject *)PyArray_BASE(arr); - PyArray_ENABLEFLAGS(base, NPY_ARRAY_WRITEABLE); + PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; + if (fa && fa->base) { + if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) || + (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) { + PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); + Py_DECREF(fa->base); + fa->base = NULL; + PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); PyArray_CLEARFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); } - Py_DECREF(arr); } } @@ -231,13 +235,53 @@ dict. 
*/ -#define NPY_TITLE_KEY(key, value) ((PyTuple_GET_SIZE((value))==3) && \ - (PyTuple_GET_ITEM((value), 2) == (key))) +static NPY_INLINE int +NPY_TITLE_KEY_check(PyObject *key, PyObject *value) +{ + PyObject *title; + if (PyTuple_GET_SIZE(value) != 3) { + return 0; + } + title = PyTuple_GET_ITEM(value, 2); + if (key == title) { + return 1; + } +#ifdef PYPY_VERSION + /* + * On PyPy, dictionary keys do not always preserve object identity. + * Fall back to comparison by value. + */ + if (PyUnicode_Check(title) && PyUnicode_Check(key)) { + return PyUnicode_Compare(title, key) == 0 ? 1 : 0; + } +#if PY_VERSION_HEX < 0x03000000 + if (PyString_Check(title) && PyString_Check(key)) { + return PyObject_Compare(title, key) == 0 ? 1 : 0; + } +#endif +#endif + return 0; +} +/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */ +#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value))) #define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1) #define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1) +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_14_API_VERSION) +static NPY_INLINE void +PyArray_XDECREF_ERR(PyArrayObject *arr) +{ + /* 2017-Nov-10 1.14 */ + DEPRECATE("PyArray_XDECREF_ERR is deprecated, call " + "PyArray_DiscardWritebackIfCopy then Py_XDECREF instead"); + PyArray_DiscardWritebackIfCopy(arr); + Py_XDECREF(arr); +} +#endif + #ifdef __cplusplus } diff -Nru python-numpy-1.13.3/numpy/core/include/numpy/ndarraytypes.h python-numpy-1.14.5/numpy/core/include/numpy/ndarraytypes.h --- python-numpy-1.13.3/numpy/core/include/numpy/ndarraytypes.h 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/include/numpy/ndarraytypes.h 2018-06-12 18:28:52.000000000 +0000 @@ -677,7 +677,7 @@ /* * This object is decref'd upon * deletion of array. Except in the - * case of UPDATEIFCOPY which has + * case of WRITEBACKIFCOPY which has * special handling. 
* * For views it points to the original @@ -688,9 +688,9 @@ * points to an object that should be * decref'd on deletion * - * For UPDATEIFCOPY flag this is an - * array to-be-updated upon deletion - * of this one + * For WRITEBACKIFCOPY flag this is an + * array to-be-updated upon calling + * PyArray_ResolveWritebackIfCopy */ PyObject *base; /* Pointer to type structure */ @@ -865,12 +865,13 @@ /* * If this flag is set, then base contains a pointer to an array of * the same size that should be updated with the current contents of - * this array when this array is deallocated + * this array when PyArray_ResolveWritebackIfCopy is called. * * This flag may be requested in constructor functions. * This flag may be tested for in PyArray_FLAGS(arr). */ -#define NPY_ARRAY_UPDATEIFCOPY 0x1000 +#define NPY_ARRAY_UPDATEIFCOPY 0x1000 /* Deprecated in 1.14 */ +#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 /* * NOTE: there are also internal flags defined in multiarray/arrayobject.h, @@ -895,10 +896,14 @@ #define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) #define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY | \ NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) #define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) #define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) #define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY | \ NPY_ARRAY_UPDATEIFCOPY) +#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) #define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ NPY_ARRAY_F_CONTIGUOUS | \ @@ -1044,7 +1049,7 @@ #define NPY_ITER_CONTIG 0x00200000 /* The operand may be copied to satisfy requirements */ #define NPY_ITER_COPY 0x00400000 -/* The operand may be copied with UPDATEIFCOPY to satisfy requirements */ +/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */ #define NPY_ITER_UPDATEIFCOPY 0x00800000 /* Allocate the operand if it is NULL */ #define NPY_ITER_ALLOCATE 0x01000000 @@ -1676,6 +1681,8 @@ #define 
PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) #define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) #define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL) +#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0) +#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) #define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) #define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) diff -Nru python-numpy-1.13.3/numpy/core/include/numpy/noprefix.h python-numpy-1.14.5/numpy/core/include/numpy/noprefix.h --- python-numpy-1.13.3/numpy/core/include/numpy/noprefix.h 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/include/numpy/noprefix.h 2018-06-12 17:31:56.000000000 +0000 @@ -166,6 +166,7 @@ #define NOTSWAPPED NPY_NOTSWAPPED #define WRITEABLE NPY_WRITEABLE #define UPDATEIFCOPY NPY_UPDATEIFCOPY +#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY #define ARR_HAS_DESCR NPY_ARR_HAS_DESCR #define BEHAVED NPY_BEHAVED #define BEHAVED_NS NPY_BEHAVED_NS diff -Nru python-numpy-1.13.3/numpy/core/include/numpy/npy_3kcompat.h python-numpy-1.14.5/numpy/core/include/numpy/npy_3kcompat.h --- python-numpy-1.13.3/numpy/core/include/numpy/npy_3kcompat.h 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/include/numpy/npy_3kcompat.h 2018-06-12 17:35:36.000000000 +0000 @@ -94,6 +94,8 @@ #define PyUString_InternFromString PyUnicode_InternFromString #define PyUString_Format PyUnicode_Format +#define PyBaseString_Check(obj) (PyUnicode_Check(obj)) + #else #define PyBytes_Type PyString_Type @@ -123,6 +125,8 @@ #define PyUString_InternFromString PyString_InternFromString #define PyUString_Format PyString_Format +#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj)) + #endif /* NPY_PY3K */ diff -Nru python-numpy-1.13.3/numpy/core/include/numpy/npy_cpu.h python-numpy-1.14.5/numpy/core/include/numpy/npy_cpu.h --- 
python-numpy-1.13.3/numpy/core/include/numpy/npy_cpu.h 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/include/numpy/npy_cpu.h 2018-06-12 18:28:52.000000000 +0000 @@ -15,6 +15,8 @@ * NPY_CPU_ARMEB * NPY_CPU_SH_LE * NPY_CPU_SH_BE + * NPY_CPU_ARCEL + * NPY_CPU_ARCEB */ #ifndef _NPY_CPUARCH_H_ #define _NPY_CPUARCH_H_ @@ -76,6 +78,10 @@ #define NPY_CPU_AARCH64 #elif defined(__mc68000__) #define NPY_CPU_M68K +#elif defined(__arc__) && defined(__LITTLE_ENDIAN__) + #define NPY_CPU_ARCEL +#elif defined(__arc__) && defined(__BIG_ENDIAN__) + #define NPY_CPU_ARCEB #else #error Unknown CPU, please report this to numpy maintainers with \ information about your platform (OS, CPU and compiler) diff -Nru python-numpy-1.13.3/numpy/core/include/numpy/npy_endian.h python-numpy-1.14.5/numpy/core/include/numpy/npy_endian.h --- python-numpy-1.13.3/numpy/core/include/numpy/npy_endian.h 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/include/numpy/npy_endian.h 2018-06-12 18:28:52.000000000 +0000 @@ -45,7 +45,8 @@ || defined(NPY_CPU_AARCH64) \ || defined(NPY_CPU_SH_LE) \ || defined(NPY_CPU_MIPSEL) \ - || defined(NPY_CPU_PPC64LE) + || defined(NPY_CPU_PPC64LE) \ + || defined(NPY_CPU_ARCEL) #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN #elif defined(NPY_CPU_PPC) \ || defined(NPY_CPU_SPARC) \ @@ -56,7 +57,8 @@ || defined(NPY_CPU_SH_BE) \ || defined(NPY_CPU_MIPSEB) \ || defined(NPY_CPU_OR1K) \ - || defined(NPY_CPU_M68K) + || defined(NPY_CPU_M68K) \ + || defined(NPY_CPU_ARCEB) #define NPY_BYTE_ORDER NPY_BIG_ENDIAN #else #error Unknown CPU: can not set endianness diff -Nru python-numpy-1.13.3/numpy/core/include/numpy/npy_math.h python-numpy-1.14.5/numpy/core/include/numpy/npy_math.h --- python-numpy-1.13.3/numpy/core/include/numpy/npy_math.h 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/include/numpy/npy_math.h 2018-06-12 18:28:52.000000000 +0000 @@ -524,8 +524,14 @@ #define NPY_FPE_UNDERFLOW 4 #define NPY_FPE_INVALID 8 -int 
npy_get_floatstatus(void); +int npy_clear_floatstatus_barrier(char*); +int npy_get_floatstatus_barrier(char*); +/* + * use caution with these - clang and gcc8.1 are known to reorder calls + * to this form of the function which can defeat the check + */ int npy_clear_floatstatus(void); +int npy_get_floatstatus(void); void npy_set_floatstatus_divbyzero(void); void npy_set_floatstatus_overflow(void); void npy_set_floatstatus_underflow(void); diff -Nru python-numpy-1.13.3/numpy/core/include/numpy/numpyconfig.h python-numpy-1.14.5/numpy/core/include/numpy/numpyconfig.h --- python-numpy-1.13.3/numpy/core/include/numpy/numpyconfig.h 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/include/numpy/numpyconfig.h 2018-06-12 17:31:56.000000000 +0000 @@ -35,5 +35,6 @@ #define NPY_1_11_API_VERSION 0x00000008 #define NPY_1_12_API_VERSION 0x00000008 #define NPY_1_13_API_VERSION 0x00000008 +#define NPY_1_14_API_VERSION 0x00000008 #endif diff -Nru python-numpy-1.13.3/numpy/core/__init__.py python-numpy-1.14.5/numpy/core/__init__.py --- python-numpy-1.13.3/numpy/core/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -24,9 +24,9 @@ Original error was: %s """ % (exc,) raise ImportError(msg) - -for envkey in env_added: - del os.environ[envkey] +finally: + for envkey in env_added: + del os.environ[envkey] del envkey del env_added del os @@ -71,7 +71,7 @@ __all__ += einsumfunc.__all__ -from numpy.testing.nosetester import _numpy_tester +from numpy.testing import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench diff -Nru python-numpy-1.13.3/numpy/core/_internal.py python-numpy-1.14.5/numpy/core/_internal.py --- python-numpy-1.13.3/numpy/core/_internal.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/_internal.py 2018-06-12 17:35:36.000000000 +0000 @@ -110,6 +110,10 @@ num = field[1] - offset result.append(('', '|V%d' % num)) offset += num 
+ elif field[1] < offset: + raise ValueError( + "dtype.descr is not defined for types with overlapping or " + "out-of-order fields") if len(field) > 3: name = (field[2], field[3]) else: @@ -206,6 +210,8 @@ return self._cls(other) def __eq__(self, other): return self._cls == other._cls + def __ne__(self, other): + return self._cls != other._cls def _getintp_ctype(): val = _getintp_ctype.cache @@ -281,20 +287,26 @@ _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_") -# Given a datatype and an order object -# return a new names tuple -# with the order indicated def _newnames(datatype, order): + """ + Given a datatype and an order object, return a new names tuple, with the + order indicated + """ oldnames = datatype.names nameslist = list(oldnames) if isinstance(order, str): order = [order] + seen = set() if isinstance(order, (list, tuple)): for name in order: try: nameslist.remove(name) except ValueError: - raise ValueError("unknown field name: %s" % (name,)) + if name in seen: + raise ValueError("duplicate field name: %s" % (name,)) + else: + raise ValueError("unknown field name: %s" % (name,)) + seen.add(name) return tuple(list(order) + nameslist) raise ValueError("unsupported order value: %s" % (order,)) diff -Nru python-numpy-1.13.3/numpy/core/numeric.py python-numpy-1.14.5/numpy/core/numeric.py --- python-numpy-1.13.3/numpy/core/numeric.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/numeric.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,6 +5,7 @@ import operator import sys import warnings +import numbers import numpy as np from . import multiarray @@ -22,9 +23,9 @@ from .multiarray import newbuffer, getbuffer from . 
import umath -from .umath import (invert, sin, UFUNC_BUFSIZE_DEFAULT, ERR_IGNORE, - ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, - ERR_DEFAULT, PINF, NAN) +from .umath import (multiply, invert, sin, UFUNC_BUFSIZE_DEFAULT, + ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, + ERR_LOG, ERR_DEFAULT, PINF, NAN) from . import numerictypes from .numerictypes import longlong, intc, int_, float_, complex_, bool_ from ._internal import TooHardError, AxisError @@ -46,28 +47,23 @@ __all__ = [ 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', - 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', - 'dtype', 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', - 'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', - 'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types', - 'min_scalar_type', 'result_type', 'asarray', 'asanyarray', - 'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like', - 'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot', - 'outer', 'vdot', 'roll', - 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string', - 'get_printoptions', 'set_printoptions', 'array_repr', 'array_str', - 'set_string_function', 'little_endian', 'require', 'fromiter', - 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load', - 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity', - 'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr', - 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate', - 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', - 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', - 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul', - 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', - 'TooHardError', 'AxisError' - ] - + 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', + 'fromstring', 'fromfile', 'frombuffer', 
'int_asbuffer', 'where', + 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort', + 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type', + 'result_type', 'asarray', 'asanyarray', 'ascontiguousarray', + 'asfortranarray', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', + 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', + 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'require', + 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', + 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', + 'identity', 'allclose', 'compare_chararrays', 'putmask', 'seterr', + 'geterr', 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', + 'errstate', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', + 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', + 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', + 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', + 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError' ] if sys.version_info[0] < 3: __all__.extend(['getbuffer', 'newbuffer']) @@ -133,7 +129,7 @@ array([[0, 0, 0], [0, 0, 0]]) - >>> y = np.arange(3, dtype=np.float) + >>> y = np.arange(3, dtype=float) >>> y array([ 0., 1., 2.]) >>> np.zeros_like(y) @@ -176,7 +172,7 @@ >>> np.ones(5) array([ 1., 1., 1., 1., 1.]) - >>> np.ones((5,), dtype=np.int) + >>> np.ones((5,), dtype=int) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) @@ -243,7 +239,7 @@ array([[1, 1, 1], [1, 1, 1]]) - >>> y = np.arange(3, dtype=np.float) + >>> y = np.arange(3, dtype=float) >>> y array([ 0., 1., 2.]) >>> np.ones_like(y) @@ -344,7 +340,7 @@ Examples -------- - >>> x = np.arange(6, dtype=np.int) + >>> x = np.arange(6, dtype=int) >>> np.full_like(x, 1) array([1, 1, 1, 1, 1, 1]) >>> np.full_like(x, 0.1) @@ -363,20 +359,6 @@ multiarray.copyto(res, fill_value, casting='unsafe') return res - -def extend_all(module): - adict = {} - for a in __all__: - 
adict[a] = 1 - try: - mall = getattr(module, '__all__') - except AttributeError: - mall = [k for k in module.__dict__.keys() if not k.startswith('_')] - for a in mall: - if a not in adict: - __all__.append(a) - - def count_nonzero(a, axis=None): """ Counts the number of non-zero values in the array ``a``. @@ -425,39 +407,18 @@ array([2, 3]) """ - if axis is None or axis == (): + if axis is None: return multiarray.count_nonzero(a) a = asanyarray(a) - if a.dtype == bool: - return a.sum(axis=axis, dtype=np.intp) - - if issubdtype(a.dtype, np.number): - return (a != 0).sum(axis=axis, dtype=np.intp) - - if (issubdtype(a.dtype, np.string_) or - issubdtype(a.dtype, np.unicode_)): - nullstr = a.dtype.type('') - return (a != nullstr).sum(axis=axis, dtype=np.intp) - - axis = asarray(normalize_axis_tuple(axis, a.ndim)) - counts = np.apply_along_axis(multiarray.count_nonzero, axis[0], a) - - if axis.size == 1: - return counts.astype(np.intp, copy=False) + # TODO: this works around .astype(bool) not working properly (gh-9847) + if np.issubdtype(a.dtype, np.character): + a_bool = a != a.dtype.type() else: - # for subsequent axis numbers, that number decreases - # by one in this new 'counts' array if it was larger - # than the first axis upon which 'count_nonzero' was - # applied but remains unchanged if that number was - # smaller than that first axis - # - # this trick enables us to perform counts on object-like - # elements across multiple axes very quickly because integer - # addition is very well optimized - return counts.sum(axis=tuple(axis[1:] - ( - axis[1:] > axis[0])), dtype=np.intp) + a_bool = a.astype(np.bool_, copy=False) + + return a_bool.sum(axis=axis, dtype=np.intp) def asarray(a, dtype=None, order=None): @@ -705,6 +666,7 @@ OWNDATA : False WRITEABLE : True ALIGNED : True + WRITEBACKIFCOPY : False UPDATEIFCOPY : False >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) @@ -714,6 +676,7 @@ OWNDATA : True WRITEABLE : True ALIGNED : True + 
WRITEBACKIFCOPY : False UPDATEIFCOPY : False """ @@ -838,7 +801,7 @@ ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``. The output of ``argwhere`` is not suitable for indexing arrays. - For this purpose use ``where(a)`` instead. + For this purpose use ``nonzero(a)`` instead. Examples -------- @@ -1106,7 +1069,10 @@ See also -------- - inner, einsum + inner + einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. + ufunc.outer : A generalization to N dimensions and other operations. + ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent. References ---------- @@ -1227,7 +1193,7 @@ [ True, True], [ True, True], [ True, True], - [ True, True]], dtype=bool) + [ True, True]]) An extended example taking advantage of the overloading of + and \\*: @@ -1278,7 +1244,7 @@ """ try: iter(axes) - except: + except Exception: axes_a = list(range(-axes, 0)) axes_b = list(range(0, axes)) else: @@ -1323,7 +1289,7 @@ N2 = 1 for axis in axes_a: N2 *= as_[axis] - newshape_a = (-1, N2) + newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2) olda = [as_[axis] for axis in notin] notin = [k for k in range(ndb) if k not in axes_b] @@ -1331,7 +1297,7 @@ N2 = 1 for axis in axes_b: N2 *= bs[axis] - newshape_b = (N2, -1) + newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin]))) oldb = [bs[axis] for axis in notin] at = a.transpose(newaxes_a).reshape(newshape_a) @@ -1433,6 +1399,10 @@ """ Roll the specified axis backwards, until it lies in a given position. + This function continues to be supported for backward compatibility, but you + should prefer `moveaxis`. The `moveaxis` function was added in NumPy + 1.11. + Parameters ---------- a : ndarray @@ -1548,7 +1518,7 @@ Other axes remain in their original order. - .. versionadded::1.11.0 + .. 
versionadded:: 1.11.0 Parameters ---------- @@ -1615,7 +1585,7 @@ # fix hack in scipy which imports this function def _move_axis_to_0(a, axis): - return rollaxis(a, axis, 0) + return moveaxis(a, axis, 0) def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): @@ -1740,8 +1710,8 @@ axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb') # Move working axis to the end of the shape - a = rollaxis(a, axisa, a.ndim) - b = rollaxis(b, axisb, b.ndim) + a = moveaxis(a, axisa, -1) + b = moveaxis(b, axisb, -1) msg = ("incompatible dimensions for cross product\n" "(dimension must be 2 or 3)") if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): @@ -1812,195 +1782,7 @@ multiply(a0, b1, out=cp2) cp2 -= a1 * b0 - # This works because we are moving the last axis - return rollaxis(cp, -1, axisc) - - -# Use numarray's printing function -from .arrayprint import array2string, get_printoptions, set_printoptions - - -_typelessdata = [int_, float_, complex_] -if issubclass(intc, int): - _typelessdata.append(intc) - - -if issubclass(longlong, int): - _typelessdata.append(longlong) - - -def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): - """ - Return the string representation of an array. - - Parameters - ---------- - arr : ndarray - Input array. - max_line_width : int, optional - The maximum number of columns the string should span. Newline - characters split the string appropriately after array elements. - precision : int, optional - Floating point precision. Default is the current printing precision - (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent very small numbers as zero, default is False. Very small - is defined by `precision`, if the precision is 8 then - numbers smaller than 5e-9 are represented as zero. - - Returns - ------- - string : str - The string representation of an array. 
- - See Also - -------- - array_str, array2string, set_printoptions - - Examples - -------- - >>> np.array_repr(np.array([1,2])) - 'array([1, 2])' - >>> np.array_repr(np.ma.array([0.])) - 'MaskedArray([ 0.])' - >>> np.array_repr(np.array([], np.int32)) - 'array([], dtype=int32)' - - >>> x = np.array([1e-6, 4e-7, 2, 3]) - >>> np.array_repr(x, precision=6, suppress_small=True) - 'array([ 0.000001, 0. , 2. , 3. ])' - - """ - if type(arr) is not ndarray: - class_name = type(arr).__name__ - else: - class_name = "array" - - if arr.size > 0 or arr.shape == (0,): - lst = array2string(arr, max_line_width, precision, suppress_small, - ', ', class_name + "(") - else: # show zero-length shape unless it is (0,) - lst = "[], shape=%s" % (repr(arr.shape),) - - skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0 - - if skipdtype: - return "%s(%s)" % (class_name, lst) - else: - typename = arr.dtype.name - # Quote typename in the output if it is "complex". - if typename and not (typename[0].isalpha() and typename.isalnum()): - typename = "'%s'" % typename - - lf = ' ' - if issubclass(arr.dtype.type, flexible): - if arr.dtype.names: - typename = "%s" % str(arr.dtype) - else: - typename = "'%s'" % str(arr.dtype) - lf = '\n'+' '*len(class_name + "(") - return "%s(%s,%sdtype=%s)" % (class_name, lst, lf, typename) - - -def array_str(a, max_line_width=None, precision=None, suppress_small=None): - """ - Return a string representation of the data in an array. - - The data in the array is returned as a single string. This function is - similar to `array_repr`, the difference being that `array_repr` also - returns information on the kind of array and its data type. - - Parameters - ---------- - a : ndarray - Input array. - max_line_width : int, optional - Inserts newlines if text is longer than `max_line_width`. The - default is, indirectly, 75. - precision : int, optional - Floating point precision. 
Default is the current printing precision - (usually 8), which can be altered using `set_printoptions`. - suppress_small : bool, optional - Represent numbers "very close" to zero as zero; default is False. - Very close is defined by precision: if the precision is 8, e.g., - numbers smaller (in absolute value) than 5e-9 are represented as - zero. - - See Also - -------- - array2string, array_repr, set_printoptions - - Examples - -------- - >>> np.array_str(np.arange(3)) - '[0 1 2]' - - """ - return array2string(a, max_line_width, precision, suppress_small, ' ', "", str) - - -def set_string_function(f, repr=True): - """ - Set a Python function to be used when pretty printing arrays. - - Parameters - ---------- - f : function or None - Function to be used to pretty print arrays. The function should expect - a single array argument and return a string of the representation of - the array. If None, the function is reset to the default NumPy function - to print arrays. - repr : bool, optional - If True (default), the function for pretty printing (``__repr__``) - is set, if False the function that returns the default string - representation (``__str__``) is set. - - See Also - -------- - set_printoptions, get_printoptions - - Examples - -------- - >>> def pprint(arr): - ... return 'HA! - What are you going to do now?' - ... - >>> np.set_string_function(pprint) - >>> a = np.arange(10) - >>> a - HA! - What are you going to do now? - >>> print(a) - [0 1 2 3 4 5 6 7 8 9] - - We can reset the function to the default: - - >>> np.set_string_function(None) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - `repr` affects either pretty printing or normal string representation. - Note that ``__repr__`` is still affected by setting ``__str__`` - because the width of each array element in the returned string becomes - equal to the length of the result of ``__str__()``. 
- - >>> x = np.arange(4) - >>> np.set_string_function(lambda x:'random', repr=False) - >>> x.__str__() - 'random' - >>> x.__repr__() - 'array([ 0, 1, 2, 3])' - - """ - if f is None: - if repr: - return multiarray.set_string_function(array_repr, 1) - else: - return multiarray.set_string_function(array_str, 0) - else: - return multiarray.set_string_function(f, repr) - - -set_string_function(array_str, 0) -set_string_function(array_repr, 1) + return moveaxis(cp, -1, axisc) little_endian = (sys.byteorder == 'little') @@ -2119,7 +1901,7 @@ >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) array([[ True, False, False], [False, True, False], - [False, False, True]], dtype=bool) + [False, False, True]]) >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) array([[0, 1, 2], @@ -2154,12 +1936,22 @@ False >>> np.isscalar(False) True + >>> np.isscalar('numpy') + True + + NumPy supports PEP 3141 numbers: + + >>> from fractions import Fraction + >>> isscalar(Fraction(5, 17)) + True + >>> from numbers import Number + >>> isscalar(Number()) + True """ - if isinstance(num, generic): - return True - else: - return type(num) in ScalarType + return (isinstance(num, generic) + or type(num) in ScalarType + or isinstance(num, numbers.Number)) def binary_repr(num, width=None): @@ -2429,7 +2221,7 @@ See Also -------- - isclose, all, any + isclose, all, any, equal Notes ----- @@ -2439,9 +2231,14 @@ absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) The above equation is not symmetric in `a` and `b`, so that - `allclose(a, b)` might be different from `allclose(b, a)` in + ``allclose(a, b)`` might be different from ``allclose(b, a)`` in some rare cases. + The comparison of `a` and `b` uses standard broadcasting, which + means that `a` and `b` need not have the same shape in order for + ``allclose(a, b)`` to evaluate to True. The same is true for + `equal` but not `array_equal`. 
+ Examples -------- >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) @@ -2521,13 +2318,10 @@ """ def within_tol(x, y, atol, rtol): with errstate(invalid='ignore'): - result = less_equal(abs(x-y), atol + rtol * abs(y)) - if isscalar(a) and isscalar(b): - result = bool(result) - return result + return less_equal(abs(x-y), atol + rtol * abs(y)) - x = array(a, copy=False, subok=True, ndmin=1) - y = array(b, copy=False, subok=True, ndmin=1) + x = asanyarray(a) + y = asanyarray(b) # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). # This will cause casting of x later. Also, make sure to allow subclasses @@ -2554,12 +2348,11 @@ if equal_nan: # Make NaN == NaN both_nan = isnan(x) & isnan(y) + + # Needed to treat masked arrays correctly. = True would not work. cond[both_nan] = both_nan[both_nan] - if isscalar(a) and isscalar(b): - return bool(cond) - else: - return cond + return cond[()] # Flatten 0d arrays to scalars def array_equal(a1, a2): @@ -2597,7 +2390,7 @@ """ try: a1, a2 = asarray(a1), asarray(a2) - except: + except Exception: return False if a1.shape != a2.shape: return False @@ -2641,11 +2434,11 @@ """ try: a1, a2 = asarray(a1), asarray(a2) - except: + except Exception: return False try: multiarray.broadcast(a1, a2) - except: + except Exception: return False return bool(asarray(a1 == a2).all()) @@ -3085,10 +2878,26 @@ False_ = bool_(False) True_ = bool_(True) + +def extend_all(module): + adict = {} + for a in __all__: + adict[a] = 1 + try: + mall = getattr(module, '__all__') + except AttributeError: + mall = [k for k in module.__dict__.keys() if not k.startswith('_')] + for a in mall: + if a not in adict: + __all__.append(a) + from .umath import * from .numerictypes import * from . import fromnumeric from .fromnumeric import * +from . 
import arrayprint +from .arrayprint import * extend_all(fromnumeric) extend_all(umath) extend_all(numerictypes) +extend_all(arrayprint) diff -Nru python-numpy-1.13.3/numpy/core/numerictypes.py python-numpy-1.14.5/numpy/core/numerictypes.py --- python-numpy-1.13.3/numpy/core/numerictypes.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/numerictypes.py 2018-06-12 18:28:52.000000000 +0000 @@ -85,6 +85,7 @@ import types as _types import sys import numbers +import warnings from numpy.compat import bytes, long from numpy.core.multiarray import ( @@ -501,11 +502,11 @@ Examples -------- - >>> np.maximum_sctype(np.int) + >>> np.maximum_sctype(int) >>> np.maximum_sctype(np.uint8) - >>> np.maximum_sctype(np.complex) + >>> np.maximum_sctype(complex) >>> np.maximum_sctype(str) @@ -528,33 +529,6 @@ else: return sctypes[base][-1] -try: - buffer_type = _types.BufferType -except AttributeError: - # Py3K - buffer_type = memoryview - -_python_types = {int: 'int_', - float: 'float_', - complex: 'complex_', - bool: 'bool_', - bytes: 'bytes_', - unicode: 'unicode_', - buffer_type: 'void', - } - -if sys.version_info[0] >= 3: - def _python_type(t): - """returns the type corresponding to a certain Python type""" - if not isinstance(t, type): - t = type(t) - return allTypes[_python_types.get(t, 'object_')] -else: - def _python_type(t): - """returns the type corresponding to a certain Python type""" - if not isinstance(t, _types.TypeType): - t = type(t) - return allTypes[_python_types.get(t, 'object_')] def issctype(rep): """ @@ -597,7 +571,7 @@ if res and res != object_: return True return False - except: + except Exception: return False def obj2sctype(rep, default=None): @@ -639,22 +613,19 @@ """ - try: - if issubclass(rep, generic): - return rep - except TypeError: - pass - if isinstance(rep, dtype): - return rep.type - if isinstance(rep, type): - return _python_type(rep) + # prevent abtract classes being upcast + if isinstance(rep, type) and issubclass(rep, 
generic): + return rep + # extract dtype from arrays if isinstance(rep, ndarray): return rep.dtype.type + # fall back on dtype to convert try: res = dtype(rep) - except: + except Exception: return default - return res.type + else: + return res.type def issubclass_(arg1, arg2): @@ -684,9 +655,9 @@ Examples -------- - >>> np.issubclass_(np.int32, np.int) + >>> np.issubclass_(np.int32, int) True - >>> np.issubclass_(np.int32, np.float) + >>> np.issubclass_(np.int32, float) False """ @@ -717,9 +688,9 @@ -------- >>> np.issubsctype('S8', str) True - >>> np.issubsctype(np.array([1]), np.int) + >>> np.issubsctype(np.array([1]), int) True - >>> np.issubsctype(np.array([1]), np.float) + >>> np.issubsctype(np.array([1]), float) False """ @@ -745,20 +716,46 @@ Examples -------- - >>> np.issubdtype('S1', str) + >>> np.issubdtype('S1', np.string_) True >>> np.issubdtype(np.float64, np.float32) False """ - if issubclass_(arg2, generic): - return issubclass(dtype(arg1).type, arg2) - mro = dtype(arg2).type.mro() - if len(mro) > 1: - val = mro[1] - else: - val = mro[0] - return issubclass(dtype(arg1).type, val) + if not issubclass_(arg1, generic): + arg1 = dtype(arg1).type + if not issubclass_(arg2, generic): + arg2_orig = arg2 + arg2 = dtype(arg2).type + if not isinstance(arg2_orig, dtype): + # weird deprecated behaviour, that tried to infer np.floating from + # float, and similar less obvious things, such as np.generic from + # basestring + mro = arg2.mro() + arg2 = mro[1] if len(mro) > 1 else mro[0] + + def type_repr(x): + """ Helper to produce clear error messages """ + if not isinstance(x, type): + return repr(x) + elif issubclass(x, generic): + return "np.{}".format(x.__name__) + else: + return x.__name__ + + # 1.14, 2017-08-01 + warnings.warn( + "Conversion of the second argument of issubdtype from `{raw}` " + "to `{abstract}` is deprecated. 
In future, it will be treated " + "as `{concrete} == np.dtype({raw}).type`.".format( + raw=type_repr(arg2_orig), + abstract=type_repr(arg2), + concrete=type_repr(dtype(arg2_orig).type) + ), + FutureWarning, stacklevel=2 + ) + + return issubclass(arg1, arg2) # This dictionary allows look up based on any alias for an array data-type @@ -821,7 +818,7 @@ Examples -------- - >>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]: + >>> for sctype in [np.int32, float, complex, np.string_, np.ndarray]: ... print(np.sctype2char(sctype)) l d @@ -958,6 +955,7 @@ numbers.Integral.register(integer) numbers.Complex.register(inexact) numbers.Real.register(floating) + numbers.Number.register(number) _register_types() @@ -986,7 +984,7 @@ Examples -------- - >>> np.find_common_type([], [np.int64, np.float32, np.complex]) + >>> np.find_common_type([], [np.int64, np.float32, complex]) dtype('complex128') >>> np.find_common_type([np.int64, np.float32], []) dtype('float64') @@ -1002,7 +1000,7 @@ Complex is of a different type, so it up-casts the float in the `array_types` argument: - >>> np.find_common_type([np.float32], [np.complex]) + >>> np.find_common_type([np.float32], [complex]) dtype('complex128') Type specifier strings are convertible to dtypes and can therefore diff -Nru python-numpy-1.13.3/numpy/core/records.py python-numpy-1.14.5/numpy/core/records.py --- python-numpy-1.13.3/numpy/core/records.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/records.py 2018-06-12 18:28:52.000000000 +0000 @@ -38,10 +38,12 @@ import sys import os +import warnings from . import numeric as sb from . 
import numerictypes as nt from numpy.compat import isfileobj, bytes, long +from .arrayprint import get_printoptions # All of the functions allow formats to be a dtype __all__ = ['record', 'recarray', 'format_parser'] @@ -80,7 +82,7 @@ dup.append(list[i]) return dup -class format_parser: +class format_parser(object): """ Class to convert formats, names, titles description to a dtype. @@ -222,10 +224,14 @@ __module__ = 'numpy' def __repr__(self): - return self.__str__() + if get_printoptions()['legacy'] == '1.13': + return self.__str__() + return super(record, self).__repr__() def __str__(self): - return str(self.item()) + if get_printoptions()['legacy'] == '1.13': + return str(self.item()) + return super(record, self).__str__() def __getattribute__(self, attr): if attr in ['setfield', 'getfield', 'dtype']: @@ -473,7 +479,7 @@ newattr = attr not in self.__dict__ try: ret = object.__setattr__(self, attr, val) - except: + except Exception: fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} if attr not in fielddict: exctype, value = sys.exc_info()[:2] @@ -487,7 +493,7 @@ # internal attribute. try: object.__delattr__(self, attr) - except: + except Exception: return ret try: res = fielddict[attr][:2] @@ -525,22 +531,25 @@ if repr_dtype.type is record: repr_dtype = sb.dtype((nt.void, repr_dtype)) prefix = "rec.array(" - fmt = 'rec.array(%s, %sdtype=%s)' + fmt = 'rec.array(%s,%sdtype=%s)' else: # otherwise represent it using np.array plus a view # This should only happen if the user is playing # strange games with dtypes. prefix = "array(" - fmt = 'array(%s, %sdtype=%s).view(numpy.recarray)' + fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)' # get data/shape string. 
logic taken from numeric.array_repr if self.size > 0 or self.shape == (0,): - lst = sb.array2string(self, separator=', ', prefix=prefix) + lst = sb.array2string( + self, separator=', ', prefix=prefix, suffix=',') else: # show zero-length shape unless it is (0,) lst = "[], shape=%s" % (repr(self.shape),) lf = '\n'+' '*len(prefix) + if get_printoptions()['legacy'] == '1.13': + lf = ' ' + lf # trailing space return fmt % (lst, lf, repr_dtype) def field(self, attr, val=None): @@ -667,26 +676,40 @@ else: descr = format_parser(formats, names, titles, aligned, byteorder)._descr - try: - retval = sb.array(recList, dtype=descr) - except TypeError: # list of lists instead of list of tuples - if (shape is None or shape == 0): - shape = len(recList) - if isinstance(shape, (int, long)): - shape = (shape,) - if len(shape) > 1: - raise ValueError("Can only deal with 1-d array.") - _array = recarray(shape, descr) - for k in range(_array.size): - _array[k] = tuple(recList[k]) - return _array - else: - if shape is not None and retval.shape != shape: - retval.shape = shape + # deprecated back-compat block for numpy 1.14, to be removed in a later + # release. This converts list-of-list input to list-of-tuples in some + # cases, as done in numpy <= 1.13. In the future we will require tuples. + if (isinstance(recList, list) and len(recList) > 0 + and isinstance(recList[0], list) and len(recList[0]) > 0 + and not isinstance(recList[0][0], (list, tuple))): + + try: + memoryview(recList[0][0]) + except: + if (shape is None or shape == 0): + shape = len(recList) + if isinstance(shape, (int, long)): + shape = (shape,) + if len(shape) > 1: + raise ValueError("Can only deal with 1-d array.") + _array = recarray(shape, descr) + for k in range(_array.size): + _array[k] = tuple(recList[k]) + # list of lists instead of list of tuples ? + # 2018-02-07, 1.14.1 + warnings.warn( + "fromrecords expected a list of tuples, may have received a " + "list of lists instead. 
In the future that will raise an error", + FutureWarning, stacklevel=2) + return _array + else: + pass - res = retval.view(recarray) + retval = sb.array(recList, dtype=descr) + if shape is not None and retval.shape != shape: + retval.shape = shape - return res + return retval.view(recarray) def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None, diff -Nru python-numpy-1.13.3/numpy/core/setup_common.py python-numpy-1.14.5/numpy/core/setup_common.py --- python-numpy-1.13.3/numpy/core/setup_common.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/setup_common.py 2018-06-12 18:28:52.000000000 +0000 @@ -39,7 +39,8 @@ # 0x0000000a - 1.11.x # 0x0000000a - 1.12.x # 0x0000000b - 1.13.x -C_API_VERSION = 0x0000000b +# 0x0000000c - 1.14.x +C_API_VERSION = 0x0000000c class MismatchCAPIWarning(Warning): pass @@ -215,6 +216,21 @@ except (AttributeError, ValueError): pass + # Disable multi-file interprocedural optimization in the Intel compiler on Linux + # which generates intermediary object files and prevents checking the + # float representation. 
+ elif (sys.platform != "win32" + and cmd.compiler.compiler_type.startswith('intel') + and '-ipo' in cmd.compiler.cc_exe): + newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '') + cmd.compiler.set_executables( + compiler=newcompiler, + compiler_so=newcompiler, + compiler_cxx=newcompiler, + linker_exe=newcompiler, + linker_so=newcompiler + ' -shared' + ) + # We need to use _compile because we need the object filename src, obj = cmd._compile(body, None, None, 'c') try: diff -Nru python-numpy-1.13.3/numpy/core/setup.py python-numpy-1.14.5/numpy/core/setup.py --- python-numpy-1.13.3/numpy/core/setup.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/setup.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,6 +6,7 @@ import copy import sysconfig import warnings +import platform from os.path import join from numpy.distutils import log from distutils.dep_util import newer @@ -187,7 +188,7 @@ if os.uname()[0] == "Interix": warnings.warn("Disabling broken complex support. See #1365", stacklevel=2) return priv, pub - except: + except Exception: # os.uname not available on all platforms. blanket except ugly but safe pass @@ -685,10 +686,16 @@ join('src', 'npymath', 'npy_math_complex.c.src'), join('src', 'npymath', 'halffloat.c') ] + + # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977. 
+ is_msvc = platform.system() == 'Windows' config.add_installed_library('npymath', sources=npymath_sources + [get_mathlib_info], install_dir='lib', - build_info={'include_dirs' : []}) # empty list required for creating npy_math_internal.h + build_info={ + 'include_dirs' : [], # empty list required for creating npy_math_internal.h + 'extra_compiler_args' : (['/GL-'] if is_msvc else []), + }) config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", subst_dict) config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", @@ -728,6 +735,7 @@ join('src', 'multiarray', 'conversion_utils.h'), join('src', 'multiarray', 'ctors.h'), join('src', 'multiarray', 'descriptor.h'), + join('src', 'multiarray', 'dragon4.h'), join('src', 'multiarray', 'getset.h'), join('src', 'multiarray', 'hashdescr.h'), join('src', 'multiarray', 'iterators.h'), @@ -741,6 +749,7 @@ join('src', 'multiarray', 'scalartypes.h'), join('src', 'multiarray', 'sequence.h'), join('src', 'multiarray', 'shape.h'), + join('src', 'multiarray', 'strfuncs.h'), join('src', 'multiarray', 'ucsnarrow.h'), join('src', 'multiarray', 'usertypes.h'), join('src', 'multiarray', 'vdot.h'), @@ -748,6 +757,7 @@ join('src', 'private', 'templ_common.h.src'), join('src', 'private', 'lowlevel_strided_loops.h'), join('src', 'private', 'mem_overlap.h'), + join('src', 'private', 'npy_longdouble.h'), join('src', 'private', 'ufunc_override.h'), join('src', 'private', 'binop_override.h'), join('src', 'private', 'npy_extint128.h'), @@ -792,6 +802,7 @@ join('src', 'multiarray', 'datetime_busday.c'), join('src', 'multiarray', 'datetime_busdaycal.c'), join('src', 'multiarray', 'descriptor.c'), + join('src', 'multiarray', 'dragon4.c'), join('src', 'multiarray', 'dtype_transfer.c'), join('src', 'multiarray', 'einsum.c.src'), join('src', 'multiarray', 'flagsobject.c'), @@ -814,12 +825,14 @@ join('src', 'multiarray', 'shape.c'), join('src', 'multiarray', 'scalarapi.c'), join('src', 'multiarray', 'scalartypes.c.src'), + join('src', 
'multiarray', 'strfuncs.c'), join('src', 'multiarray', 'temp_elide.c'), join('src', 'multiarray', 'usertypes.c'), join('src', 'multiarray', 'ucsnarrow.c'), join('src', 'multiarray', 'vdot.c'), join('src', 'private', 'templ_common.h.src'), join('src', 'private', 'mem_overlap.c'), + join('src', 'private', 'npy_longdouble.c'), join('src', 'private', 'ufunc_override.c'), ] @@ -872,10 +885,12 @@ join('src', 'umath', 'loops.h.src'), join('src', 'umath', 'loops.c.src'), join('src', 'umath', 'ufunc_object.c'), + join('src', 'umath', 'extobj.c'), join('src', 'umath', 'scalarmath.c.src'), join('src', 'umath', 'ufunc_type_resolution.c'), join('src', 'umath', 'override.c'), join('src', 'private', 'mem_overlap.c'), + join('src', 'private', 'npy_longdouble.c'), join('src', 'private', 'ufunc_override.c')] umath_deps = [ @@ -889,6 +904,7 @@ join(codegen_dir, 'generate_ufunc_api.py'), join('src', 'private', 'lowlevel_strided_loops.h'), join('src', 'private', 'mem_overlap.h'), + join('src', 'private', 'npy_longdouble.h'), join('src', 'private', 'ufunc_override.h'), join('src', 'private', 'binop_override.h')] + npymath_sources @@ -931,7 +947,8 @@ sources=[join('src', 'multiarray', 'multiarray_tests.c.src'), join('src', 'private', 'mem_overlap.c')], depends=[join('src', 'private', 'mem_overlap.h'), - join('src', 'private', 'npy_extint128.h')]) + join('src', 'private', 'npy_extint128.h')], + libraries=['npymath']) ####################################################################### # operand_flag_tests module # diff -Nru python-numpy-1.13.3/numpy/core/shape_base.py python-numpy-1.14.5/numpy/core/shape_base.py --- python-numpy-1.13.3/numpy/core/shape_base.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/shape_base.py 2018-06-12 17:31:56.000000000 +0000 @@ -183,23 +183,25 @@ """ Stack arrays in sequence vertically (row wise). - Take a sequence of arrays and stack them vertically to make a single - array. Rebuild arrays divided by `vsplit`. 
- - This function continues to be supported for backward compatibility, but - you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack`` - function was added in NumPy 1.10. + This is equivalent to concatenation along the first axis after 1-D arrays + of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by + `vsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of ndarrays - Tuple containing arrays to be stacked. The arrays must have the same - shape along all but the first axis. + The arrays must have the same shape along all but the first axis. + 1-D arrays must have the same length. Returns ------- stacked : ndarray - The array formed by stacking the given arrays. + The array formed by stacking the given arrays, will be at least 2-D. See Also -------- @@ -210,11 +212,6 @@ vsplit : Split array into a list of multiple sub-arrays vertically. block : Assemble arrays from blocks. - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=0)`` if `tup` contains arrays that - are at least 2-dimensional. - Examples -------- >>> a = np.array([1, 2, 3]) @@ -240,17 +237,20 @@ """ Stack arrays in sequence horizontally (column wise). - Take a sequence of arrays and stack them horizontally to make - a single array. Rebuild arrays divided by `hsplit`. - - This function continues to be supported for backward compatibility, but - you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack`` - function was added in NumPy 1.10. + This is equivalent to concatenation along the second axis, except for 1-D + arrays where it concatenates along the first axis. Rebuilds arrays divided + by `hsplit`. 
+ + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of ndarrays - All arrays must have the same shape along all but the second axis. + The arrays must have the same shape along all but the second axis, + except 1-D arrays which can be any length. Returns ------- @@ -266,11 +266,6 @@ hsplit : Split array along second axis. block : Assemble arrays from blocks. - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=1)`` if `tup` contains arrays that - are at least 2-dimensional. - Examples -------- >>> a = np.array((1,2,3)) @@ -293,7 +288,7 @@ return _nx.concatenate(arrs, 1) -def stack(arrays, axis=0): +def stack(arrays, axis=0, out=None): """ Join a sequence of arrays along a new axis. @@ -309,6 +304,10 @@ Each array must have the same shape. axis : int, optional The axis in the result array along which the input arrays are stacked. + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what stack would have returned if no + out argument were specified. Returns ------- @@ -358,81 +357,102 @@ sl = (slice(None),) * axis + (_nx.newaxis,) expanded_arrays = [arr[sl] for arr in arrays] - return _nx.concatenate(expanded_arrays, axis=axis) + return _nx.concatenate(expanded_arrays, axis=axis, out=out) -class _Recurser(object): +def _block_check_depths_match(arrays, parent_index=[]): """ - Utility class for recursing over nested iterables + Recursive function checking that the depths of nested lists in `arrays` + all match. Mismatch raises a ValueError as described in the block + docstring below. 
+ + The entire index (rather than just the depth) needs to be calculated + for each innermost list, in case an error needs to be raised, so that + the index of the offending list can be printed as part of the error. + + The parameter `parent_index` is the full index of `arrays` within the + nested lists passed to _block_check_depths_match at the top of the + recursion. + The return value is a pair. The first item returned is the full index + of an element (specifically the first element) from the bottom of the + nesting in `arrays`. An empty list at the bottom of the nesting is + represented by a `None` index. + The second item is the maximum of the ndims of the arrays nested in + `arrays`. """ - def __init__(self, recurse_if): - self.recurse_if = recurse_if - - def map_reduce(self, x, f_map=lambda x, **kwargs: x, - f_reduce=lambda x, **kwargs: x, - f_kwargs=lambda **kwargs: kwargs, - **kwargs): - """ - Iterate over the nested list, applying: - * ``f_map`` (T -> U) to items - * ``f_reduce`` (Iterable[U] -> U) to mapped items - - For instance, ``map_reduce([[1, 2], 3, 4])`` is:: - - f_reduce([ - f_reduce([ - f_map(1), - f_map(2) - ]), - f_map(3), - f_map(4) - ]]) - - - State can be passed down through the calls with `f_kwargs`, - to iterables of mapped items. When kwargs are passed, as in - ``map_reduce([[1, 2], 3, 4], **kw)``, this becomes:: - - kw1 = f_kwargs(**kw) - kw2 = f_kwargs(**kw1) - f_reduce([ - f_reduce([ - f_map(1), **kw2) - f_map(2, **kw2) - ], **kw1), - f_map(3, **kw1), - f_map(4, **kw1) - ]], **kw) - """ - def f(x, **kwargs): - if not self.recurse_if(x): - return f_map(x, **kwargs) - else: - next_kwargs = f_kwargs(**kwargs) - return f_reduce(( - f(xi, **next_kwargs) - for xi in x - ), **kwargs) - return f(x, **kwargs) - - def walk(self, x, index=()): - """ - Iterate over x, yielding (index, value, entering), where - - * ``index``: a tuple of indices up to this point - * ``value``: equal to ``x[index[0]][...][index[-1]]``. 
On the first iteration, is - ``x`` itself - * ``entering``: bool. The result of ``recurse_if(value)`` - """ - do_recurse = self.recurse_if(x) - yield index, x, do_recurse - - if not do_recurse: - return - for i, xi in enumerate(x): - # yield from ... - for v in self.walk(xi, index + (i,)): - yield v + def format_index(index): + idx_str = ''.join('[{}]'.format(i) for i in index if i is not None) + return 'arrays' + idx_str + if type(arrays) is tuple: + # not strictly necessary, but saves us from: + # - more than one way to do things - no point treating tuples like + # lists + # - horribly confusing behaviour that results when tuples are + # treated like ndarray + raise TypeError( + '{} is a tuple. ' + 'Only lists can be used to arrange blocks, and np.block does ' + 'not allow implicit conversion from tuple to ndarray.'.format( + format_index(parent_index) + ) + ) + elif type(arrays) is list and len(arrays) > 0: + idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) + for i, arr in enumerate(arrays)) + + first_index, max_arr_ndim = next(idxs_ndims) + for index, ndim in idxs_ndims: + if ndim > max_arr_ndim: + max_arr_ndim = ndim + if len(index) != len(first_index): + raise ValueError( + "List depths are mismatched. First element was at depth " + "{}, but there is an element at depth {} ({})".format( + len(first_index), + len(index), + format_index(index) + ) + ) + return first_index, max_arr_ndim + elif type(arrays) is list and len(arrays) == 0: + # We've 'bottomed out' on an empty list + return parent_index + [None], 0 + else: + # We've 'bottomed out' - arrays is either a scalar or an array + return parent_index, _nx.ndim(arrays) + + +def _block(arrays, max_depth, result_ndim): + """ + Internal implementation of block. `arrays` is the argument passed to + block. 
`max_depth` is the depth of nested lists within `arrays` and + `result_ndim` is the greatest of the dimensions of the arrays in + `arrays` and the depth of the lists in `arrays` (see block docstring + for details). + """ + def atleast_nd(a, ndim): + # Ensures `a` has at least `ndim` dimensions by prepending + # ones to `a.shape` as necessary + return array(a, ndmin=ndim, copy=False, subok=True) + + def block_recursion(arrays, depth=0): + if depth < max_depth: + if len(arrays) == 0: + raise ValueError('Lists cannot be empty') + arrs = [block_recursion(arr, depth+1) for arr in arrays] + return _nx.concatenate(arrs, axis=-(max_depth-depth)) + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + return atleast_nd(arrays, result_ndim) + + try: + return block_recursion(arrays) + finally: + # recursive closures have a cyclic reference to themselves, which + # requires gc to collect (gh-10620). To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + block_recursion = None def block(arrays): @@ -583,81 +603,6 @@ """ - def atleast_nd(x, ndim): - x = asanyarray(x) - diff = max(ndim - x.ndim, 0) - return x[(None,)*diff + (Ellipsis,)] - - def format_index(index): - return 'arrays' + ''.join('[{}]'.format(i) for i in index) - - rec = _Recurser(recurse_if=lambda x: type(x) is list) - - # ensure that the lists are all matched in depth - list_ndim = None - any_empty = False - for index, value, entering in rec.walk(arrays): - if type(value) is tuple: - # not strictly necessary, but saves us from: - # - more than one way to do things - no point treating tuples like - # lists - # - horribly confusing behaviour that results when tuples are - # treated like ndarray - raise TypeError( - '{} is a tuple. 
' - 'Only lists can be used to arrange blocks, and np.block does ' - 'not allow implicit conversion from tuple to ndarray.'.format( - format_index(index) - ) - ) - if not entering: - curr_depth = len(index) - elif len(value) == 0: - curr_depth = len(index) + 1 - any_empty = True - else: - continue - - if list_ndim is not None and list_ndim != curr_depth: - raise ValueError( - "List depths are mismatched. First element was at depth {}, " - "but there is an element at depth {} ({})".format( - list_ndim, - curr_depth, - format_index(index) - ) - ) - list_ndim = curr_depth - - # do this here so we catch depth mismatches first - if any_empty: - raise ValueError('Lists cannot be empty') - - # convert all the arrays to ndarrays - arrays = rec.map_reduce(arrays, - f_map=asanyarray, - f_reduce=list - ) - - # determine the maximum dimension of the elements - elem_ndim = rec.map_reduce(arrays, - f_map=lambda xi: xi.ndim, - f_reduce=max - ) - ndim = max(list_ndim, elem_ndim) - - # first axis to concatenate along - first_axis = ndim - list_ndim - - # Make all the elements the same dimension - arrays = rec.map_reduce(arrays, - f_map=lambda xi: atleast_nd(xi, ndim), - f_reduce=list - ) - - # concatenate innermost lists on the right, outermost on the left - return rec.map_reduce(arrays, - f_reduce=lambda xs, axis: _nx.concatenate(list(xs), axis=axis), - f_kwargs=lambda axis: dict(axis=axis+1), - axis=first_axis - ) + bottom_index, arr_ndim = _block_check_depths_match(arrays) + list_ndim = len(bottom_index) + return _block(arrays, list_ndim, max(arr_ndim, list_ndim)) diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/alloc.c python-numpy-1.14.5/numpy/core/src/multiarray/alloc.c --- python-numpy-1.13.3/numpy/core/src/multiarray/alloc.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/alloc.c 2018-06-12 18:28:52.000000000 +0000 @@ -126,8 +126,11 @@ NPY_NO_EXPORT void * npy_alloc_cache_dim(npy_uintp sz) { - /* dims + strides */ - if 
(NPY_UNLIKELY(sz < 2)) { + /* + * make sure any temporary allocation can be used for array metadata which + * uses one memory block for both dimensions and strides + */ + if (sz < 2) { sz = 2; } return _npy_alloc_cache(sz, sizeof(npy_intp), NBUCKETS_DIM, dimcache, @@ -137,8 +140,8 @@ NPY_NO_EXPORT void npy_free_cache_dim(void * p, npy_uintp sz) { - /* dims + strides */ - if (NPY_UNLIKELY(sz < 2)) { + /* see npy_alloc_cache_dim */ + if (sz < 2) { sz = 2; } _npy_free_cache(p, sz, NBUCKETS_DIM, dimcache, diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/alloc.h python-numpy-1.14.5/numpy/core/src/multiarray/alloc.h --- python-numpy-1.13.3/numpy/core/src/multiarray/alloc.h 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/alloc.h 2018-06-12 17:31:56.000000000 +0000 @@ -21,4 +21,16 @@ NPY_NO_EXPORT void npy_free_cache_dim(void * p, npy_uintp sd); +static NPY_INLINE void +npy_free_cache_dim_obj(PyArray_Dims dims) +{ + npy_free_cache_dim(dims.ptr, dims.len); +} + +static NPY_INLINE void +npy_free_cache_dim_array(PyArrayObject * arr) +{ + npy_free_cache_dim(PyArray_DIMS(arr), PyArray_NDIM(arr)); +} + #endif diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/array_assign_array.c python-numpy-1.14.5/numpy/core/src/multiarray/array_assign_array.c --- python-numpy-1.13.3/numpy/core/src/multiarray/array_assign_array.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/array_assign_array.c 2018-06-12 17:31:56.000000000 +0000 @@ -293,7 +293,8 @@ if (((PyArray_NDIM(dst) == 1 && PyArray_NDIM(src) >= 1 && PyArray_STRIDES(dst)[0] * PyArray_STRIDES(src)[PyArray_NDIM(src) - 1] < 0) || - PyArray_NDIM(dst) > 1) && arrays_overlap(src, dst)) { + PyArray_NDIM(dst) > 1 || PyArray_HASFIELDS(dst)) && + arrays_overlap(src, dst)) { PyArrayObject *tmp; /* @@ -345,6 +346,21 @@ } } + /* optimization: scalar boolean mask */ + if (wheremask != NULL && + PyArray_NDIM(wheremask) == 0 && + PyArray_DESCR(wheremask)->type_num 
== NPY_BOOL) { + npy_bool value = *(npy_bool *)PyArray_DATA(wheremask); + if (value) { + /* where=True is the same as no where at all */ + wheremask = NULL; + } + else { + /* where=False copies nothing */ + return 0; + } + } + if (wheremask == NULL) { /* A straightforward value assignment */ /* Do the assignment with raw array iteration */ diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/arrayobject.c python-numpy-1.14.5/numpy/core/src/multiarray/arrayobject.c --- python-numpy-1.13.3/numpy/core/src/multiarray/arrayobject.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/arrayobject.c 2018-06-12 18:28:52.000000000 +0000 @@ -53,6 +53,7 @@ #include "alloc.h" #include "mem_overlap.h" #include "numpyos.h" +#include "strfuncs.h" #include "binop_override.h" @@ -75,7 +76,7 @@ * Precondition: 'arr' is a copy of 'base' (though possibly with different * strides, ordering, etc.). This function sets the UPDATEIFCOPY flag and the * ->base pointer on 'arr', so that when 'arr' is destructed, it will copy any - * changes back to 'base'. + * changes back to 'base'. DEPRECATED, use PyArray_SetWritebackIfCopyBase * * Steals a reference to 'base'. * @@ -84,17 +85,59 @@ NPY_NO_EXPORT int PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base) { + int ret; +#ifdef PYPY_VERSION + #ifndef DEPRECATE_UPDATEIFCOPY + #define DEPRECATE_UPDATEIFCOPY + #endif +#endif + +#ifdef DEPRECATE_UPDATEIFCOPY + /* TODO: enable this once a solution for UPDATEIFCOPY + * and nditer are resolved, also pending the fix for GH7054 + */ + /* 2017-Nov-10 1.14 */ + if (DEPRECATE("PyArray_SetUpdateIfCopyBase is deprecated, use " + "PyArray_SetWritebackIfCopyBase instead, and be sure to call " + "PyArray_ResolveWritebackIfCopy before the array is deallocated, " + "i.e. before the last call to Py_DECREF. 
If cleaning up from an " + "error, PyArray_DiscardWritebackIfCopy may be called instead to " + "throw away the scratch buffer.") < 0) + return -1; +#endif + ret = PyArray_SetWritebackIfCopyBase(arr, base); + if (ret >=0) { + PyArray_ENABLEFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); + PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); + } + return ret; +} + +/*NUMPY_API + * + * Precondition: 'arr' is a copy of 'base' (though possibly with different + * strides, ordering, etc.). This function sets the WRITEBACKIFCOPY flag and the + * ->base pointer on 'arr', call PyArray_ResolveWritebackIfCopy to copy any + * changes back to 'base' before deallocating the array. + * + * Steals a reference to 'base'. + * + * Returns 0 on success, -1 on failure. + */ +NPY_NO_EXPORT int +PyArray_SetWritebackIfCopyBase(PyArrayObject *arr, PyArrayObject *base) +{ if (base == NULL) { PyErr_SetString(PyExc_ValueError, - "Cannot UPDATEIFCOPY to NULL array"); + "Cannot WRITEBACKIFCOPY to NULL array"); return -1; } if (PyArray_BASE(arr) != NULL) { PyErr_SetString(PyExc_ValueError, - "Cannot set array with existing base to UPDATEIFCOPY"); + "Cannot set array with existing base to WRITEBACKIFCOPY"); goto fail; } - if (PyArray_FailUnlessWriteable(base, "UPDATEIFCOPY base") < 0) { + if (PyArray_FailUnlessWriteable(base, "WRITEBACKIFCOPY base") < 0) { goto fail; } @@ -111,7 +154,7 @@ * references. 
*/ ((PyArrayObject_fields *)arr)->base = (PyObject *)base; - PyArray_ENABLEFLAGS(arr, NPY_ARRAY_UPDATEIFCOPY); + PyArray_ENABLEFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); PyArray_CLEARFLAGS(base, NPY_ARRAY_WRITEABLE); return 0; @@ -279,8 +322,7 @@ if (PyArray_SIZE(dest) == 1) { Py_DECREF(dtype); Py_DECREF(src_object); - ret = PyArray_DESCR(dest)->f->setitem(src_object, - PyArray_DATA(dest), dest); + ret = PyArray_SETITEM(dest, PyArray_DATA(dest), src_object); return ret; } else { @@ -291,8 +333,7 @@ Py_DECREF(src_object); return -1; } - if (PyArray_DESCR(src)->f->setitem(src_object, - PyArray_DATA(src), src) < 0) { + if (PyArray_SETITEM(src, PyArray_DATA(src), src_object) < 0) { Py_DECREF(src_object); Py_DECREF(src); return -1; @@ -371,6 +412,45 @@ return NPY_NOTYPE; } +/*NUMPY_API + * + * If WRITEBACKIFCOPY and self has data, reset the base WRITEABLE flag, + * copy the local data to base, release the local data, and set flags + * appropriately. Return 0 if not relevant, 1 if success, < 0 on failure + */ +NPY_NO_EXPORT int +PyArray_ResolveWritebackIfCopy(PyArrayObject * self) +{ + PyArrayObject_fields *fa = (PyArrayObject_fields *)self; + if (fa && fa->base) { + if ((fa->flags & NPY_ARRAY_UPDATEIFCOPY) || (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY)) { + /* + * UPDATEIFCOPY or WRITEBACKIFCOPY means that fa->base's data + * should be updated with the contents + * of self. + * fa->base->flags is not WRITEABLE to protect the relationship + * unlock it. + */ + int retval = 0; + PyArray_ENABLEFLAGS(((PyArrayObject *)fa->base), + NPY_ARRAY_WRITEABLE); + PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); + PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); + retval = PyArray_CopyAnyInto((PyArrayObject *)fa->base, self); + Py_DECREF(fa->base); + fa->base = NULL; + if (retval < 0) { + /* this should never happen, how did the two copies of data + * get out of sync? 
+ */ + return retval; + } + return 1; + } + } + return 0; +} + /*********************** end C-API functions **********************/ /* array object functions */ @@ -386,32 +466,45 @@ PyObject_ClearWeakRefs((PyObject *)self); } if (fa->base) { - /* - * UPDATEIFCOPY means that base points to an - * array that should be updated with the contents - * of this array upon destruction. - * fa->base->flags must have been WRITEABLE - * (checked previously) and it was locked here - * thus, unlock it. - */ - if (fa->flags & NPY_ARRAY_UPDATEIFCOPY) { - PyArray_ENABLEFLAGS(((PyArrayObject *)fa->base), - NPY_ARRAY_WRITEABLE); - Py_INCREF(self); /* hold on to self in next call */ - if (PyArray_CopyAnyInto((PyArrayObject *)fa->base, self) < 0) { + int retval; + if (PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) + { + char * msg = "WRITEBACKIFCOPY requires a call to " + "PyArray_ResolveWritebackIfCopy or " + "PyArray_DiscardWritebackIfCopy before array_dealloc is " + "called."; + /* 2017-Nov-10 1.14 */ + if (DEPRECATE(msg) < 0) { + /* dealloc cannot raise an error, best effort try to write + to stderr and clear the error + */ + PyErr_WriteUnraisable((PyObject *)&PyArray_Type); + } + retval = PyArray_ResolveWritebackIfCopy(self); + if (retval < 0) + { + PyErr_Print(); + PyErr_Clear(); + } + } + if (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY) { + /* DEPRECATED, remove once the flag is removed */ + Py_INCREF(self); /* hold on to self in next call since if + * refcount == 0 it will recurse back into + *array_dealloc + */ + retval = PyArray_ResolveWritebackIfCopy(self); + if (retval < 0) + { PyErr_Print(); PyErr_Clear(); } - /* - * Don't need to DECREF -- because we are deleting - *self already... 
- */ } /* * In any case base is pointing to something that we need * to DECREF -- either a view or a buffer object */ - Py_DECREF(fa->base); + Py_XDECREF(fa->base); } if ((fa->flags & NPY_ARRAY_OWNDATA) && fa->data) { @@ -433,93 +526,6 @@ Py_TYPE(self)->tp_free((PyObject *)self); } -/* - * Extend string. On failure, returns NULL and leaves *strp alone. - * XXX we do this in multiple places; time for a string library? - */ -static char * -extend(char **strp, Py_ssize_t n, Py_ssize_t *maxp) -{ - char *str = *strp; - Py_ssize_t new_cap; - - if (n >= *maxp - 16) { - new_cap = *maxp * 2; - - if (new_cap <= *maxp) { /* overflow */ - return NULL; - } - str = PyArray_realloc(*strp, new_cap); - if (str != NULL) { - *strp = str; - *maxp = new_cap; - } - } - return str; -} - -static int -dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd, - npy_intp *dimensions, npy_intp *strides, PyArrayObject* self) -{ - PyArray_Descr *descr=PyArray_DESCR(self); - PyObject *op = NULL, *sp = NULL; - char *ostring; - npy_intp i, N, ret = 0; - -#define CHECK_MEMORY do { \ - if (extend(string, *n, max_n) == NULL) { \ - ret = -1; \ - goto end; \ - } \ - } while (0) - - if (nd == 0) { - if ((op = descr->f->getitem(data, self)) == NULL) { - return -1; - } - sp = PyObject_Repr(op); - if (sp == NULL) { - ret = -1; - goto end; - } - ostring = PyString_AsString(sp); - N = PyString_Size(sp)*sizeof(char); - *n += N; - CHECK_MEMORY; - memmove(*string + (*n - N), ostring, N); - } - else { - CHECK_MEMORY; - (*string)[*n] = '['; - *n += 1; - for (i = 0; i < dimensions[0]; i++) { - if (dump_data(string, n, max_n, - data + (*strides)*i, - nd - 1, dimensions + 1, - strides + 1, self) < 0) { - return -1; - } - CHECK_MEMORY; - if (i < dimensions[0] - 1) { - (*string)[*n] = ','; - (*string)[*n+1] = ' '; - *n += 2; - } - } - CHECK_MEMORY; - (*string)[*n] = ']'; - *n += 1; - } - -#undef CHECK_MEMORY - -end: - Py_XDECREF(op); - Py_XDECREF(sp); - return ret; -} - /*NUMPY_API * Prints the 
raw data of the ndarray in a form useful for debugging * low-level C issues. @@ -570,6 +576,8 @@ printf(" NPY_WRITEABLE"); if (fobj->flags & NPY_ARRAY_UPDATEIFCOPY) printf(" NPY_UPDATEIFCOPY"); + if (fobj->flags & NPY_ARRAY_WRITEBACKIFCOPY) + printf(" NPY_WRITEBACKIFCOPY"); printf("\n"); if (fobj->base != NULL && PyArray_Check(fobj->base)) { @@ -582,72 +590,6 @@ fflush(stdout); } -static PyObject * -array_repr_builtin(PyArrayObject *self, int repr) -{ - PyObject *ret; - char *string; - /* max_n initial value is arbitrary, dump_data will extend it */ - Py_ssize_t n = 0, max_n = PyArray_NBYTES(self) * 4 + 7; - - if ((string = PyArray_malloc(max_n)) == NULL) { - return PyErr_NoMemory(); - } - - if (dump_data(&string, &n, &max_n, PyArray_DATA(self), - PyArray_NDIM(self), PyArray_DIMS(self), - PyArray_STRIDES(self), self) < 0) { - PyArray_free(string); - return NULL; - } - - if (repr) { - if (PyArray_ISEXTENDED(self)) { - ret = PyUString_FromFormat("array(%s, '%c%d')", - string, - PyArray_DESCR(self)->type, - PyArray_DESCR(self)->elsize); - } - else { - ret = PyUString_FromFormat("array(%s, '%c')", - string, - PyArray_DESCR(self)->type); - } - } - else { - ret = PyUString_FromStringAndSize(string, n); - } - - PyArray_free(string); - return ret; -} - -static PyObject *PyArray_StrFunction = NULL; -static PyObject *PyArray_ReprFunction = NULL; - -/*NUMPY_API - * Set the array print function to be a Python function. 
- */ -NPY_NO_EXPORT void -PyArray_SetStringFunction(PyObject *op, int repr) -{ - if (repr) { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_ReprFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_ReprFunction = op; - } - else { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_StrFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_StrFunction = op; - } -} /*NUMPY_API * This function is scheduled to be removed @@ -659,41 +601,6 @@ { } - -static PyObject * -array_repr(PyArrayObject *self) -{ - PyObject *s, *arglist; - - if (PyArray_ReprFunction == NULL) { - s = array_repr_builtin(self, 1); - } - else { - arglist = Py_BuildValue("(O)", self); - s = PyEval_CallObject(PyArray_ReprFunction, arglist); - Py_DECREF(arglist); - } - return s; -} - -static PyObject * -array_str(PyArrayObject *self) -{ - PyObject *s, *arglist; - - if (PyArray_StrFunction == NULL) { - s = array_repr_builtin(self, 0); - } - else { - arglist = Py_BuildValue("(O)", self); - s = PyEval_CallObject(PyArray_StrFunction, arglist); - Py_DECREF(arglist); - } - return s; -} - - - /*NUMPY_API */ NPY_NO_EXPORT int @@ -1703,14 +1610,14 @@ } } - PyDimMem_FREE(dims.ptr); - PyDimMem_FREE(strides.ptr); + npy_free_cache_dim_obj(dims); + npy_free_cache_dim_obj(strides); return (PyObject *)ret; fail: Py_XDECREF(descr); - PyDimMem_FREE(dims.ptr); - PyDimMem_FREE(strides.ptr); + npy_free_cache_dim_obj(dims); + npy_free_cache_dim_obj(strides); return NULL; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/arraytypes.c.src python-numpy-1.14.5/numpy/core/src/multiarray/arraytypes.c.src --- python-numpy-1.13.3/numpy/core/src/multiarray/arraytypes.c.src 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/arraytypes.c.src 2018-06-12 18:28:52.000000000 +0000 @@ -36,6 +36,16 @@ #include #include +/* check for sequences, but ignore the types numpy 
considers scalars */ +static NPY_INLINE npy_bool +PySequence_NoString_Check(PyObject *op) { + return + PySequence_Check(op) && + !PyString_Check(op) && + !PyUnicode_Check(op) && + !PyArray_IsZeroDim(op); +} + /* ***************************************************************************** ** PYTHON TYPES TO C TYPES ** @@ -171,6 +181,15 @@ ***************************************************************************** */ +#define _ALIGN(type) offsetof(struct {char c; type v;}, v) +/* + * Disable harmless compiler warning "4116: unnamed type definition in + * parentheses" which is caused by the _ALIGN macro. + */ +#if defined(_MSC_VER) +#pragma warning(disable:4116) +#endif + /**begin repeat * @@ -203,7 +222,7 @@ return @func1@((@type1@)t1); } else { - PyArray_DESCR(ap)->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), ap); + PyArray_DESCR(ap)->f->copyswap(&t1, ip, PyArray_ISBYTESWAPPED(ap), ap); return @func1@((@type1@)t1); } } @@ -223,8 +242,7 @@ if (PyErr_Occurred()) { PyObject *type, *value, *traceback; PyErr_Fetch(&type, &value, &traceback); - if (PySequence_Check(op) && !PyString_Check(op) && - !PyUnicode_Check(op)) { + if (PySequence_NoString_Check(op)) { PyErr_SetString(PyExc_ValueError, "setting an array element with a sequence."); Py_DECREF(type); @@ -236,10 +254,13 @@ } return -1; } - if (ap == NULL || PyArray_ISBEHAVED(ap)) + if (ap == NULL || PyArray_ISBEHAVED(ap)) { + assert(npy_is_aligned(ov, _ALIGN(@type@))); *((@type@ *)ov)=temp; + } else { - PyArray_DESCR(ap)->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap); + PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), + ap); } return 0; } @@ -265,7 +286,7 @@ else { int size = sizeof(@type@); - npy_bool swap = !PyArray_ISNOTSWAPPED(ap); + npy_bool swap = PyArray_ISBYTESWAPPED(ap); copy_and_swap(&t1, ip, size, 1, 0, swap); copy_and_swap(&t2, ip + size, size, 1, 0, swap); return PyComplex_FromDoubles((double)t1, (double)t2); @@ -288,48 +309,37 @@ { PyArrayObject *ap = vap; Py_complex oop; 
- PyObject *op2; @type@ temp; int rsize; + if (PyArray_IsZeroDim(op)) { + return convert_to_scalar_and_retry(op, ov, vap, @NAME@_setitem); + } + if (PyArray_IsScalar(op, @kind@)){ temp = ((Py@kind@ScalarObject *)op)->obval; } else { - if (PyArray_IsZeroDim(op)) { - /* - * TODO: Elsewhere in this file we use PyArray_ToScalar. Is this - * better or worse? Possibly an optimization. - */ - op2 = PyArray_DESCR((PyArrayObject *)op)->f->getitem( - PyArray_BYTES((PyArrayObject *)op), - (PyArrayObject *)op); - } - else { - op2 = op; - Py_INCREF(op); - } - if (op2 == Py_None) { + if (op == Py_None) { oop.real = NPY_NAN; oop.imag = NPY_NAN; } else { - oop = PyComplex_AsCComplex (op2); - } - Py_DECREF(op2); - if (PyErr_Occurred()) { - return -1; + oop = PyComplex_AsCComplex (op); + if (PyErr_Occurred()) { + return -1; + } } temp.real = (@ftype@) oop.real; temp.imag = (@ftype@) oop.imag; } memcpy(ov, &temp, PyArray_DESCR(ap)->elsize); - if (!PyArray_ISNOTSWAPPED(ap)) { + if (PyArray_ISBYTESWAPPED(ap)) { byte_swap_vector(ov, 2, sizeof(@ftype@)); } rsize = sizeof(@ftype@); - copy_and_swap(ov, &temp, rsize, 2, rsize, !PyArray_ISNOTSWAPPED(ap)); + copy_and_swap(ov, &temp, rsize, 2, rsize, PyArray_ISBYTESWAPPED(ap)); return 0; } @@ -357,12 +367,13 @@ if (s) { errno = 0; temp = NumPyOS_ascii_strtold(s, &end); - if (end==s || *end) { - PyErr_Format(PyExc_ValueError, - "invalid literal for long double: %s", - s); - Py_XDECREF(b); - return 0; + if (errno == ERANGE) { + if (PyErr_Warn(PyExc_RuntimeWarning, + "overflow encountered in conversion from string") < 0) { + Py_XDECREF(b); + return 0; + } + /* strtold returns INFINITY of the correct sign. 
*/ } else if (errno) { PyErr_Format(PyExc_ValueError, @@ -372,6 +383,15 @@ Py_XDECREF(b); return 0; } + + /* Extra characters at the end of the string, or nothing parsed */ + if (end == s || *end) { + PyErr_Format(PyExc_ValueError, + "invalid literal for long double: %s", + s); + Py_XDECREF(b); + return 0; + } Py_XDECREF(b); } else { @@ -422,7 +442,7 @@ } else { copy_and_swap(ov, &temp, PyArray_DESCR(ap)->elsize, 1, 0, - !PyArray_ISNOTSWAPPED(ap)); + PyArray_ISBYTESWAPPED(ap)); } return 0; } @@ -439,7 +459,7 @@ { PyArrayObject *ap = vap; Py_ssize_t size = PyArray_ITEMSIZE(ap); - int swap = !PyArray_ISNOTSWAPPED(ap); + int swap = PyArray_ISBYTESWAPPED(ap); int align = !PyArray_ISALIGNED(ap); return (PyObject *)PyUnicode_FromUCS4(ip, size, swap, align); @@ -460,7 +480,7 @@ return convert_to_scalar_and_retry(op, ov, vap, UNICODE_setitem); } - if (!PyBytes_Check(op) && !PyUnicode_Check(op) && PySequence_Check(op)) { + if (PySequence_NoString_Check(op)) { PyErr_SetString(PyExc_ValueError, "setting an array element with a sequence"); return -1; @@ -512,7 +532,7 @@ if (PyArray_DESCR(ap)->elsize > datalen) { memset((char*)ov + datalen, 0, (PyArray_DESCR(ap)->elsize - datalen)); } - if (!PyArray_ISNOTSWAPPED(ap)) { + if (PyArray_ISBYTESWAPPED(ap)) { byte_swap_vector(ov, PyArray_DESCR(ap)->elsize >> 2, 4); } Py_DECREF(temp); @@ -551,7 +571,7 @@ return convert_to_scalar_and_retry(op, ov, vap, STRING_setitem); } - if (!PyBytes_Check(op) && !PyUnicode_Check(op) && PySequence_Check(op)) { + if (PySequence_NoString_Check(op)) { PyErr_SetString(PyExc_ValueError, "setting an array element with a sequence"); return -1; @@ -686,7 +706,7 @@ else { PyArray_ENABLEFLAGS(ap, NPY_ARRAY_ALIGNED); } - PyTuple_SET_ITEM(ret, i, new->f->getitem(ip+offset, ap)); + PyTuple_SET_ITEM(ret, i, PyArray_GETITEM(ap, ip+offset)); ((PyArrayObject_fields *)ap)->flags = savedflags; } ((PyArrayObject_fields *)ap)->descr = descr; @@ -699,7 +719,7 @@ PyArrayObject *ret; if 
(!(PyArray_IntpConverter(descr->subarray->shape, &shape))) { - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple."); return NULL; @@ -708,7 +728,7 @@ ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, descr->subarray->base, shape.len, shape.ptr, NULL, ip, PyArray_FLAGS(ap)&(~NPY_ARRAY_F_CONTIGUOUS), NULL); - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); if (!ret) { return NULL; } @@ -721,6 +741,21 @@ return (PyObject *)ret; } + /* 2017-11-26, 1.14 */ + if (DEPRECATE_FUTUREWARNING( + "the `.item()` method of unstructured void types will return an " + "immutable `bytes` object in the near future, the same as " + "returned by `bytes(void_obj)`, instead of the mutable memoryview " + "or integer array returned in numpy 1.13.") < 0) { + return NULL; + } + /* + * In the future all the code below will be replaced by + * + * For unstructured void types like V4, return a bytes object (copy). + * return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize); + */ + if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { PyErr_SetString(PyExc_ValueError, @@ -773,71 +808,179 @@ NPY_NO_EXPORT int PyArray_CopyObject(PyArrayObject *, PyObject *); +/* Given a structured PyArrayObject arr, index i and structured datatype descr, + * modify the dtype of arr to contain a single field corresponding to the ith + * field of descr, recompute the alignment flag, and return the offset of the + * field (in offset_p). This is useful in preparation for calling copyswap on + * individual fields of a numpy structure, in VOID_setitem. Compare to inner + * loops in VOID_getitem and VOID_nonzero. + * + * WARNING: Clobbers arr's dtype and alignment flag. 
+ */ +NPY_NO_EXPORT int +_setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr, + npy_intp *offset_p, char *dstdata) +{ + PyObject *key; + PyObject *tup; + PyArray_Descr *new; + npy_intp offset; + + key = PyTuple_GET_ITEM(descr->names, i); + tup = PyDict_GetItem(descr->fields, key); + if (_unpack_field(tup, &new, &offset) < 0) { + return -1; + } + + ((PyArrayObject_fields *)(arr))->descr = new; + if ((new->alignment > 1) && + ((((uintptr_t)dstdata + offset) % new->alignment) != 0)) { + PyArray_CLEARFLAGS(arr, NPY_ARRAY_ALIGNED); + } + else { + PyArray_ENABLEFLAGS(arr, NPY_ARRAY_ALIGNED); + } + + *offset_p = offset; + return 0; +} + +/* Helper function for VOID_setitem, which uses the copyswap or casting code to + * copy structured datatypes between numpy arrays or scalars. + */ +static int +_copy_and_return_void_setitem(PyArray_Descr *dstdescr, char *dstdata, + PyArray_Descr *srcdescr, char *srcdata){ + PyArrayObject_fields dummy_struct; + PyArrayObject *dummy = (PyArrayObject *)&dummy_struct; + npy_int names_size = PyTuple_GET_SIZE(dstdescr->names); + npy_intp offset; + npy_int i; + int ret; + + /* Fast path if dtypes are equal */ + if (PyArray_EquivTypes(srcdescr, dstdescr)) { + for (i = 0; i < names_size; i++) { + /* neither line can ever fail, in principle */ + if (_setup_field(i, dstdescr, dummy, &offset, dstdata)) { + return -1; + } + PyArray_DESCR(dummy)->f->copyswap(dstdata + offset, + srcdata + offset, 0, dummy); + } + return 0; + } + + /* Slow path */ + ret = PyArray_CastRawArrays(1, srcdata, dstdata, 0, 0, + srcdescr, dstdescr, 0); + if (ret != NPY_SUCCEED) { + return -1; + } + return 0; +} + static int VOID_setitem(PyObject *op, void *input, void *vap) { char *ip = input; PyArrayObject *ap = vap; PyArray_Descr *descr; + int flags; int itemsize=PyArray_DESCR(ap)->elsize; int res; descr = PyArray_DESCR(ap); - if (descr->names && PyTuple_Check(op)) { - PyObject *key; - PyObject *names; - int i, n; - PyObject *tup; - int savedflags; + flags = 
PyArray_FLAGS(ap); + if (PyDataType_HASFIELDS(descr)) { + PyObject *errmsg; + npy_int i; + npy_intp offset; + int failed = 0; - res = 0; - /* get the names from the fields dictionary*/ - names = descr->names; - n = PyTuple_GET_SIZE(names); - if (PyTuple_GET_SIZE(op) != n) { - PyErr_SetString(PyExc_ValueError, - "size of tuple must match number of fields."); - return -1; - } - savedflags = PyArray_FLAGS(ap); - for (i = 0; i < n; i++) { - PyArray_Descr *new; - npy_intp offset; - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(descr->fields, key); - if (_unpack_field(tup, &new, &offset) < 0) { - ((PyArrayObject_fields *)ap)->descr = descr; + /* If op is 0d-ndarray or numpy scalar, directly get dtype & data ptr */ + if (PyArray_Check(op)) { + PyArrayObject *oparr = (PyArrayObject *)op; + if (PyArray_SIZE(oparr) != 1) { + PyErr_SetString(PyExc_ValueError, + "setting an array element with a sequence."); return -1; } - /* - * TODO: temporarily modifying the array like this - * is bad coding style, should be changed. 
- */ - ((PyArrayObject_fields *)ap)->descr = new; - /* remember to update alignment flags */ - if ((new->alignment > 1) - && ((((npy_intp)(ip+offset)) % new->alignment) != 0)) { - PyArray_CLEARFLAGS(ap, NPY_ARRAY_ALIGNED); + return _copy_and_return_void_setitem(descr, ip, + PyArray_DESCR(oparr), PyArray_DATA(oparr)); + } + else if (PyArray_IsScalar(op, Void)) { + PyArray_Descr *srcdescr = ((PyVoidScalarObject *)op)->descr; + char *srcdata = ((PyVoidScalarObject *)op)->obval; + return _copy_and_return_void_setitem(descr, ip, srcdescr, srcdata); + } + else if (PyTuple_Check(op)) { + /* if it's a tuple, copy field-by-field to ap, */ + npy_intp names_size = PyTuple_GET_SIZE(descr->names); + + if (names_size != PyTuple_Size(op)) { + errmsg = PyUString_FromFormat( + "could not assign tuple of length %zd to structure " + "with %" NPY_INTP_FMT " fields.", + PyTuple_Size(op), names_size); + PyErr_SetObject(PyExc_ValueError, errmsg); + Py_DECREF(errmsg); + return -1; } - else { - PyArray_ENABLEFLAGS(ap, NPY_ARRAY_ALIGNED); + + for (i = 0; i < names_size; i++) { + PyObject *item; + + /* temporarily make ap have only this field */ + if (_setup_field(i, descr, ap, &offset, ip) == -1) { + failed = 1; + break; + } + item = PyTuple_GetItem(op, i); + if (item == NULL) { + failed = 1; + break; + } + /* use setitem to set this field */ + if (PyArray_SETITEM(ap, ip + offset, item) < 0) { + failed = 1; + break; + } } - res = new->f->setitem(PyTuple_GET_ITEM(op, i), ip+offset, ap); - ((PyArrayObject_fields *)ap)->flags = savedflags; - if (res < 0) { - break; + } + else { + /* Otherwise must be non-void scalar. 
Try to assign to each field */ + npy_intp names_size = PyTuple_GET_SIZE(descr->names); + + for (i = 0; i < names_size; i++) { + /* temporarily make ap have only this field */ + if (_setup_field(i, descr, ap, &offset, ip) == -1) { + failed = 1; + break; + } + /* use setitem to set this field */ + if (PyArray_SETITEM(ap, ip + offset, op) < 0) { + failed = 1; + break; + } } } - ((PyArrayObject_fields *)ap)->descr = descr; - return res; - } - if (descr->subarray) { + /* reset clobbered attributes */ + ((PyArrayObject_fields *)(ap))->descr = descr; + ((PyArrayObject_fields *)(ap))->flags = flags; + + if (failed) { + return -1; + } + return 0; + } + else if (PyDataType_HASSUBARRAY(descr)) { /* copy into an array of the same basic type */ PyArray_Dims shape = {NULL, -1}; PyArrayObject *ret; if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) { - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple."); return -1; @@ -846,7 +989,7 @@ ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, descr->subarray->base, shape.len, shape.ptr, NULL, ip, PyArray_FLAGS(ap), NULL); - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); if (!ret) { return -1; } @@ -861,19 +1004,17 @@ return res; } - /* Default is to use buffer interface to set item */ + /* + * Fall through case - non-structured void datatype. This is a very + * undiscerning case: It interprets any object as a buffer + * and reads as many bytes as possible, padding with 0. 
+ */ { const void *buffer; Py_ssize_t buflen; - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) - || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { - PyErr_SetString(PyExc_ValueError, - "Setting void-array with object members using buffer."); - return -1; - } res = PyObject_AsReadBuffer(op, &buffer, &buflen); if (res == -1) { - goto fail; + return -1; } memcpy(ip, buffer, PyArray_MIN(buflen, itemsize)); if (itemsize > buflen) { @@ -881,9 +1022,6 @@ } } return 0; - -fail: - return -1; } static PyObject * @@ -903,7 +1041,7 @@ dt = *((npy_datetime *)ip); } else { - PyArray_DESCR(ap)->f->copyswap(&dt, ip, !PyArray_ISNOTSWAPPED(ap), ap); + PyArray_DESCR(ap)->f->copyswap(&dt, ip, PyArray_ISBYTESWAPPED(ap), ap); } return convert_datetime_to_pyobject(dt, meta); @@ -927,7 +1065,7 @@ td = *((npy_timedelta *)ip); } else { - PyArray_DESCR(ap)->f->copyswap(&td, ip, !PyArray_ISNOTSWAPPED(ap), ap); + PyArray_DESCR(ap)->f->copyswap(&td, ip, PyArray_ISBYTESWAPPED(ap), ap); } return convert_timedelta_to_pyobject(td, meta); @@ -958,8 +1096,8 @@ *((npy_datetime *)ov)=temp; } else { - PyArray_DESCR(ap)->f->copyswap(ov, &temp, - !PyArray_ISNOTSWAPPED(ap), ap); + PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), + ap); } return 0; @@ -990,7 +1128,8 @@ *((npy_timedelta *)ov)=temp; } else { - PyArray_DESCR(ap)->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap); + PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap), + ap); } return 0; @@ -1459,12 +1598,12 @@ PyArrayObject *aip = vaip; npy_intp i; - PyObject *temp = NULL, *new; int skip = PyArray_DESCR(aip)->elsize; int oskip = @oskip@; for (i = 0; i < n; i++, ip+=skip, op+=oskip) { - temp = @from@_getitem(ip, aip); + PyObject *new; + PyObject *temp = PyArray_Scalar(ip, PyArray_DESCR(aip), (PyObject *)aip); if (temp == NULL) { return; } @@ -1519,12 +1658,11 @@ PyArrayObject *aip = vaip; npy_intp i; - PyObject *temp = NULL; int skip = PyArray_DESCR(aip)->elsize; int oskip = @oskip@; for (i = 0; i < 
n; i++, ip+=skip, op+=oskip) { - temp = @from@_getitem(ip, aip); + PyObject *temp = PyArray_Scalar(ip, PyArray_DESCR(aip), (PyObject *)aip); if (temp == NULL) { return; } @@ -1572,7 +1710,7 @@ int skip = 1; int oskip = PyArray_DESCR(aop)->elsize; for (i = 0; i < n; i++, ip += skip, op += oskip) { - temp = @from@_getitem(ip, aip); + temp = PyArray_Scalar(ip, PyArray_DESCR(aip), (PyObject *)aip); if (temp == NULL) { Py_INCREF(Py_False); temp = Py_False; @@ -2374,7 +2512,8 @@ */ @type@ tmp; #if @isfloat@ - PyArray_DESCR(ap)->f->copyswap(&tmp, ip, !PyArray_ISNOTSWAPPED(ap), ap); + PyArray_DESCR(ap)->f->copyswap(&tmp, ip, PyArray_ISBYTESWAPPED(ap), + ap); #else memcpy(&tmp, ip, sizeof(@type@)); #endif @@ -2397,7 +2536,8 @@ } else { @type@ tmp; - PyArray_DESCR(ap)->f->copyswap(&tmp, ip, !PyArray_ISNOTSWAPPED(ap), ap); + PyArray_DESCR(ap)->f->copyswap(&tmp, ip, PyArray_ISBYTESWAPPED(ap), + ap); return (npy_bool) ((tmp.real != 0) || (tmp.imag != 0)); } } @@ -2459,13 +2599,13 @@ npy_bool seen_null = NPY_FALSE; char *buffer = NULL; - if ((!PyArray_ISNOTSWAPPED(ap)) || (!PyArray_ISALIGNED(ap))) { + if (PyArray_ISBYTESWAPPED(ap) || !PyArray_ISALIGNED(ap)) { buffer = PyArray_malloc(PyArray_DESCR(ap)->elsize); if (buffer == NULL) { return nonz; } memcpy(buffer, ip, PyArray_DESCR(ap)->elsize); - if (!PyArray_ISNOTSWAPPED(ap)) { + if (PyArray_ISBYTESWAPPED(ap)) { byte_swap_vector(buffer, len, 4); } ip = (npy_ucs4 *)buffer; @@ -4112,17 +4252,6 @@ ***************************************************************************** */ - -#define _ALIGN(type) offsetof(struct {char c; type v;}, v) -/* - * Disable harmless compiler warning "4116: unnamed type definition in - * parentheses" which is caused by the _ALIGN macro. 
- */ -#if defined(_MSC_VER) -#pragma warning(disable:4116) -#endif - - /**begin repeat * * #from = VOID, STRING, UNICODE# @@ -4643,6 +4772,15 @@ /**end repeat**/ + + /**begin repeat + * #name = STRING, UNICODE, VOID# + */ + + PyDataType_MAKEUNSIZED(&@name@_Descr); + + /**end repeat**/ + /* Set a dictionary with type information */ infodict = PyDict_New(); if (infodict == NULL) return -1; diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/buffer.c python-numpy-1.14.5/numpy/core/src/multiarray/buffer.c --- python-numpy-1.13.3/numpy/core/src/multiarray/buffer.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/buffer.c 2018-06-12 18:28:52.000000000 +0000 @@ -12,6 +12,7 @@ #include "npy_pycompat.h" #include "buffer.h" +#include "common.h" #include "numpyos.h" #include "arrayobject.h" @@ -243,14 +244,19 @@ child = (PyArray_Descr*)PyTuple_GetItem(item, 0); offset_obj = PyTuple_GetItem(item, 1); - new_offset = base_offset + PyInt_AsLong(offset_obj); + new_offset = PyInt_AsLong(offset_obj); + if (error_converting(new_offset)) { + return -1; + } + new_offset += base_offset; /* Insert padding manually */ if (*offset > new_offset) { - PyErr_SetString(PyExc_RuntimeError, - "This should never happen: Invalid offset in " - "buffer format string generation. Please " - "report a bug to the Numpy developers."); + PyErr_SetString( + PyExc_ValueError, + "dtypes with overlapping or out-of-order fields are not " + "representable as buffers. Consider reordering the fields." 
+ ); return -1; } while (*offset < new_offset) { @@ -828,6 +834,7 @@ /* Strip whitespace, except from field names */ buf = malloc(strlen(s) + 1); if (buf == NULL) { + PyErr_NoMemory(); return NULL; } p = buf; diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/calculation.c python-numpy-1.14.5/numpy/core/src/multiarray/calculation.c --- python-numpy-1.13.3/numpy/core/src/multiarray/calculation.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/calculation.c 2018-06-12 17:35:36.000000000 +0000 @@ -118,7 +118,7 @@ } rp = (PyArrayObject *)PyArray_FromArray(out, PyArray_DescrFromType(NPY_INTP), - NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY); + NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY); if (rp == NULL) { goto fail; } @@ -134,8 +134,9 @@ NPY_END_THREADS_DESCR(PyArray_DESCR(ap)); Py_DECREF(ap); - /* Trigger the UPDATEIFCOPY if necessary */ + /* Trigger the UPDATEIFCOPY/WRTIEBACKIFCOPY if necessary */ if (out != NULL && out != rp) { + PyArray_ResolveWritebackIfCopy(rp); Py_DECREF(rp); rp = out; Py_INCREF(rp); @@ -233,7 +234,7 @@ } rp = (PyArrayObject *)PyArray_FromArray(out, PyArray_DescrFromType(NPY_INTP), - NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY); + NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY); if (rp == NULL) { goto fail; } @@ -249,8 +250,9 @@ NPY_END_THREADS_DESCR(PyArray_DESCR(ap)); Py_DECREF(ap); - /* Trigger the UPDATEIFCOPY if necessary */ + /* Trigger the UPDATEIFCOPY/WRITEBACKIFCOPY if necessary */ if (out != NULL && out != rp) { + PyArray_ResolveWritebackIfCopy(rp); Py_DECREF(rp); rp = out; Py_INCREF(rp); @@ -1117,7 +1119,7 @@ oflags = NPY_ARRAY_FARRAY; else oflags = NPY_ARRAY_CARRAY; - oflags |= NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_FORCECAST; + oflags |= NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_FORCECAST; Py_INCREF(indescr); newout = (PyArrayObject*)PyArray_FromArray(out, indescr, oflags); if (newout == NULL) { @@ -1153,6 +1155,7 @@ Py_XDECREF(maxa); Py_DECREF(newin); /* Copy back into out if out was not already a nice 
array. */ + PyArray_ResolveWritebackIfCopy(newout); Py_DECREF(newout); return (PyObject *)out; @@ -1162,7 +1165,8 @@ Py_XDECREF(maxa); Py_XDECREF(mina); Py_XDECREF(newin); - PyArray_XDECREF_ERR(newout); + PyArray_DiscardWritebackIfCopy(newout); + Py_XDECREF(newout); return NULL; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/cblasfuncs.c python-numpy-1.14.5/numpy/core/src/multiarray/cblasfuncs.c --- python-numpy-1.13.3/numpy/core/src/multiarray/cblasfuncs.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/cblasfuncs.c 2018-06-12 17:31:56.000000000 +0000 @@ -250,8 +250,6 @@ npy_intp ap1stride = 0; npy_intp dimensions[NPY_MAXDIMS]; npy_intp numbytes; - double prior1, prior2; - PyTypeObject *subtype; MatrixShape ap1shape, ap2shape; if (_bad_strides(ap1)) { @@ -381,29 +379,17 @@ } } - /* Choose which subtype to return */ - if (Py_TYPE(ap1) != Py_TYPE(ap2)) { - prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); - prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1)); - } - else { - prior1 = prior2 = 0.0; - subtype = Py_TYPE(ap1); - } - if (out != NULL) { int d; /* verify that out is usable */ - if (Py_TYPE(out) != subtype || - PyArray_NDIM(out) != nd || + if (PyArray_NDIM(out) != nd || PyArray_TYPE(out) != typenum || !PyArray_ISCARRAY(out)) { PyErr_SetString(PyExc_ValueError, - "output array is not acceptable " - "(must have the right type, nr dimensions, and be a C-Array)"); + "output array is not acceptable (must have the right datatype, " + "number of dimensions, and be a C-Array)"); goto fail; } for (d = 0; d < nd; ++d) { @@ -426,7 +412,7 @@ /* set copy-back */ Py_INCREF(out); - if (PyArray_SetUpdateIfCopyBase(out_buf, out) < 0) { + if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) { Py_DECREF(out); goto fail; } @@ -439,7 +425,22 @@ result = out; } else { - PyObject *tmp = (PyObject *)(prior2 > prior1 ? 
ap2 : ap1); + double prior1, prior2; + PyTypeObject *subtype; + PyObject *tmp; + + /* Choose which subtype to return */ + if (Py_TYPE(ap1) != Py_TYPE(ap2)) { + prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); + prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); + subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1)); + } + else { + prior1 = prior2 = 0.0; + subtype = Py_TYPE(ap1); + } + + tmp = (PyObject *)(prior2 > prior1 ? ap2 : ap1); out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, typenum, NULL, NULL, 0, 0, tmp); @@ -771,6 +772,7 @@ Py_DECREF(ap2); /* Trigger possible copyback into `result` */ + PyArray_ResolveWritebackIfCopy(out_buf); Py_DECREF(out_buf); return PyArray_Return(result); diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/compiled_base.c python-numpy-1.14.5/numpy/core/src/multiarray/compiled_base.c --- python-numpy-1.13.3/numpy/core/src/multiarray/compiled_base.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/compiled_base.c 2018-06-12 18:28:52.000000000 +0000 @@ -10,6 +10,8 @@ #include "npy_config.h" #include "templ_common.h" /* for npy_mul_with_overflow_intp */ #include "lowlevel_strided_loops.h" /* for npy_bswap8 */ +#include "alloc.h" +#include "common.h" /* @@ -94,9 +96,10 @@ NPY_NO_EXPORT PyObject * arr_bincount(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) { - PyObject *list = NULL, *weight = Py_None, *mlength = Py_None; + PyObject *list = NULL, *weight = Py_None, *mlength = NULL; PyArrayObject *lst = NULL, *ans = NULL, *wts = NULL; - npy_intp *numbers, *ians, len, mx, mn, ans_size, minlength; + npy_intp *numbers, *ians, len, mx, mn, ans_size; + npy_intp minlength = 0; npy_intp i; double *weights , *dans; static char *kwlist[] = {"list", "weights", "minlength", NULL}; @@ -112,20 +115,30 @@ } len = PyArray_SIZE(lst); + /* + * This if/else if can be removed by changing the argspec to O|On above, + * once we retire the deprecation + */ if (mlength == Py_None) { - 
minlength = 0; + /* NumPy 1.14, 2017-06-01 */ + if (DEPRECATE("0 should be passed as minlength instead of None; " + "this will error in future.") < 0) { + goto fail; + } } - else { + else if (mlength != NULL) { minlength = PyArray_PyIntAsIntp(mlength); - if (minlength < 0) { - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, - "minlength must be non-negative"); - } + if (error_converting(minlength)) { goto fail; } } + if (minlength < 0) { + PyErr_SetString(PyExc_ValueError, + "'minlength' must not be negative"); + goto fail; + } + /* handle empty list */ if (len == 0) { ans = (PyArrayObject *)PyArray_ZEROS(1, &minlength, NPY_INTP, 0); @@ -140,7 +153,7 @@ minmax(numbers, len, &mn, &mx); if (mn < 0) { PyErr_SetString(PyExc_ValueError, - "The first argument of bincount must be non-negative"); + "'list' argument must have no negative elements"); goto fail; } ans_size = mx + 1; @@ -326,7 +339,7 @@ } array = (PyArrayObject *)PyArray_FromArray((PyArrayObject *)array0, NULL, - NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY); + NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY); if (array == NULL) { goto fail; } @@ -401,6 +414,7 @@ Py_XDECREF(values); Py_XDECREF(mask); + PyArray_ResolveWritebackIfCopy(array); Py_DECREF(array); Py_RETURN_NONE; @@ -579,7 +593,7 @@ } else { lval = PyFloat_AsDouble(left); - if ((lval == -1) && PyErr_Occurred()) { + if (error_converting(lval)) { goto fail; } } @@ -588,7 +602,7 @@ } else { rval = PyFloat_AsDouble(right); - if ((rval == -1) && PyErr_Occurred()) { + if (error_converting(rval)) { goto fail; } } @@ -735,11 +749,11 @@ } else { lval.real = PyComplex_RealAsDouble(left); - if ((lval.real == -1) && PyErr_Occurred()) { + if (error_converting(lval.real)) { goto fail; } lval.imag = PyComplex_ImagAsDouble(left); - if ((lval.imag == -1) && PyErr_Occurred()) { + if (error_converting(lval.imag)) { goto fail; } } @@ -749,11 +763,11 @@ } else { rval.real = PyComplex_RealAsDouble(right); - if ((rval.real == -1) && PyErr_Occurred()) { + if 
(error_converting(rval.real)) { goto fail; } rval.imag = PyComplex_ImagAsDouble(right); - if ((rval.imag == -1) && PyErr_Occurred()) { + if (error_converting(rval.imag)) { goto fail; } } @@ -1091,7 +1105,7 @@ for (i = 0; i < dimensions.len; ++i) { Py_XDECREF(op[i]); } - PyDimMem_FREE(dimensions.ptr); + npy_free_cache_dim_obj(dimensions); NpyIter_Deallocate(iter); return PyArray_Return(ret); @@ -1100,7 +1114,7 @@ for (i = 0; i < dimensions.len; ++i) { Py_XDECREF(op[i]); } - PyDimMem_FREE(dimensions.ptr); + npy_free_cache_dim_obj(dimensions); NpyIter_Deallocate(iter); return NULL; } @@ -1133,8 +1147,11 @@ } NPY_END_ALLOW_THREADS; if (invalid) { - PyErr_SetString(PyExc_ValueError, - "invalid entry in index array"); + PyErr_Format(PyExc_ValueError, + "index %" NPY_INTP_FMT " is out of bounds for array with size " + "%" NPY_INTP_FMT, + val, unravel_size + ); return NPY_FAIL; } return NPY_SUCCEED; @@ -1167,8 +1184,11 @@ } NPY_END_ALLOW_THREADS; if (invalid) { - PyErr_SetString(PyExc_ValueError, - "invalid entry in index array"); + PyErr_Format(PyExc_ValueError, + "index %" NPY_INTP_FMT " is out of bounds for array with size " + "%" NPY_INTP_FMT, + val, unravel_size + ); return NPY_FAIL; } return NPY_SUCCEED; @@ -1200,12 +1220,6 @@ goto fail; } - if (dimensions.len == 0) { - PyErr_SetString(PyExc_ValueError, - "dims must have at least one value"); - goto fail; - } - unravel_size = PyArray_MultiplyList(dimensions.ptr, dimensions.len); if (!PyArray_Check(indices0)) { @@ -1326,6 +1340,20 @@ goto fail; } + + if (dimensions.len == 0 && PyArray_NDIM(indices) != 0) { + /* + * There's no index meaning "take the only element 10 times" + * on a zero-d array, so we have no choice but to error. (See gh-580) + * + * Do this check after iterating, so we give a better error message + * for invalid indices. 
+ */ + PyErr_SetString(PyExc_ValueError, + "multiple indices are not supported for 0d arrays"); + goto fail; + } + /* Now make a tuple of views, one per index */ ret_tuple = PyTuple_New(dimensions.len); if (ret_tuple == NULL) { @@ -1352,7 +1380,7 @@ Py_DECREF(ret_arr); Py_XDECREF(indices); - PyDimMem_FREE(dimensions.ptr); + npy_free_cache_dim_obj(dimensions); NpyIter_Deallocate(iter); return ret_tuple; @@ -1362,7 +1390,7 @@ Py_XDECREF(ret_arr); Py_XDECREF(dtype); Py_XDECREF(indices); - PyDimMem_FREE(dimensions.ptr); + npy_free_cache_dim_obj(dimensions); NpyIter_Deallocate(iter); return NULL; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/conversion_utils.c python-numpy-1.14.5/numpy/core/src/multiarray/conversion_utils.c --- python-numpy-1.13.3/numpy/core/src/multiarray/conversion_utils.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/conversion_utils.c 2018-06-12 18:28:52.000000000 +0000 @@ -15,6 +15,7 @@ #include "arraytypes.h" #include "conversion_utils.h" +#include "alloc.h" static int PyArray_PyIntAsInt_ErrMsg(PyObject *o, const char * msg) NPY_GCC_NONNULL(2); @@ -119,7 +120,7 @@ return NPY_FAIL; } if (len > 0) { - seq->ptr = PyDimMem_NEW(len); + seq->ptr = npy_alloc_cache_dim(len); if (seq->ptr == NULL) { PyErr_NoMemory(); return NPY_FAIL; @@ -128,7 +129,7 @@ seq->len = len; nd = PyArray_IntpFromIndexSequence(obj, (npy_intp *)seq->ptr, len); if (nd == -1 || nd != len) { - PyDimMem_FREE(seq->ptr); + npy_free_cache_dim_obj(*seq); seq->ptr = NULL; return NPY_FAIL; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/convert.c python-numpy-1.14.5/numpy/core/src/multiarray/convert.c --- python-numpy-1.13.3/numpy/core/src/multiarray/convert.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/convert.c 2018-06-12 18:28:52.000000000 +0000 @@ -13,6 +13,7 @@ #include "npy_pycompat.h" +#include "common.h" #include "arrayobject.h" #include "ctors.h" #include "mapping.h" @@ -85,7 
+86,7 @@ /* Base case */ if (startdim >= PyArray_NDIM(self)) { - return PyArray_DESCR(self)->f->getitem(dataptr,self); + return PyArray_GETITEM(self, dataptr); } n = PyArray_DIM(self, startdim); @@ -221,7 +222,7 @@ PyArray_IterNew((PyObject *)self); n4 = (format ? strlen((const char *)format) : 0); while (it->index < it->size) { - obj = PyArray_DESCR(self)->f->getitem(it->dataptr, self); + obj = PyArray_GETITEM(self, it->dataptr); if (obj == NULL) { Py_DECREF(it); return -1; @@ -411,7 +412,7 @@ else if (PyLong_Check(obj) || PyInt_Check(obj)) { /* Try long long before unsigned long long */ npy_longlong ll_v = PyLong_AsLongLong(obj); - if (ll_v == -1 && PyErr_Occurred()) { + if (error_converting(ll_v)) { /* Long long failed, try unsigned long long */ npy_ulonglong ull_v; PyErr_Clear(); @@ -441,7 +442,7 @@ /* Python float */ else if (PyFloat_Check(obj)) { npy_double v = PyFloat_AsDouble(obj); - if (v == -1 && PyErr_Occurred()) { + if (error_converting(v)) { return -1; } value = (char *)value_buffer; @@ -457,11 +458,11 @@ npy_double re, im; re = PyComplex_RealAsDouble(obj); - if (re == -1 && PyErr_Occurred()) { + if (error_converting(re)) { return -1; } im = PyComplex_ImagAsDouble(obj); - if (im == -1 && PyErr_Occurred()) { + if (error_converting(im)) { return -1; } value = (char *)value_buffer; @@ -616,7 +617,7 @@ const char *msg = "Numpy has detected that you may be viewing or writing to an array " "returned by selecting multiple fields in a structured array. 
\n\n" - "This code may break in numpy 1.13 because this will return a view " + "This code may break in numpy 1.15 because this will return a view " "instead of a copy -- see release notes for details."; /* 2016-09-19, 1.12 */ if (DEPRECATE_FUTUREWARNING(msg) < 0) { diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/convert_datatype.c python-numpy-1.14.5/numpy/core/src/multiarray/convert_datatype.c --- python-numpy-1.13.3/numpy/core/src/multiarray/convert_datatype.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/convert_datatype.c 2018-06-12 18:28:52.000000000 +0000 @@ -167,7 +167,7 @@ flex_type_num = (*flex_dtype)->type_num; /* Flexible types with expandable size */ - if ((*flex_dtype)->elsize == 0) { + if (PyDataType_ISUNSIZED(*flex_dtype)) { /* First replace the flex dtype */ PyArray_DESCR_REPLACE(*flex_dtype); if (*flex_dtype == NULL) { @@ -526,7 +526,7 @@ } ret = 0; - if (to->elsize == 0) { + if (PyDataType_ISUNSIZED(to)) { ret = 1; } /* @@ -1152,7 +1152,7 @@ else if (PyTypeNum_ISNUMBER(type_num2)) { PyArray_Descr *ret = NULL; PyArray_Descr *temp = PyArray_DescrNew(type1); - temp->elsize = 0; + PyDataType_MAKEUNSIZED(temp); PyArray_AdaptFlexibleDType(NULL, type2, &temp); if (temp->elsize > type1->elsize) { ret = ensure_dtype_nbo(temp); @@ -1190,7 +1190,7 @@ else if (PyTypeNum_ISNUMBER(type_num2)) { PyArray_Descr *ret = NULL; PyArray_Descr *temp = PyArray_DescrNew(type1); - temp->elsize = 0; + PyDataType_MAKEUNSIZED(temp); PyArray_AdaptFlexibleDType(NULL, type2, &temp); if (temp->elsize > type1->elsize) { ret = ensure_dtype_nbo(temp); @@ -1238,7 +1238,7 @@ if (PyTypeNum_ISNUMBER(type_num1)) { PyArray_Descr *ret = NULL; PyArray_Descr *temp = PyArray_DescrNew(type2); - temp->elsize = 0; + PyDataType_MAKEUNSIZED(temp); PyArray_AdaptFlexibleDType(NULL, type1, &temp); if (temp->elsize > type2->elsize) { ret = ensure_dtype_nbo(temp); @@ -1255,7 +1255,7 @@ if (PyTypeNum_ISNUMBER(type_num1)) { PyArray_Descr *ret = NULL; 
PyArray_Descr *temp = PyArray_DescrNew(type2); - temp->elsize = 0; + PyDataType_MAKEUNSIZED(temp); PyArray_AdaptFlexibleDType(NULL, type1, &temp); if (temp->elsize > type2->elsize) { ret = ensure_dtype_nbo(temp); @@ -1353,7 +1353,7 @@ case NPY_UINT: { npy_uint value = *(npy_uint *)valueptr; if (value <= NPY_MAX_UBYTE) { - if (value < NPY_MAX_BYTE) { + if (value <= NPY_MAX_BYTE) { *is_small_unsigned = 1; } return NPY_UBYTE; @@ -1945,7 +1945,7 @@ } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_DESCR(arr)->f->setitem(zero_obj, zeroval, arr); + ret = PyArray_SETITEM(arr, zeroval, zero_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(zeroval); @@ -1992,7 +1992,7 @@ storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_DESCR(arr)->f->setitem(one_obj, oneval, arr); + ret = PyArray_SETITEM(arr, oneval, one_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(oneval); diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/ctors.c python-numpy-1.14.5/numpy/core/src/multiarray/ctors.c --- python-numpy-1.13.3/numpy/core/src/multiarray/ctors.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/ctors.c 2018-06-12 18:28:52.000000000 +0000 @@ -514,7 +514,7 @@ } else { char * b = (PyArray_BYTES(dst) + i * PyArray_STRIDES(dst)[0]); - res = PyArray_DESCR(dst)->f->setitem(o, b, dst); + res = PyArray_SETITEM(dst, b, o); } if (res < 0) { Py_DECREF(o); @@ -545,7 +545,7 @@ } else { char * b = (PyArray_BYTES(dst) + i * PyArray_STRIDES(dst)[0]); - res = PyArray_DESCR(dst)->f->setitem(o, b, dst); + res = PyArray_SETITEM(dst, b, o); } if (res < 0) { Py_DECREF(seq); @@ -934,7 +934,7 @@ /* Check datatype element size */ nbytes = descr->elsize; - if (nbytes == 0) { + if (PyDataType_ISUNSIZED(descr)) { if (!PyDataType_ISFLEXIBLE(descr)) { PyErr_SetString(PyExc_TypeError, "Empty data-type"); 
Py_DECREF(descr); @@ -1009,7 +1009,8 @@ } } else { - fa->flags = (flags & ~NPY_ARRAY_UPDATEIFCOPY); + fa->flags = (flags & ~NPY_ARRAY_WRITEBACKIFCOPY); + fa->flags = (fa->flags & ~NPY_ARRAY_UPDATEIFCOPY); } fa->descr = descr; fa->base = (PyObject *)NULL; @@ -1255,7 +1256,7 @@ if (descr == NULL) { return NULL; } - if (descr->elsize == 0) { + if (PyDataType_ISUNSIZED(descr)) { if (itemsize < 1) { PyErr_SetString(PyExc_ValueError, "data type must provide an itemsize"); @@ -1529,12 +1530,6 @@ if (!writeable) { tmp = PyArray_FromArrayAttr(op, requested_dtype, context); if (tmp != Py_NotImplemented) { - if (writeable - && PyArray_FailUnlessWriteable((PyArrayObject *)tmp, - "array interface object") < 0) { - Py_DECREF(tmp); - return -1; - } *out_arr = (PyArrayObject *)tmp; return (*out_arr) == NULL ? -1 : 0; } @@ -1623,7 +1618,7 @@ } /* If the type is flexible, determine its size */ - if ((*out_dtype)->elsize == 0 && + if (PyDataType_ISUNSIZED(*out_dtype) && PyTypeNum_ISEXTENDED((*out_dtype)->type_num)) { int itemsize = 0; int string_type = 0; @@ -1709,10 +1704,11 @@ /* If we got dimensions and dtype instead of an array */ if (arr == NULL) { - if (flags & NPY_ARRAY_UPDATEIFCOPY) { + if ((flags & NPY_ARRAY_WRITEBACKIFCOPY) || + (flags & NPY_ARRAY_UPDATEIFCOPY)) { Py_XDECREF(newtype); PyErr_SetString(PyExc_TypeError, - "UPDATEIFCOPY used for non-array input."); + "WRITEBACKIFCOPY used for non-array input."); return NULL; } else if (min_depth != 0 && ndim < min_depth) { @@ -1778,8 +1774,7 @@ } } else { - if (PyArray_DESCR(ret)->f->setitem(op, - PyArray_DATA(ret), ret) < 0) { + if (PyArray_SETITEM(ret, PyArray_DATA(ret), op) < 0) { Py_DECREF(ret); ret = NULL; } @@ -1817,6 +1812,7 @@ * NPY_ARRAY_NOTSWAPPED, * NPY_ARRAY_ENSURECOPY, * NPY_ARRAY_UPDATEIFCOPY, + * NPY_ARRAY_WRITEBACKIFCOPY, * NPY_ARRAY_FORCECAST, * NPY_ARRAY_ENSUREARRAY, * NPY_ARRAY_ELEMENTSTRIDES @@ -1841,10 +1837,13 @@ * Fortran arrays are always behaved (aligned, * notswapped, and writeable) and not (C) 
CONTIGUOUS (if > 1d). * - * NPY_ARRAY_UPDATEIFCOPY flag sets this flag in the returned array if a copy - * is made and the base argument points to the (possibly) misbehaved array. - * When the new array is deallocated, the original array held in base - * is updated with the contents of the new array. + * NPY_ARRAY_UPDATEIFCOPY is deprecated in favor of + * NPY_ARRAY_WRITEBACKIFCOPY in 1.14 + + * NPY_ARRAY_WRITEBACKIFCOPY flag sets this flag in the returned + * array if a copy is made and the base argument points to the (possibly) + * misbehaved array. Before returning to python, PyArray_ResolveWritebackIfCopy + * must be called to update the contents of the orignal array from the copy. * * NPY_ARRAY_FORCECAST will cause a cast to occur regardless of whether or not * it is safe. @@ -1860,7 +1859,7 @@ PyObject *obj; if (requires & NPY_ARRAY_NOTSWAPPED) { if (!descr && PyArray_Check(op) && - !PyArray_ISNBO(PyArray_DESCR((PyArrayObject *)op)->byteorder)) { + PyArray_ISBYTESWAPPED((PyArrayObject* )op)) { descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op)); } else if (descr && !PyArray_ISNBO(descr->byteorder)) { @@ -1893,7 +1892,6 @@ { PyArrayObject *ret = NULL; - int itemsize; int copy = 0; int arrflags; PyArray_Descr *oldtype; @@ -1912,14 +1910,12 @@ newtype = oldtype; Py_INCREF(oldtype); } - itemsize = newtype->elsize; - if (itemsize == 0) { + if (PyDataType_ISUNSIZED(newtype)) { PyArray_DESCR_REPLACE(newtype); if (newtype == NULL) { return NULL; } newtype->elsize = oldtype->elsize; - itemsize = newtype->elsize; } /* If the casting if forced, use the 'unsafe' casting rule */ @@ -2011,9 +2007,30 @@ return NULL; } - if (flags & NPY_ARRAY_UPDATEIFCOPY) { + if (flags & NPY_ARRAY_UPDATEIFCOPY) { + /* This is the ONLY place the NPY_ARRAY_UPDATEIFCOPY flag + * is still used. 
+ * Can be deleted once the flag itself is removed + */ + + /* 2017-Nov-10 1.14 */ + if (DEPRECATE("NPY_ARRAY_UPDATEIFCOPY, NPY_ARRAY_INOUT_ARRAY, and " + "NPY_ARRAY_INOUT_FARRAY are deprecated, use NPY_WRITEBACKIFCOPY, " + "NPY_ARRAY_INOUT_ARRAY2, or NPY_ARRAY_INOUT_FARRAY2 respectively " + "instead, and call PyArray_ResolveWritebackIfCopy before the " + "array is deallocated, i.e. before the last call to Py_DECREF.") < 0) + return NULL; + Py_INCREF(arr); + if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) { + Py_DECREF(ret); + return NULL; + } + PyArray_ENABLEFLAGS(ret, NPY_ARRAY_UPDATEIFCOPY); + PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEBACKIFCOPY); + } + else if (flags & NPY_ARRAY_WRITEBACKIFCOPY) { Py_INCREF(arr); - if (PyArray_SetUpdateIfCopyBase(ret, arr) < 0) { + if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) { Py_DECREF(ret); return NULL; } @@ -2308,12 +2325,7 @@ /* Case for data access through buffer */ else if (attr) { - if (n == 0) { - PyErr_SetString(PyExc_ValueError, - "__array_interface__ shape must be at least size 1"); - goto fail; - } - if (attr && (attr != Py_None)) { + if (attr != Py_None) { base = attr; } else { @@ -2896,7 +2908,7 @@ /* * PyArray_NewFromDescr steals a ref, - * but we need to look at type later. + * but we need to look at type later. * */ Py_INCREF(type); @@ -2923,17 +2935,25 @@ * Return 0 on success, -1 on failure. 
In case of failure, set a PyExc_Overflow * exception */ -static int _safe_ceil_to_intp(double value, npy_intp* ret) +static npy_intp +_arange_safe_ceil_to_intp(double value) { double ivalue; ivalue = npy_ceil(value); - if (ivalue < NPY_MIN_INTP || ivalue > NPY_MAX_INTP) { + /* condition inverted to handle NaN */ + if (npy_isnan(ivalue)) { + PyErr_SetString(PyExc_ValueError, + "arange: cannot compute length"); + return -1; + } + if (!(NPY_MIN_INTP <= ivalue && ivalue <= NPY_MAX_INTP)) { + PyErr_SetString(PyExc_OverflowError, + "arange: overflow while computing length"); return -1; } - *ret = (npy_intp)ivalue; - return 0; + return (npy_intp)ivalue; } @@ -2950,9 +2970,9 @@ int ret; NPY_BEGIN_THREADS_DEF; - if (_safe_ceil_to_intp((stop - start)/step, &length)) { - PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); + length = _arange_safe_ceil_to_intp((stop - start)/step); + if (error_converting(length)) { + return NULL; } if (length <= 0) { @@ -3010,7 +3030,7 @@ } /* - * the formula is len = (intp) ceil((start - stop) / step); + * the formula is len = (intp) ceil((stop - start) / step); */ static npy_intp _calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, int cmplx) @@ -3041,10 +3061,9 @@ Py_DECREF(val); return -1; } - if (_safe_ceil_to_intp(value, &len)) { + len = _arange_safe_ceil_to_intp(value); + if (error_converting(len)) { Py_DECREF(val); - PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); return -1; } value = PyComplex_ImagAsDouble(val); @@ -3052,9 +3071,8 @@ if (error_converting(value)) { return -1; } - if (_safe_ceil_to_intp(value, &tmp)) { - PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); + tmp = _arange_safe_ceil_to_intp(value); + if (error_converting(tmp)) { return -1; } len = PyArray_MIN(len, tmp); @@ -3065,9 +3083,8 @@ if (error_converting(value)) { return -1; } - if (_safe_ceil_to_intp(value, &len)) { - 
PyErr_SetString(PyExc_OverflowError, - "arange: overflow while computing length"); + len = _arange_safe_ceil_to_intp(value); + if (error_converting(len)) { return -1; } } @@ -3472,7 +3489,7 @@ Py_DECREF(type); return NULL; } - if (type->elsize == 0) { + if (PyDataType_ISUNSIZED(type)) { PyErr_SetString(PyExc_ValueError, "itemsize cannot be zero in type"); Py_DECREF(type); @@ -3689,12 +3706,13 @@ if (iter == NULL) { goto done; } - elcount = (count < 0) ? 0 : count; - if ((elsize = dtype->elsize) == 0) { + if (PyDataType_ISUNSIZED(dtype)) { PyErr_SetString(PyExc_ValueError, "Must specify length when using variable-size data-type."); goto done; } + elcount = (count < 0) ? 0 : count; + elsize = dtype->elsize; /* * We would need to alter the memory RENEW code to decrement any @@ -3715,14 +3733,15 @@ for (i = 0; (i < count || count == -1) && (value = PyIter_Next(iter)); i++) { if (i >= elcount) { + npy_intp nbytes; /* Grow PyArray_DATA(ret): this is similar for the strategy for PyListObject, but we use 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ... */ elcount = (i >> 1) + (i < 4 ? 
4 : 2) + i; - if (elcount <= NPY_MAX_INTP/elsize) { - new_data = PyDataMem_RENEW(PyArray_DATA(ret), elcount * elsize); + if (!npy_mul_with_overflow_intp(&nbytes, elcount, elsize)) { + new_data = PyDataMem_RENEW(PyArray_DATA(ret), nbytes); } else { new_data = NULL; @@ -3738,7 +3757,7 @@ PyArray_DIMS(ret)[0] = i + 1; if (((item = index2ptr(ret, i)) == NULL) || - (PyArray_DESCR(ret)->f->setitem(value, item, ret) == -1)) { + PyArray_SETITEM(ret, item, value) == -1) { Py_DECREF(value); goto done; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/datetime_busdaycal.c python-numpy-1.14.5/numpy/core/src/multiarray/datetime_busdaycal.c --- python-numpy-1.13.3/numpy/core/src/multiarray/datetime_busdaycal.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/datetime_busdaycal.c 2018-06-12 17:31:56.000000000 +0000 @@ -18,6 +18,7 @@ #include "npy_config.h" #include "npy_pycompat.h" +#include "common.h" #include "numpy/arrayscalars.h" #include "lowlevel_strided_loops.h" #include "_datetime.h" @@ -168,7 +169,7 @@ } val = PyInt_AsLong(f); - if (val == -1 && PyErr_Occurred()) { + if (error_converting(val)) { Py_DECREF(f); Py_DECREF(obj); return 0; diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/datetime.c python-numpy-1.14.5/numpy/core/src/multiarray/datetime.c --- python-numpy-1.13.3/numpy/core/src/multiarray/datetime.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/datetime.c 2018-06-12 18:28:52.000000000 +0000 @@ -20,6 +20,7 @@ #include "npy_config.h" #include "npy_pycompat.h" +#include "common.h" #include "numpy/arrayscalars.h" #include "methods.h" #include "_datetime.h" @@ -1718,8 +1719,6 @@ * a date time unit enum value. The 'metastr' parameter * is used for error messages, and may be NULL. * - * Generic units have no representation as a string in this form. - * * Returns 0 on success, -1 on failure. 
*/ NPY_NO_EXPORT NPY_DATETIMEUNIT @@ -1761,6 +1760,9 @@ return NPY_FR_as; } } + else if (len == 7 && !strncmp(str, "generic", 7)) { + return NPY_FR_GENERIC; + } /* If nothing matched, it's an error */ if (metastr == NULL) { @@ -1802,7 +1804,8 @@ */ NPY_NO_EXPORT int convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple, - PyArray_DatetimeMetaData *out_meta) + PyArray_DatetimeMetaData *out_meta, + npy_bool from_pickle) { char *basestr = NULL; Py_ssize_t len = 0, tuple_size; @@ -1853,13 +1856,62 @@ /* Convert the values to longs */ out_meta->num = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 1)); - if (out_meta->num == -1 && PyErr_Occurred()) { + if (error_converting(out_meta->num)) { return -1; } - if (tuple_size == 4) { + /* + * The event metadata was removed way back in numpy 1.7 (cb4545), but was + * not deprecated at the time. + */ + + /* (unit, num, event) */ + if (tuple_size == 3) { + /* Numpy 1.14, 2017-08-11 */ + if (DEPRECATE( + "When passing a 3-tuple as (unit, num, event), the event " + "is ignored (since 1.7) - use (unit, num) instead") < 0) { + return -1; + } + } + /* (unit, num, den, event) */ + else if (tuple_size == 4) { + PyObject *event = PyTuple_GET_ITEM(tuple, 3); + if (from_pickle) { + /* if (event == 1) */ + PyObject *one = PyLong_FromLong(1); + int equal_one; + if (one == NULL) { + return -1; + } + equal_one = PyObject_RichCompareBool(event, one, Py_EQ); + if (equal_one == -1) { + return -1; + } + + /* if the event data is not 1, it had semantics different to how + * datetime types now behave, which are no longer respected. 
+ */ + if (!equal_one) { + if (PyErr_WarnEx(PyExc_UserWarning, + "Loaded pickle file contains non-default event data " + "for a datetime type, which has been ignored since 1.7", + 1) < 0) { + return -1; + } + } + } + else if (event != Py_None) { + /* Numpy 1.14, 2017-08-11 */ + if (DEPRECATE( + "When passing a 4-tuple as (unit, num, den, event), the " + "event argument is ignored (since 1.7), so should be None" + ) < 0) { + return -1; + } + } den = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 2)); - if (den == -1 && PyErr_Occurred()) { + if (error_converting(den)) { return -1; } } @@ -1895,8 +1947,8 @@ Py_ssize_t len = 0; if (PyTuple_Check(obj)) { - return convert_datetime_metadata_tuple_to_datetime_metadata(obj, - out_meta); + return convert_datetime_metadata_tuple_to_datetime_metadata( + obj, out_meta, NPY_FALSE); } /* Get an ASCII string */ @@ -2126,7 +2178,7 @@ return -1; } out->year = PyInt_AsLong(tmp); - if (out->year == -1 && PyErr_Occurred()) { + if (error_converting(out->year)) { Py_DECREF(tmp); return -1; } @@ -2138,7 +2190,7 @@ return -1; } out->month = PyInt_AsLong(tmp); - if (out->month == -1 && PyErr_Occurred()) { + if (error_converting(out->month)) { Py_DECREF(tmp); return -1; } @@ -2150,7 +2202,7 @@ return -1; } out->day = PyInt_AsLong(tmp); - if (out->day == -1 && PyErr_Occurred()) { + if (error_converting(out->day)) { Py_DECREF(tmp); return -1; } @@ -2184,7 +2236,7 @@ return -1; } out->hour = PyInt_AsLong(tmp); - if (out->hour == -1 && PyErr_Occurred()) { + if (error_converting(out->hour)) { Py_DECREF(tmp); return -1; } @@ -2196,7 +2248,7 @@ return -1; } out->min = PyInt_AsLong(tmp); - if (out->min == -1 && PyErr_Occurred()) { + if (error_converting(out->min)) { Py_DECREF(tmp); return -1; } @@ -2208,7 +2260,7 @@ return -1; } out->sec = PyInt_AsLong(tmp); - if (out->sec == -1 && PyErr_Occurred()) { + if (error_converting(out->sec)) { Py_DECREF(tmp); return -1; } @@ -2220,7 +2272,7 @@ return -1; } out->us = PyInt_AsLong(tmp); - if (out->us == -1 && 
PyErr_Occurred()) { + if (error_converting(out->us)) { Py_DECREF(tmp); return -1; } @@ -2271,7 +2323,7 @@ return -1; } seconds_offset = PyInt_AsLong(tmp); - if (seconds_offset == -1 && PyErr_Occurred()) { + if (error_converting(seconds_offset)) { Py_DECREF(tmp); return -1; } @@ -2456,7 +2508,7 @@ } PyArray_DESCR(arr)->f->copyswap(&dt, PyArray_DATA(arr), - !PyArray_ISNOTSWAPPED(arr), + PyArray_ISBYTESWAPPED(arr), obj); /* Copy the value directly if units weren't specified */ @@ -2654,7 +2706,7 @@ } PyArray_DESCR(arr)->f->copyswap(&dt, PyArray_DATA(arr), - !PyArray_ISNOTSWAPPED(arr), + PyArray_ISBYTESWAPPED(arr), obj); /* Copy the value directly if units weren't specified */ @@ -2694,7 +2746,7 @@ return -1; } days = PyLong_AsLongLong(tmp); - if (days == -1 && PyErr_Occurred()) { + if (error_converting(days)) { Py_DECREF(tmp); return -1; } @@ -2706,7 +2758,7 @@ return -1; } seconds = PyInt_AsLong(tmp); - if (seconds == -1 && PyErr_Occurred()) { + if (error_converting(seconds)) { Py_DECREF(tmp); return -1; } @@ -2718,7 +2770,7 @@ return -1; } useconds = PyInt_AsLong(tmp); - if (useconds == -1 && PyErr_Occurred()) { + if (error_converting(useconds)) { Py_DECREF(tmp); return -1; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/_datetime.h python-numpy-1.14.5/numpy/core/src/multiarray/_datetime.h --- python-numpy-1.13.3/numpy/core/src/multiarray/_datetime.h 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/_datetime.h 2018-06-12 17:31:56.000000000 +0000 @@ -175,7 +175,8 @@ */ NPY_NO_EXPORT int convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple, - PyArray_DatetimeMetaData *out_meta); + PyArray_DatetimeMetaData *out_meta, + npy_bool from_pickle); /* * Gets a tzoffset in minutes by calling the fromutc() function on diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/datetime_strings.c python-numpy-1.14.5/numpy/core/src/multiarray/datetime_strings.c --- 
python-numpy-1.13.3/numpy/core/src/multiarray/datetime_strings.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/datetime_strings.c 2018-06-12 18:28:52.000000000 +0000 @@ -885,15 +885,16 @@ * string was too short). */ NPY_NO_EXPORT int -make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, +make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, npy_intp outlen, int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, NPY_CASTING casting) { npy_datetimestruct dts_local; int timezone_offset = 0; - char *substr = outstr, sublen = outlen; - int tmplen; + char *substr = outstr; + npy_intp sublen = outlen; + npy_intp tmplen; /* Handle NaT, and treat a datetime with generic units as NaT */ if (dts->year == NPY_DATETIME_NAT || base == NPY_FR_GENERIC) { @@ -1321,7 +1322,7 @@ string_too_short: PyErr_Format(PyExc_RuntimeError, "The string provided for NumPy ISO datetime formatting " - "was too short, with length %d", + "was too short, with length %"NPY_INTP_FMT, outlen); return -1; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/datetime_strings.h python-numpy-1.14.5/numpy/core/src/multiarray/datetime_strings.h --- python-numpy-1.13.3/numpy/core/src/multiarray/datetime_strings.h 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/datetime_strings.h 2018-06-12 17:31:56.000000000 +0000 @@ -70,7 +70,7 @@ * string was too short). 
*/ NPY_NO_EXPORT int -make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, +make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, npy_intp outlen, int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, NPY_CASTING casting); diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/descriptor.c python-numpy-1.14.5/numpy/core/src/multiarray/descriptor.c --- python-numpy-1.13.3/numpy/core/src/multiarray/descriptor.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/descriptor.c 2018-06-12 18:28:52.000000000 +0000 @@ -15,7 +15,9 @@ #include "_datetime.h" #include "common.h" +#include "templ_common.h" /* for npy_mul_with_overflow_intp */ #include "descriptor.h" +#include "alloc.h" /* * offset: A starting offset. @@ -196,7 +198,7 @@ * allows commas inside of [], for parameterized dtypes to use. */ sqbracket = 0; - for (i = 1; i < len; i++) { + for (i = 0; i < len; i++) { switch (type[i]) { case ',': if (sqbracket == 0) { @@ -258,26 +260,22 @@ res = _use_inherit(type, val, &errflag); if (res || errflag) { Py_DECREF(type); - if (res) { - return res; - } - else { - return NULL; - } + return res; } PyErr_Clear(); /* * We get here if res was NULL but errflag wasn't set * --- i.e. 
the conversion to a data-descr failed in _use_inherit */ - if (type->elsize == 0) { + if (PyDataType_ISUNSIZED(type)) { /* interpret next item as a typesize */ int itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); if (error_converting(itemsize)) { PyErr_SetString(PyExc_ValueError, "invalid itemsize in generic type tuple"); - goto fail; + Py_DECREF(type); + return NULL; } PyArray_DESCR_REPLACE(type); if (type->type_num == NPY_UNICODE) { @@ -286,6 +284,7 @@ else { type->elsize = itemsize; } + return type; } else if (type->metadata && (PyDict_Check(val) || PyDictProxy_Check(val))) { /* Assume it's a metadata dictionary */ @@ -293,6 +292,7 @@ Py_DECREF(type); return NULL; } + return type; } else { /* @@ -301,12 +301,12 @@ * a new fields attribute. */ PyArray_Dims shape = {NULL, -1}; - PyArray_Descr *newdescr; + PyArray_Descr *newdescr = NULL; npy_intp items; - int i; + int i, overflowed; + int nbytes; if (!(PyArray_IntpConverter(val, &shape)) || (shape.len > NPY_MAXDIMS)) { - PyDimMem_FREE(shape.ptr); PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple."); goto fail; @@ -320,14 +320,9 @@ && PyNumber_Check(val)) || (shape.len == 0 && PyTuple_Check(val))) { - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); return type; } - newdescr = PyArray_DescrNewFromType(NPY_VOID); - if (newdescr == NULL) { - PyDimMem_FREE(shape.ptr); - goto fail; - } /* validate and set shape */ for (i=0; i < shape.len; i++) { @@ -335,34 +330,36 @@ PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple: " "dimension smaller then zero."); - PyDimMem_FREE(shape.ptr); goto fail; } if (shape.ptr[i] > NPY_MAX_INT) { PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple: " "dimension does not fit into a C int."); - PyDimMem_FREE(shape.ptr); goto fail; } } items = PyArray_OverflowMultiplyList(shape.ptr, shape.len); - if ((items < 0) || (items > (NPY_MAX_INT / type->elsize))) { + if (items < 0 || items > NPY_MAX_INT) { + overflowed = 1; + } + 
else { + overflowed = npy_mul_with_overflow_int( + &nbytes, type->elsize, (int) items); + } + if (overflowed) { PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple: dtype size in " "bytes must fit into a C int."); - PyDimMem_FREE(shape.ptr); goto fail; } - newdescr->elsize = type->elsize * items; - if (newdescr->elsize == -1) { - PyDimMem_FREE(shape.ptr); + newdescr = PyArray_DescrNewFromType(NPY_VOID); + if (newdescr == NULL) { goto fail; } - + newdescr->elsize = nbytes; newdescr->subarray = PyArray_malloc(sizeof(PyArray_ArrayDescr)); if (newdescr->subarray == NULL) { - Py_DECREF(newdescr); PyErr_NoMemory(); goto fail; } @@ -381,7 +378,6 @@ */ newdescr->subarray->shape = PyTuple_New(shape.len); if (newdescr->subarray->shape == NULL) { - PyDimMem_FREE(shape.ptr); goto fail; } for (i=0; i < shape.len; i++) { @@ -389,21 +385,19 @@ PyInt_FromLong((long)shape.ptr[i])); if (PyTuple_GET_ITEM(newdescr->subarray->shape, i) == NULL) { - Py_DECREF(newdescr->subarray->shape); - newdescr->subarray->shape = NULL; - PyDimMem_FREE(shape.ptr); goto fail; } } - PyDimMem_FREE(shape.ptr); - type = newdescr; - } - return type; + npy_free_cache_dim_obj(shape); + return newdescr; - fail: - Py_XDECREF(type); - return NULL; + fail: + Py_XDECREF(type); + Py_XDECREF(newdescr); + npy_free_cache_dim_obj(shape); + return NULL; + } } /* @@ -518,11 +512,7 @@ } if ((PyDict_GetItem(fields, name) != NULL) || (title -#if defined(NPY_PY3K) - && PyUString_Check(title) -#else - && (PyUString_Check(title) || PyUnicode_Check(title)) -#endif + && PyBaseString_Check(title) && (PyDict_GetItem(fields, title) != NULL))) { #if defined(NPY_PY3K) name = PyUnicode_AsUTF8String(name); @@ -557,11 +547,7 @@ Py_INCREF(title); PyTuple_SET_ITEM(tup, 2, title); PyDict_SetItem(fields, name, tup); -#if defined(NPY_PY3K) - if (PyUString_Check(title)) { -#else - if (PyUString_Check(title) || PyUnicode_Check(title)) { -#endif + if (PyBaseString_Check(title)) { if (PyDict_GetItem(fields, title) != NULL) { 
PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); @@ -852,15 +838,18 @@ if (new == NULL) { goto fail; } - if (new->elsize && new->elsize != conv->elsize) { + if (PyDataType_ISUNSIZED(new)) { + new->elsize = conv->elsize; + } + else if (new->elsize != conv->elsize) { PyErr_SetString(PyExc_ValueError, "mismatch in size of old and new data-descriptor"); goto fail; } - if (new->elsize && invalid_union_object_dtype(new, conv)) { + else if (invalid_union_object_dtype(new, conv)) { goto fail; } - new->elsize = conv->elsize; + if (PyDataType_HASFIELDS(conv)) { Py_XDECREF(new->fields); new->fields = conv->fields; @@ -1130,7 +1119,7 @@ goto fail; } offset = PyArray_PyIntAsInt(off); - if (offset == -1 && PyErr_Occurred()) { + if (error_converting(offset)) { Py_DECREF(off); Py_DECREF(tup); Py_DECREF(ind); @@ -1184,11 +1173,7 @@ Py_DECREF(tup); goto fail; } -#if defined(NPY_PY3K) - if (!PyUString_Check(name)) { -#else - if (!(PyUString_Check(name) || PyUnicode_Check(name))) { -#endif + if (!PyBaseString_Check(name)) { PyErr_SetString(PyExc_ValueError, "field names must be strings"); Py_DECREF(tup); @@ -1205,11 +1190,7 @@ PyDict_SetItem(fields, name, tup); Py_DECREF(name); if (len == 3) { -#if defined(NPY_PY3K) - if (PyUString_Check(title)) { -#else - if (PyUString_Check(title) || PyUnicode_Check(title)) { -#endif + if (PyBaseString_Check(title)) { if (PyDict_GetItem(fields, title) != NULL) { PyErr_SetString(PyExc_ValueError, "title already used as a name or title."); @@ -1269,7 +1250,7 @@ PyErr_Clear(); } else { itemsize = (int)PyArray_PyIntAsInt(tmp); - if (itemsize == -1 && PyErr_Occurred()) { + if (error_converting(itemsize)) { Py_DECREF(new); return NULL; } @@ -1652,7 +1633,7 @@ goto fail; } - if (((*at)->elsize == 0) && (elsize != 0)) { + if (PyDataType_ISUNSIZED(*at) && (*at)->elsize != elsize) { PyArray_DESCR_REPLACE(*at); (*at)->elsize = elsize; } @@ -1902,7 +1883,7 @@ len -= suffix_len; res = 
PyUString_FromStringAndSize(typeobj->tp_name+prefix_len, len); } - if (PyTypeNum_ISFLEXIBLE(self->type_num) && self->elsize != 0) { + if (PyTypeNum_ISFLEXIBLE(self->type_num) && !PyDataType_ISUNSIZED(self)) { PyObject *p; p = PyUString_FromFormat("%d", self->elsize * 8); PyUString_ConcatAndDel(&res, p); @@ -2886,7 +2867,8 @@ if (convert_datetime_metadata_tuple_to_datetime_metadata( PyTuple_GET_ITEM(metadata, 1), - &temp_dt_data) < 0) { + &temp_dt_data, + NPY_TRUE) < 0) { return NULL; } @@ -3118,7 +3100,7 @@ * * Returns 1 if it has a simple layout, 0 otherwise. */ -static int +NPY_NO_EXPORT int is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype) { PyObject *names, *fields, *key, *tup, *title; @@ -3358,8 +3340,8 @@ sub = arraydescr_struct_dict_str(dtype, includealignflag); } - /* If the data type has a non-void (subclassed) type, show it */ - if (dtype->type_num == NPY_VOID && dtype->typeobj != &PyVoidArrType_Type) { + /* If the data type isn't the default, void, show it */ + if (dtype->typeobj != &PyVoidArrType_Type) { /* * Note: We cannot get the type name from dtype->typeobj->tp_name * because its value depends on whether the type is dynamically or @@ -3556,7 +3538,7 @@ return PyUString_FromString("'O'"); case NPY_STRING: - if (dtype->elsize == 0) { + if (PyDataType_ISUNSIZED(dtype)) { return PyUString_FromString("'S'"); } else { @@ -3564,7 +3546,7 @@ } case NPY_UNICODE: - if (dtype->elsize == 0) { + if (PyDataType_ISUNSIZED(dtype)) { return PyUString_FromFormat("'%sU'", byteorder); } else { @@ -3573,7 +3555,7 @@ } case NPY_VOID: - if (dtype->elsize == 0) { + if (PyDataType_ISUNSIZED(dtype)) { return PyUString_FromString("'V'"); } else { @@ -3757,11 +3739,9 @@ return (PyObject *)new; } -static PyObject * -descr_subscript(PyArray_Descr *self, PyObject *op) +static int +_check_has_fields(PyArray_Descr *self) { - PyObject *retval; - if (!PyDataType_HASFIELDS(self)) { PyObject *astr = arraydescr_str(self); #if defined(NPY_PY3K) @@ -3772,74 +3752,88 @@ 
PyErr_Format(PyExc_KeyError, "There are no fields in dtype %s.", PyBytes_AsString(astr)); Py_DECREF(astr); - return NULL; + return -1; } -#if defined(NPY_PY3K) - if (PyUString_Check(op)) { -#else - if (PyUString_Check(op) || PyUnicode_Check(op)) { -#endif - PyObject *obj = PyDict_GetItem(self->fields, op); - PyObject *descr; - PyObject *s; - - if (obj == NULL) { - if (PyUnicode_Check(op)) { - s = PyUnicode_AsUnicodeEscapeString(op); - } - else { - s = op; - } - - PyErr_Format(PyExc_KeyError, - "Field named \'%s\' not found.", PyBytes_AsString(s)); - if (s != op) { - Py_DECREF(s); - } - return NULL; - } - descr = PyTuple_GET_ITEM(obj, 0); - Py_INCREF(descr); - retval = descr; + else { + return 0; } - else if (PyInt_Check(op)) { - PyObject *name; - int size = PyTuple_GET_SIZE(self->names); - int value = PyArray_PyIntAsInt(op); - int orig_value = value; +} - if (PyErr_Occurred()) { - return NULL; +static PyObject * +_subscript_by_name(PyArray_Descr *self, PyObject *op) +{ + PyObject *obj = PyDict_GetItem(self->fields, op); + PyObject *descr; + PyObject *s; + + if (obj == NULL) { + if (PyUnicode_Check(op)) { + s = PyUnicode_AsUnicodeEscapeString(op); } - if (value < 0) { - value += size; + else { + s = op; } - if (value < 0 || value >= size) { - PyErr_Format(PyExc_IndexError, - "Field index %d out of range.", orig_value); - return NULL; + + PyErr_Format(PyExc_KeyError, + "Field named \'%s\' not found.", PyBytes_AsString(s)); + if (s != op) { + Py_DECREF(s); } - name = PyTuple_GET_ITEM(self->names, value); - retval = descr_subscript(self, name); + return NULL; } - else { - PyErr_SetString(PyExc_ValueError, - "Field key must be an integer, string, or unicode."); + descr = PyTuple_GET_ITEM(obj, 0); + Py_INCREF(descr); + return descr; +} + +static PyObject * +_subscript_by_index(PyArray_Descr *self, Py_ssize_t i) +{ + PyObject *name = PySequence_GetItem(self->names, i); + if (name == NULL) { + PyErr_Format(PyExc_IndexError, + "Field index %zd out of range.", i); return 
NULL; } - return retval; + return _subscript_by_name(self, name); +} + +static PyObject * +descr_subscript(PyArray_Descr *self, PyObject *op) +{ + if (_check_has_fields(self) < 0) { + return NULL; + } + + if (PyBaseString_Check(op)) { + return _subscript_by_name(self, op); + } + else { + Py_ssize_t i = PyArray_PyIntAsIntp(op); + if (error_converting(i)) { + /* if converting to an int gives a type error, adjust the message */ + PyObject *err = PyErr_Occurred(); + if (PyErr_GivenExceptionMatches(err, PyExc_TypeError)) { + PyErr_SetString(PyExc_TypeError, + "Field key must be an integer, string, or unicode."); + } + return NULL; + } + return _subscript_by_index(self, i); + } } static PySequenceMethods descr_as_sequence = { - descr_length, - (binaryfunc)NULL, - descr_repeat, - NULL, NULL, - NULL, /* sq_ass_item */ - NULL, /* ssizessizeobjargproc sq_ass_slice */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0, /* sq_inplace_repeat */ + (lenfunc) descr_length, /* sq_length */ + (binaryfunc) NULL, /* sq_concat */ + (ssizeargfunc) descr_repeat, /* sq_repeat */ + (ssizeargfunc) NULL, /* sq_item */ + (ssizessizeargfunc) NULL, /* sq_slice */ + (ssizeobjargproc) NULL, /* sq_ass_item */ + (ssizessizeobjargproc) NULL, /* sq_ass_slice */ + (objobjproc) NULL, /* sq_contains */ + (binaryfunc) NULL, /* sq_inplace_concat */ + (ssizeargfunc) NULL, /* sq_inplace_repeat */ }; static PyMappingMethods descr_as_mapping = { diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/descriptor.h python-numpy-1.14.5/numpy/core/src/multiarray/descriptor.h --- python-numpy-1.13.3/numpy/core/src/multiarray/descriptor.h 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/descriptor.h 2018-06-12 17:31:56.000000000 +0000 @@ -10,6 +10,10 @@ NPY_NO_EXPORT PyArray_Descr * _arraydescr_fromobj(PyObject *obj); + +NPY_NO_EXPORT int +is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype); + /* * Creates a string repr of the dtype, excluding the 'dtype()' part * 
surrounding the object. This object may be a string, a list, or diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/dragon4.c python-numpy-1.14.5/numpy/core/src/multiarray/dragon4.c --- python-numpy-1.13.3/numpy/core/src/multiarray/dragon4.c 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/dragon4.c 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,2767 @@ +/* + * Copyright (c) 2014 Ryan Juckett + * http://www.ryanjuckett.com/ + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would be + * appreciated but is not required. + * + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * + * 3. This notice may not be removed or altered from any source + * distribution. + */ + +/* + * This file contains a modified version of Ryan Juckett's Dragon4 + * implementation, which has been ported from C++ to C and which has + * modifications specific to printing floats in numpy. + */ + +#include "dragon4.h" +#include +#include +#include +#include + +#include + +#if 0 +#define DEBUG_ASSERT(stmnt) assert(stmnt) +#else +#define DEBUG_ASSERT(stmnt) do {} while(0) +#endif + +/* + * Get the log base 2 of a 32-bit unsigned integer. 
+ * http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup + */ +static npy_uint32 +LogBase2_32(npy_uint32 val) +{ + static const npy_uint8 logTable[256] = + { + 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 + }; + + npy_uint32 temp; + + temp = val >> 24; + if (temp) { + return 24 + logTable[temp]; + } + + temp = val >> 16; + if (temp) { + return 16 + logTable[temp]; + } + + temp = val >> 8; + if (temp) { + return 8 + logTable[temp]; + } + + return logTable[val]; +} + +static npy_uint32 +LogBase2_64(npy_uint64 val) +{ + npy_uint64 temp; + + temp = val >> 32; + if (temp) { + return 32 + LogBase2_32((npy_uint32)temp); + } + + return LogBase2_32((npy_uint32)val); +} + + +/* + * Maximum number of 32 bit blocks needed in high precision arithmetic to print + * out 128 bit IEEE floating point values. 1023 chosen to be large enough for + * 128 bit floats, and BigInt is exactly 4kb (nice for page/cache?) + */ +#define c_BigInt_MaxBlocks 1023 + +/* + * This structure stores a high precision unsigned integer. It uses a buffer of + * 32 bit integer blocks along with a length. The lowest bits of the integer + * are stored at the start of the buffer and the length is set to the minimum + * value that contains the integer. 
Thus, there are never any zero blocks at + * the end of the buffer. + */ +typedef struct BigInt { + npy_uint32 length; + npy_uint32 blocks[c_BigInt_MaxBlocks]; +} BigInt; + +/* Copy integer */ +static void +BigInt_Copy(BigInt *dst, const BigInt *src) +{ + npy_uint32 length = src->length; + npy_uint32 * dstp = dst->blocks; + const npy_uint32 *srcp; + for (srcp = src->blocks; srcp != src->blocks + length; ++dstp, ++srcp) { + *dstp = *srcp; + } + dst->length = length; +} + +/* Basic type accessors */ +static void +BigInt_Set_uint64(BigInt *i, npy_uint64 val) +{ + if (val > 0xFFFFFFFF) { + i->blocks[0] = val & 0xFFFFFFFF; + i->blocks[1] = (val >> 32) & 0xFFFFFFFF; + i->length = 2; + } + else if (val != 0) { + i->blocks[0] = val & 0xFFFFFFFF; + i->length = 1; + } + else { + i->length = 0; + } +} + +static void +BigInt_Set_uint32(BigInt *i, npy_uint32 val) +{ + if (val != 0) { + i->blocks[0] = val; + i->length = (val != 0); + } + else { + i->length = 0; + } +} + +/* + * Returns 0 if (lhs = rhs), negative if (lhs < rhs), positive if (lhs > rhs) + */ +static npy_int32 +BigInt_Compare(const BigInt *lhs, const BigInt *rhs) +{ + int i; + + /* A bigger length implies a bigger number. */ + npy_int32 lengthDiff = lhs->length - rhs->length; + if (lengthDiff != 0) { + return lengthDiff; + } + + /* Compare blocks one by one from high to low. 
*/ + for (i = lhs->length - 1; i >= 0; --i) { + if (lhs->blocks[i] == rhs->blocks[i]) { + continue; + } + else if (lhs->blocks[i] > rhs->blocks[i]) { + return 1; + } + else { + return -1; + } + } + + /* no blocks differed */ + return 0; +} + +/* result = lhs + rhs */ +static void +BigInt_Add(BigInt *result, const BigInt *lhs, const BigInt *rhs) +{ + /* determine which operand has the smaller length */ + const BigInt *large, *small; + npy_uint64 carry = 0; + const npy_uint32 *largeCur, *smallCur, *largeEnd, *smallEnd; + npy_uint32 *resultCur; + + if (lhs->length < rhs->length) { + small = lhs; + large = rhs; + } + else { + small = rhs; + large = lhs; + } + + /* The output will be at least as long as the largest input */ + result->length = large->length; + + /* Add each block and add carry the overflow to the next block */ + largeCur = large->blocks; + largeEnd = largeCur + large->length; + smallCur = small->blocks; + smallEnd = smallCur + small->length; + resultCur = result->blocks; + while (smallCur != smallEnd) { + npy_uint64 sum = carry + (npy_uint64)(*largeCur) + + (npy_uint64)(*smallCur); + carry = sum >> 32; + *resultCur = sum & 0xFFFFFFFF; + ++largeCur; + ++smallCur; + ++resultCur; + } + + /* Add the carry to any blocks that only exist in the large operand */ + while (largeCur != largeEnd) { + npy_uint64 sum = carry + (npy_uint64)(*largeCur); + carry = sum >> 32; + (*resultCur) = sum & 0xFFFFFFFF; + ++largeCur; + ++resultCur; + } + + /* If there's still a carry, append a new block */ + if (carry != 0) { + DEBUG_ASSERT(carry == 1); + DEBUG_ASSERT((npy_uint32)(resultCur - result->blocks) == + large->length && (large->length < c_BigInt_MaxBlocks)); + *resultCur = 1; + result->length = large->length + 1; + } + else { + result->length = large->length; + } +} + +/* + * result = lhs * rhs + */ +static void +BigInt_Multiply(BigInt *result, const BigInt *lhs, const BigInt *rhs) +{ + const BigInt *large; + const BigInt *small; + npy_uint32 maxResultLen; + npy_uint32 
*cur, *end, *resultStart; + const npy_uint32 *smallCur; + + DEBUG_ASSERT(result != lhs && result != rhs); + + /* determine which operand has the smaller length */ + if (lhs->length < rhs->length) { + small = lhs; + large = rhs; + } + else { + small = rhs; + large = lhs; + } + + /* set the maximum possible result length */ + maxResultLen = large->length + small->length; + DEBUG_ASSERT(maxResultLen <= c_BigInt_MaxBlocks); + + /* clear the result data */ + for (cur = result->blocks, end = cur + maxResultLen; cur != end; ++cur) { + *cur = 0; + } + + /* perform standard long multiplication for each small block */ + resultStart = result->blocks; + for (smallCur = small->blocks; + smallCur != small->blocks + small->length; + ++smallCur, ++resultStart) { + /* + * if non-zero, multiply against all the large blocks and add into the + * result + */ + const npy_uint32 multiplier = *smallCur; + if (multiplier != 0) { + const npy_uint32 *largeCur = large->blocks; + npy_uint32 *resultCur = resultStart; + npy_uint64 carry = 0; + do { + npy_uint64 product = (*resultCur) + + (*largeCur)*(npy_uint64)multiplier + carry; + carry = product >> 32; + *resultCur = product & 0xFFFFFFFF; + ++largeCur; + ++resultCur; + } while(largeCur != large->blocks + large->length); + + DEBUG_ASSERT(resultCur < result->blocks + maxResultLen); + *resultCur = (npy_uint32)(carry & 0xFFFFFFFF); + } + } + + /* check if the terminating block has no set bits */ + if (maxResultLen > 0 && result->blocks[maxResultLen - 1] == 0) { + result->length = maxResultLen-1; + } + else { + result->length = maxResultLen; + } +} + +/* result = lhs * rhs */ +static void +BigInt_Multiply_int(BigInt *result, const BigInt *lhs, npy_uint32 rhs) +{ + /* perform long multiplication */ + npy_uint32 carry = 0; + npy_uint32 *resultCur = result->blocks; + const npy_uint32 *pLhsCur = lhs->blocks; + const npy_uint32 *pLhsEnd = lhs->blocks + lhs->length; + for ( ; pLhsCur != pLhsEnd; ++pLhsCur, ++resultCur) { + npy_uint64 product = 
(npy_uint64)(*pLhsCur) * rhs + carry; + *resultCur = (npy_uint32)(product & 0xFFFFFFFF); + carry = product >> 32; + } + + /* if there is a remaining carry, grow the array */ + if (carry != 0) { + /* grow the array */ + DEBUG_ASSERT(lhs->length + 1 <= c_BigInt_MaxBlocks); + *resultCur = (npy_uint32)carry; + result->length = lhs->length + 1; + } + else { + result->length = lhs->length; + } +} + +/* result = in * 2 */ +static void +BigInt_Multiply2(BigInt *result, const BigInt *in) +{ + /* shift all the blocks by one */ + npy_uint32 carry = 0; + + npy_uint32 *resultCur = result->blocks; + const npy_uint32 *pLhsCur = in->blocks; + const npy_uint32 *pLhsEnd = in->blocks + in->length; + for ( ; pLhsCur != pLhsEnd; ++pLhsCur, ++resultCur) { + npy_uint32 cur = *pLhsCur; + *resultCur = (cur << 1) | carry; + carry = cur >> 31; + } + + if (carry != 0) { + /* grow the array */ + DEBUG_ASSERT(in->length + 1 <= c_BigInt_MaxBlocks); + *resultCur = carry; + result->length = in->length + 1; + } + else { + result->length = in->length; + } +} + +/* result = result * 2 */ +static void +BigInt_Multiply2_inplace(BigInt *result) +{ + /* shift all the blocks by one */ + npy_uint32 carry = 0; + + npy_uint32 *cur = result->blocks; + npy_uint32 *end = result->blocks + result->length; + for ( ; cur != end; ++cur) { + npy_uint32 tmpcur = *cur; + *cur = (tmpcur << 1) | carry; + carry = tmpcur >> 31; + } + + if (carry != 0) { + /* grow the array */ + DEBUG_ASSERT(result->length + 1 <= c_BigInt_MaxBlocks); + *cur = carry; + ++result->length; + } +} + +/* result = result * 10 */ +static void +BigInt_Multiply10(BigInt *result) +{ + /* multiply all the blocks */ + npy_uint64 carry = 0; + + npy_uint32 *cur = result->blocks; + npy_uint32 *end = result->blocks + result->length; + for ( ; cur != end; ++cur) { + npy_uint64 product = (npy_uint64)(*cur) * 10ull + carry; + (*cur) = (npy_uint32)(product & 0xFFFFFFFF); + carry = product >> 32; + } + + if (carry != 0) { + /* grow the array */ + 
DEBUG_ASSERT(result->length + 1 <= c_BigInt_MaxBlocks); + *cur = (npy_uint32)carry; + ++result->length; + } +} + +static npy_uint32 g_PowerOf10_U32[] = +{ + 1, /* 10 ^ 0 */ + 10, /* 10 ^ 1 */ + 100, /* 10 ^ 2 */ + 1000, /* 10 ^ 3 */ + 10000, /* 10 ^ 4 */ + 100000, /* 10 ^ 5 */ + 1000000, /* 10 ^ 6 */ + 10000000, /* 10 ^ 7 */ +}; + +/* + * Note: This has a lot of wasted space in the big integer structures of the + * early table entries. It wouldn't be terribly hard to make the multiply + * function work on integer pointers with an array length instead of + * the BigInt struct which would allow us to store a minimal amount of + * data here. + */ +static BigInt g_PowerOf10_Big[] = +{ + /* 10 ^ 8 */ + { 1, { 100000000 } }, + /* 10 ^ 16 */ + { 2, { 0x6fc10000, 0x002386f2 } }, + /* 10 ^ 32 */ + { 4, { 0x00000000, 0x85acef81, 0x2d6d415b, 0x000004ee, } }, + /* 10 ^ 64 */ + { 7, { 0x00000000, 0x00000000, 0xbf6a1f01, 0x6e38ed64, 0xdaa797ed, + 0xe93ff9f4, 0x00184f03, } }, + /* 10 ^ 128 */ + { 14, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x2e953e01, + 0x03df9909, 0x0f1538fd, 0x2374e42f, 0xd3cff5ec, 0xc404dc08, + 0xbccdb0da, 0xa6337f19, 0xe91f2603, 0x0000024e, } }, + /* 10 ^ 256 */ + { 27, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x982e7c01, 0xbed3875b, + 0xd8d99f72, 0x12152f87, 0x6bde50c6, 0xcf4a6e70, 0xd595d80f, + 0x26b2716e, 0xadc666b0, 0x1d153624, 0x3c42d35a, 0x63ff540e, + 0xcc5573c0, 0x65f9ef17, 0x55bc28f2, 0x80dcc7f7, 0xf46eeddc, + 0x5fdcefce, 0x000553f7, } }, + /* 10 ^ 512 */ + { 54, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xfc6cf801, 0x77f27267, 0x8f9546dc, 0x5d96976f, + 0xb83a8a97, 0xc31e1ad9, 0x46c40513, 0x94e65747, 0xc88976c1, + 0x4475b579, 0x28f8733b, 0xaa1da1bf, 0x703ed321, 0x1e25cfea, + 0xb21a2f22, 0xbc51fb2e, 0x96e14f5d, 0xbfa3edac, 
0x329c57ae, + 0xe7fc7153, 0xc3fc0695, 0x85a91924, 0xf95f635e, 0xb2908ee0, + 0x93abade4, 0x1366732a, 0x9449775c, 0x69be5b0e, 0x7343afac, + 0xb099bc81, 0x45a71d46, 0xa2699748, 0x8cb07303, 0x8a0b1f13, + 0x8cab8a97, 0xc1d238d9, 0x633415d4, 0x0000001c, } }, + /* 10 ^ 1024 */ + { 107, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x2919f001, 0xf55b2b72, 0x6e7c215b, + 0x1ec29f86, 0x991c4e87, 0x15c51a88, 0x140ac535, 0x4c7d1e1a, + 0xcc2cd819, 0x0ed1440e, 0x896634ee, 0x7de16cfb, 0x1e43f61f, + 0x9fce837d, 0x231d2b9c, 0x233e55c7, 0x65dc60d7, 0xf451218b, + 0x1c5cd134, 0xc9635986, 0x922bbb9f, 0xa7e89431, 0x9f9f2a07, + 0x62be695a, 0x8e1042c4, 0x045b7a74, 0x1abe1de3, 0x8ad822a5, + 0xba34c411, 0xd814b505, 0xbf3fdeb3, 0x8fc51a16, 0xb1b896bc, + 0xf56deeec, 0x31fb6bfd, 0xb6f4654b, 0x101a3616, 0x6b7595fb, + 0xdc1a47fe, 0x80d98089, 0x80bda5a5, 0x9a202882, 0x31eb0f66, + 0xfc8f1f90, 0x976a3310, 0xe26a7b7e, 0xdf68368a, 0x3ce3a0b8, + 0x8e4262ce, 0x75a351a2, 0x6cb0b6c9, 0x44597583, 0x31b5653f, + 0xc356e38a, 0x35faaba6, 0x0190fba0, 0x9fc4ed52, 0x88bc491b, + 0x1640114a, 0x005b8041, 0xf4f3235e, 0x1e8d4649, 0x36a8de06, + 0x73c55349, 0xa7e6bd2a, 0xc1a6970c, 0x47187094, 0xd2db49ef, + 0x926c3f5b, 0xae6209d4, 0x2d433949, 0x34f4a3c6, 0xd4305d94, + 0xd9d61a05, 0x00000325, } }, + /* 10 ^ 2048 */ + { 213, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x1333e001, + 0xe3096865, 0xb27d4d3f, 0x49e28dcf, 0xec2e4721, 0xee87e354, + 0xb6067584, 0x368b8abb, 0xa5e5a191, 0x2ed56d55, 0xfd827773, + 0xea50d142, 0x51b78db2, 0x98342c9e, 0xc850dabc, 0x866ed6f1, + 0x19342c12, 0x92794987, 0xd2f869c2, 0x66912e4a, 0x71c7fd8f, + 0x57a7842d, 0x235552eb, 0xfb7fedcc, 0xf3861ce0, 0x38209ce1, + 0x9713b449, 0x34c10134, 0x8c6c54de, 0xa7a8289c, 0x2dbb6643, + 0xe3cb64f3, 0x8074ff01, 0xe3892ee9, 0x10c17f94, 0xa8f16f92, + 0xa8281ed6, 0x967abbb3, 0x5a151440, 0x9952fbed, 0x13b41e44, + 0xafe609c3, 0xa2bca416, 0xf111821f, 0xfb1264b4, 0x91bac974, + 0xd6c7d6ab, 0x8e48ff35, 0x4419bd43, 0xc4a65665, 0x685e5510, + 0x33554c36, 0xab498697, 0x0dbd21fe, 0x3cfe491d, 0x982da466, + 0xcbea4ca7, 0x9e110c7b, 0x79c56b8a, 0x5fc5a047, 0x84d80e2e, + 0x1aa9f444, 0x730f203c, 0x6a57b1ab, 0xd752f7a6, 0x87a7dc62, + 0x944545ff, 0x40660460, 0x77c1a42f, 0xc9ac375d, 0xe866d7ef, + 0x744695f0, 0x81428c85, 0xa1fc6b96, 0xd7917c7b, 0x7bf03c19, + 0x5b33eb41, 0x5715f791, 0x8f6cae5f, 0xdb0708fd, 0xb125ac8e, + 0x785ce6b7, 0x56c6815b, 0x6f46eadb, 0x4eeebeee, 0x195355d8, + 0xa244de3c, 0x9d7389c0, 0x53761abd, 0xcf99d019, 0xde9ec24b, + 0x0d76ce39, 0x70beb181, 0x2e55ecee, 0xd5f86079, 0xf56d9d4b, + 0xfb8886fb, 0x13ef5a83, 0x408f43c5, 0x3f3389a4, 0xfad37943, + 0x58ccf45c, 0xf82df846, 0x415c7f3e, 0x2915e818, 0x8b3d5cf4, + 0x6a445f27, 0xf8dbb57a, 0xca8f0070, 0x8ad803ec, 0xb2e87c34, + 0x038f9245, 0xbedd8a6c, 0xc7c9dee0, 0x0eac7d56, 0x2ad3fa14, + 0xe0de0840, 0xf775677c, 0xf1bd0ad5, 0x92be221e, 0x87fa1fb9, + 0xce9d04a4, 0xd2c36fa9, 0x3f6f7024, 0xb028af62, 0x907855ee, + 
0xd83e49d6, 0x4efac5dc, 0xe7151aab, 0x77cd8c6b, 0x0a753b7d, + 0x0af908b4, 0x8c983623, 0xe50f3027, 0x94222771, 0x1d08e2d6, + 0xf7e928e6, 0xf2ee5ca6, 0x1b61b93c, 0x11eb962b, 0x9648b21c, + 0xce2bcba1, 0x34f77154, 0x7bbebe30, 0xe526a319, 0x8ce329ac, + 0xde4a74d2, 0xb5dc53d5, 0x0009e8b3, } }, + /* 10 ^ 4096 */ + { 426, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x2a67c001, 0xd4724e8d, + 0x8efe7ae7, 0xf89a1e90, 0xef084117, 0x54e05154, 0x13b1bb51, + 
0x506be829, 0xfb29b172, 0xe599574e, 0xf0da6146, 0x806c0ed3, + 0xb86ae5be, 0x45155e93, 0xc0591cc2, 0x7e1e7c34, 0x7c4823da, + 0x1d1f4cce, 0x9b8ba1e8, 0xd6bfdf75, 0xe341be10, 0xc2dfae78, + 0x016b67b2, 0x0f237f1a, 0x3dbeabcd, 0xaf6a2574, 0xcab3e6d7, + 0x142e0e80, 0x61959127, 0x2c234811, 0x87009701, 0xcb4bf982, + 0xf8169c84, 0x88052f8c, 0x68dde6d4, 0xbc131761, 0xff0b0905, + 0x54ab9c41, 0x7613b224, 0x1a1c304e, 0x3bfe167b, 0x441c2d47, + 0x4f6cea9c, 0x78f06181, 0xeb659fb8, 0x30c7ae41, 0x947e0d0e, + 0xa1ebcad7, 0xd97d9556, 0x2130504d, 0x1a8309cb, 0xf2acd507, + 0x3f8ec72a, 0xfd82373a, 0x95a842bc, 0x280f4d32, 0xf3618ac0, + 0x811a4f04, 0x6dc3a5b4, 0xd3967a1b, 0x15b8c898, 0xdcfe388f, + 0x454eb2a0, 0x8738b909, 0x10c4e996, 0x2bd9cc11, 0x3297cd0c, + 0x655fec30, 0xae0725b1, 0xf4090ee8, 0x037d19ee, 0x398c6fed, + 0x3b9af26b, 0xc994a450, 0xb5341743, 0x75a697b2, 0xac50b9c1, + 0x3ccb5b92, 0xffe06205, 0xa8329761, 0xdfea5242, 0xeb83cadb, + 0xe79dadf7, 0x3c20ee69, 0x1e0a6817, 0x7021b97a, 0x743074fa, + 0x176ca776, 0x77fb8af6, 0xeca19beb, 0x92baf1de, 0xaf63b712, + 0xde35c88b, 0xa4eb8f8c, 0xe137d5e9, 0x40b464a0, 0x87d1cde8, + 0x42923bbd, 0xcd8f62ff, 0x2e2690f3, 0x095edc16, 0x59c89f1b, + 0x1fa8fd5d, 0x5138753d, 0x390a2b29, 0x80152f18, 0x2dd8d925, + 0xf984d83e, 0x7a872e74, 0xc19e1faf, 0xed4d542d, 0xecf9b5d0, + 0x9462ea75, 0xc53c0adf, 0x0caea134, 0x37a2d439, 0xc8fa2e8a, + 0x2181327e, 0x6e7bb827, 0x2d240820, 0x50be10e0, 0x5893d4b8, + 0xab312bb9, 0x1f2b2322, 0x440b3f25, 0xbf627ede, 0x72dac789, + 0xb608b895, 0x78787e2a, 0x86deb3f0, 0x6fee7aab, 0xbb9373f4, + 0x27ecf57b, 0xf7d8b57e, 0xfca26a9f, 0x3d04e8d2, 0xc9df13cb, + 0x3172826a, 0xcd9e8d7c, 0xa8fcd8e0, 0xb2c39497, 0x307641d9, + 0x1cc939c1, 0x2608c4cf, 0xb6d1c7bf, 0x3d326a7e, 0xeeaf19e6, + 0x8e13e25f, 0xee63302b, 0x2dfe6d97, 0x25971d58, 0xe41d3cc4, + 0x0a80627c, 0xab8db59a, 0x9eea37c8, 0xe90afb77, 0x90ca19cf, + 0x9ee3352c, 0x3613c850, 0xfe78d682, 0x788f6e50, 0x5b060904, + 0xb71bd1a4, 0x3fecb534, 0xb32c450c, 0x20c33857, 0xa6e9cfda, + 0x0239f4ce, 
0x48497187, 0xa19adb95, 0xb492ed8a, 0x95aca6a8, + 0x4dcd6cd9, 0xcf1b2350, 0xfbe8b12a, 0x1a67778c, 0x38eb3acc, + 0xc32da383, 0xfb126ab1, 0xa03f40a8, 0xed5bf546, 0xe9ce4724, + 0x4c4a74fd, 0x73a130d8, 0xd9960e2d, 0xa2ebd6c1, 0x94ab6feb, + 0x6f233b7c, 0x49126080, 0x8e7b9a73, 0x4b8c9091, 0xd298f999, + 0x35e836b5, 0xa96ddeff, 0x96119b31, 0x6b0dd9bc, 0xc6cc3f8d, + 0x282566fb, 0x72b882e7, 0xd6769f3b, 0xa674343d, 0x00fc509b, + 0xdcbf7789, 0xd6266a3f, 0xae9641fd, 0x4e89541b, 0x11953407, + 0x53400d03, 0x8e0dd75a, 0xe5b53345, 0x108f19ad, 0x108b89bc, + 0x41a4c954, 0xe03b2b63, 0x437b3d7f, 0x97aced8e, 0xcbd66670, + 0x2c5508c2, 0x650ebc69, 0x5c4f2ef0, 0x904ff6bf, 0x9985a2df, + 0x9faddd9e, 0x5ed8d239, 0x25585832, 0xe3e51cb9, 0x0ff4f1d4, + 0x56c02d9a, 0x8c4ef804, 0xc1a08a13, 0x13fd01c8, 0xe6d27671, + 0xa7c234f4, 0x9d0176cc, 0xd0d73df2, 0x4d8bfa89, 0x544f10cd, + 0x2b17e0b2, 0xb70a5c7d, 0xfd86fe49, 0xdf373f41, 0x214495bb, + 0x84e857fd, 0x00d313d5, 0x0496fcbe, 0xa4ba4744, 0xe8cac982, + 0xaec29e6e, 0x87ec7038, 0x7000a519, 0xaeee333b, 0xff66e42c, + 0x8afd6b25, 0x03b4f63b, 0xbd7991dc, 0x5ab8d9c7, 0x2ed4684e, + 0x48741a6c, 0xaf06940d, 0x2fdc6349, 0xb03d7ecd, 0xe974996f, + 0xac7867f9, 0x52ec8721, 0xbcdd9d4a, 0x8edd2d00, 0x3557de06, + 0x41c759f8, 0x3956d4b9, 0xa75409f2, 0x123cd8a1, 0xb6100fab, + 0x3e7b21e2, 0x2e8d623b, 0x92959da2, 0xbca35f77, 0x200c03a5, + 0x35fcb457, 0x1bb6c6e4, 0xf74eb928, 0x3d5d0b54, 0x87cc1d21, + 0x4964046f, 0x18ae4240, 0xd868b275, 0x8bd2b496, 0x1c5563f4, + 0xc234d8f5, 0xf868e970, 0xf9151fff, 0xae7be4a2, 0x271133ee, + 0xbb0fd922, 0x25254932, 0xa60a9fc0, 0x104bcd64, 0x30290145, + 0x00000062, } }, +}; + +/* result = 10^exponent */ +static void +BigInt_Pow10(BigInt *result, npy_uint32 exponent) +{ + /* create two temporary values to reduce large integer copy operations */ + BigInt temp1; + BigInt temp2; + BigInt *curTemp = &temp1; + BigInt *pNextTemp = &temp2; + npy_uint32 smallExponent; + npy_uint32 tableIdx = 0; + + /* make sure the exponent is within the bounds of the 
lookup table data */ + DEBUG_ASSERT(exponent < 8192); + + /* + * initialize the result by looking up a 32-bit power of 10 corresponding to + * the first 3 bits + */ + smallExponent = exponent & 0x7; + BigInt_Set_uint32(curTemp, g_PowerOf10_U32[smallExponent]); + + /* remove the low bits that we used for the 32-bit lookup table */ + exponent >>= 3; + + /* while there are remaining bits in the exponent to be processed */ + while (exponent != 0) { + /* if the current bit is set, multiply by this power of 10 */ + if (exponent & 1) { + BigInt *pSwap; + + /* multiply into the next temporary */ + BigInt_Multiply(pNextTemp, curTemp, &g_PowerOf10_Big[tableIdx]); + + /* swap to the next temporary */ + pSwap = curTemp; + curTemp = pNextTemp; + pNextTemp = pSwap; + } + + /* advance to the next bit */ + ++tableIdx; + exponent >>= 1; + } + + /* output the result */ + BigInt_Copy(result, curTemp); +} + +/* result = in * 10^exponent */ +static void +BigInt_MultiplyPow10(BigInt *result, const BigInt *in, npy_uint32 exponent) +{ + + /* create two temporary values to reduce large integer copy operations */ + BigInt temp1; + BigInt temp2; + BigInt *curTemp = &temp1; + BigInt *pNextTemp = &temp2; + npy_uint32 smallExponent; + npy_uint32 tableIdx = 0; + + /* make sure the exponent is within the bounds of the lookup table data */ + DEBUG_ASSERT(exponent < 8192); + + /* + * initialize the result by looking up a 32-bit power of 10 corresponding to + * the first 3 bits + */ + smallExponent = exponent & 0x7; + if (smallExponent != 0) { + BigInt_Multiply_int(curTemp, in, g_PowerOf10_U32[smallExponent]); + } + else { + BigInt_Copy(curTemp, in); + } + + /* remove the low bits that we used for the 32-bit lookup table */ + exponent >>= 3; + + /* while there are remaining bits in the exponent to be processed */ + while (exponent != 0) { + /* if the current bit is set, multiply by this power of 10 */ + if (exponent & 1) { + BigInt *pSwap; + + /* multiply into the next temporary */ + 
BigInt_Multiply(pNextTemp, curTemp, &g_PowerOf10_Big[tableIdx]); + + // swap to the next temporary + pSwap = curTemp; + curTemp = pNextTemp; + pNextTemp = pSwap; + } + + /* advance to the next bit */ + ++tableIdx; + exponent >>= 1; + } + + /* output the result */ + BigInt_Copy(result, curTemp); +} + +/* result = 2^exponent */ +static inline void +BigInt_Pow2(BigInt *result, npy_uint32 exponent) +{ + npy_uint32 bitIdx; + npy_uint32 blockIdx = exponent / 32; + npy_uint32 i; + + DEBUG_ASSERT(blockIdx < c_BigInt_MaxBlocks); + + for (i = 0; i <= blockIdx; ++i) { + result->blocks[i] = 0; + } + + result->length = blockIdx + 1; + + bitIdx = (exponent % 32); + result->blocks[blockIdx] |= (1 << bitIdx); +} + +/* + * This function will divide two large numbers under the assumption that the + * result is within the range [0,10) and the input numbers have been shifted + * to satisfy: + * - The highest block of the divisor is greater than or equal to 8 such that + * there is enough precision to make an accurate first guess at the quotient. + * - The highest block of the divisor is less than the maximum value on an + * unsigned 32-bit integer such that we can safely increment without overflow. + * - The dividend does not contain more blocks than the divisor such that we + * can estimate the quotient by dividing the equivalently placed high blocks. + * + * quotient = floor(dividend / divisor) + * remainder = dividend - quotient*divisor + * + * dividend is updated to be the remainder and the quotient is returned. + */ +static npy_uint32 +BigInt_DivideWithRemainder_MaxQuotient9(BigInt *dividend, const BigInt *divisor) +{ + npy_uint32 length, quotient; + const npy_uint32 *finalDivisorBlock; + npy_uint32 *finalDividendBlock; + + /* + * Check that the divisor has been correctly shifted into range and that it + * is not smaller than the dividend in length. 
+ */ + DEBUG_ASSERT(!divisor->length == 0 && + divisor->blocks[divisor->length-1] >= 8 && + divisor->blocks[divisor->length-1] < 0xFFFFFFFF && + dividend->length <= divisor->length); + + /* + * If the dividend is smaller than the divisor, the quotient is zero and the + * divisor is already the remainder. + */ + length = divisor->length; + if (dividend->length < divisor->length) { + return 0; + } + + finalDivisorBlock = divisor->blocks + length - 1; + finalDividendBlock = dividend->blocks + length - 1; + + /* + * Compute an estimated quotient based on the high block value. This will + * either match the actual quotient or undershoot by one. + */ + quotient = *finalDividendBlock / (*finalDivisorBlock + 1); + DEBUG_ASSERT(quotient <= 9); + + /* Divide out the estimated quotient */ + if (quotient != 0) { + /* dividend = dividend - divisor*quotient */ + const npy_uint32 *divisorCur = divisor->blocks; + npy_uint32 *dividendCur = dividend->blocks; + + npy_uint64 borrow = 0; + npy_uint64 carry = 0; + do { + npy_uint64 difference, product; + + product = (npy_uint64)*divisorCur * (npy_uint64)quotient + carry; + carry = product >> 32; + + difference = (npy_uint64)*dividendCur + - (product & 0xFFFFFFFF) - borrow; + borrow = (difference >> 32) & 1; + + *dividendCur = difference & 0xFFFFFFFF; + + ++divisorCur; + ++dividendCur; + } while(divisorCur <= finalDivisorBlock); + + /* remove all leading zero blocks from dividend */ + while (length > 0 && dividend->blocks[length - 1] == 0) { + --length; + } + + dividend->length = length; + } + + /* + * If the dividend is still larger than the divisor, we overshot our + * estimate quotient. To correct, we increment the quotient and subtract one + * more divisor from the dividend. 
+ */ + if (BigInt_Compare(dividend, divisor) >= 0) { + /* dividend = dividend - divisor */ + const npy_uint32 *divisorCur = divisor->blocks; + npy_uint32 *dividendCur = dividend->blocks; + npy_uint64 borrow = 0; + + ++quotient; + + do { + npy_uint64 difference = (npy_uint64)*dividendCur + - (npy_uint64)*divisorCur - borrow; + borrow = (difference >> 32) & 1; + + *dividendCur = difference & 0xFFFFFFFF; + + ++divisorCur; + ++dividendCur; + } while(divisorCur <= finalDivisorBlock); + + /* remove all leading zero blocks from dividend */ + while (length > 0 && dividend->blocks[length - 1] == 0) { + --length; + } + + dividend->length = length; + } + + return quotient; +} + +/* result = result << shift */ +static void +BigInt_ShiftLeft(BigInt *result, npy_uint32 shift) +{ + npy_uint32 shiftBlocks = shift / 32; + npy_uint32 shiftBits = shift % 32; + + /* process blocks high to low so that we can safely process in place */ + const npy_uint32 *pInBlocks = result->blocks; + npy_int32 inLength = result->length; + npy_uint32 *pInCur, *pOutCur; + + DEBUG_ASSERT(inLength + shiftBlocks < c_BigInt_MaxBlocks); + DEBUG_ASSERT(shift != 0); + + /* check if the shift is block aligned */ + if (shiftBits == 0) { + npy_uint32 i; + + /* copy blocks from high to low */ + for (pInCur = result->blocks + result->length, + pOutCur = pInCur + shiftBlocks; + pInCur >= pInBlocks; + --pInCur, --pOutCur) { + *pOutCur = *pInCur; + } + + /* zero the remaining low blocks */ + for (i = 0; i < shiftBlocks; ++i) { + result->blocks[i] = 0; + } + + result->length += shiftBlocks; + } + /* else we need to shift partial blocks */ + else { + npy_uint32 i; + npy_int32 inBlockIdx = inLength - 1; + npy_uint32 outBlockIdx = inLength + shiftBlocks; + + /* output the initial blocks */ + const npy_uint32 lowBitsShift = (32 - shiftBits); + npy_uint32 highBits = 0; + npy_uint32 block = result->blocks[inBlockIdx]; + npy_uint32 lowBits = block >> lowBitsShift; + + /* set the length to hold the shifted blocks */ + 
DEBUG_ASSERT(outBlockIdx < c_BigInt_MaxBlocks); + result->length = outBlockIdx + 1; + + while (inBlockIdx > 0) { + result->blocks[outBlockIdx] = highBits | lowBits; + highBits = block << shiftBits; + + --inBlockIdx; + --outBlockIdx; + + block = result->blocks[inBlockIdx]; + lowBits = block >> lowBitsShift; + } + + /* output the final blocks */ + DEBUG_ASSERT(outBlockIdx == shiftBlocks + 1); + result->blocks[outBlockIdx] = highBits | lowBits; + result->blocks[outBlockIdx-1] = block << shiftBits; + + /* zero the remaining low blocks */ + for (i = 0; i < shiftBlocks; ++i) { + result->blocks[i] = 0; + } + + /* check if the terminating block has no set bits */ + if (result->blocks[result->length - 1] == 0) { + --result->length; + } + } +} + + +/* + * This is an implementation the Dragon4 algorithm to convert a binary number in + * floating point format to a decimal number in string format. The function + * returns the number of digits written to the output buffer and the output is + * not NUL terminated. + * + * The floating point input value is (mantissa * 2^exponent). + * + * See the following papers for more information on the algorithm: + * "How to Print Floating-Point Numbers Accurately" + * Steele and White + * http://kurtstephens.com/files/p372-steele.pdf + * "Printing Floating-Point Numbers Quickly and Accurately" + * Burger and Dybvig + * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.4656 + * + * This implementation is essentially a port of the "Figure 3" Scheme code from + * Burger and Dybvig, but with the following additional differences: + * 1. Instead of finding the highest k such that high < B**k, we search + * for the one where v < B**k. This has a downside that if a power + * of 10 exists between v and high, we will output a 9 instead of a 1 as + * first digit, violating the "no-carry" guarantee of the paper. This is + * accounted for in a new post-processing loop which implements a carry + * operation. 
The upside is one less BigInt multiplication. + * 2. The approximate value of k found is offset by a different amount + * (0.69), in order to hit the "fast" branch more often. This is + * extensively described on Ryan Juckett's website. + * 3. The fixed precision mode is much simpler than proposed in the paper. + * It simply outputs digits by repeatedly dividing by 10. The new "carry" + * loop at the end rounds this output nicely. + * There is also some new code to account for details of the BigInt + * implementation, which are not present in the paper since it does not specify + * details of the integer calculations. + * + * There is some more documentation of these changes on Ryan Juckett's website + * at http://www.ryanjuckett.com/programming/printing-floating-point-numbers/ + * + * Ryan Juckett's implementation did not implement "IEEE unbiased rounding", + * except in the last digit. This has been added back, following the Burger & + * Dybvig code, using the isEven variable. + * + * Arguments: + * * mantissa - value significand + * * exponent - value exponent in base 2 + * * mantissaBit - index of the highest set mantissa bit + * * hasUnequalMargins - is the high margin twice as large as the low margin + * * cutoffMode - how to interpret cutoffNumber: fractional or total digits? + * * cutoffNumber - cut off printing after this many digits. 
-1 for no cutoff + * * pOutBuffer - buffer to output into + * * bufferSize - maximum characters that can be printed to pOutBuffer + * * pOutExponent - the base 10 exponent of the first digit + */ +static npy_uint32 +Dragon4(const npy_uint64 mantissa, const npy_int32 exponent, + const npy_uint32 mantissaBit, const npy_bool hasUnequalMargins, + const DigitMode digitMode, const CutoffMode cutoffMode, + npy_int32 cutoffNumber, char *pOutBuffer, + npy_uint32 bufferSize, npy_int32 *pOutExponent) +{ + char *curDigit = pOutBuffer; + + /* + * We compute values in integer format by rescaling as + * mantissa = scaledValue / scale + * marginLow = scaledMarginLow / scale + * marginHigh = scaledMarginHigh / scale + * Here, marginLow and marginHigh represent 1/2 of the distance to the next + * floating point value above/below the mantissa. + * + * scaledMarginHigh is a pointer so that it can point to scaledMarginLow in + * the case they must be equal to each other, otherwise it will point to + * optionalMarginHigh. + */ + BigInt scale; + BigInt scaledValue; + BigInt scaledMarginLow; + BigInt *scaledMarginHigh; + BigInt optionalMarginHigh; + + const npy_float64 log10_2 = 0.30102999566398119521373889472449; + npy_int32 digitExponent, cutoffExponent, hiBlock; + npy_uint32 outputDigit; /* current digit being output */ + npy_uint32 outputLen; + npy_bool isEven = (mantissa % 2) == 0; + npy_int32 cmp; + + /* values used to determine how to round */ + npy_bool low, high, roundDown; + + DEBUG_ASSERT(bufferSize > 0); + + /* if the mantissa is zero, the value is zero regardless of the exponent */ + if (mantissa == 0) { + *curDigit = '0'; + *pOutExponent = 0; + return 1; + } + + if (hasUnequalMargins) { + /* if we have no fractional component */ + if (exponent > 0) { + /* + * 1) Expand the input value by multiplying out the mantissa and + * exponent. This represents the input value in its whole number + * representation. 
+ * 2) Apply an additional scale of 2 such that later comparisons + * against the margin values are simplified. + * 3) Set the margin value to the lowest mantissa bit's scale. + */ + + /* scaledValue = 2 * 2 * mantissa*2^exponent */ + BigInt_Set_uint64(&scaledValue, mantissa); + BigInt_ShiftLeft(&scaledValue, exponent + 2); + + /* scale = 2 * 2 * 1 */ + BigInt_Set_uint32(&scale, 4); + + /* scaledMarginLow = 2 * 2^(exponent-1) */ + BigInt_Pow2(&scaledMarginLow, exponent); + + /* scaledMarginHigh = 2 * 2 * 2^(exponent-1) */ + BigInt_Pow2(&optionalMarginHigh, exponent + 1); + } + /* else we have a fractional exponent */ + else { + /* + * In order to track the mantissa data as an integer, we store it as + * is with a large scale + */ + + /* scaledValue = 2 * 2 * mantissa */ + BigInt_Set_uint64(&scaledValue, mantissa); + BigInt_ShiftLeft(&scaledValue, 2); + + /* scale = 2 * 2 * 2^(-exponent) */ + BigInt_Pow2(&scale, -exponent + 2); + + /* scaledMarginLow = 2 * 2^(-1) */ + BigInt_Set_uint32(&scaledMarginLow, 1); + + /* scaledMarginHigh = 2 * 2 * 2^(-1) */ + BigInt_Set_uint32(&optionalMarginHigh, 2); + } + + /* the high and low margins are different */ + scaledMarginHigh = &optionalMarginHigh; + } + else { + /* if we have no fractional component */ + if (exponent > 0) { + /* scaledValue = 2 * mantissa*2^exponent */ + BigInt_Set_uint64(&scaledValue, mantissa); + BigInt_ShiftLeft(&scaledValue, exponent + 1); + + /* scale = 2 * 1 */ + BigInt_Set_uint32(&scale, 2); + + /* scaledMarginLow = 2 * 2^(exponent-1) */ + BigInt_Pow2(&scaledMarginLow, exponent); + } + /* else we have a fractional exponent */ + else { + /* + * In order to track the mantissa data as an integer, we store it as + * is with a large scale + */ + + /* scaledValue = 2 * mantissa */ + BigInt_Set_uint64(&scaledValue, mantissa); + BigInt_ShiftLeft(&scaledValue, 1); + + /* scale = 2 * 2^(-exponent) */ + BigInt_Pow2(&scale, -exponent + 1); + + /* scaledMarginLow = 2 * 2^(-1) */ + 
BigInt_Set_uint32(&scaledMarginLow, 1); + } + + /* the high and low margins are equal */ + scaledMarginHigh = &scaledMarginLow; + } + + /* + * Compute an estimate for digitExponent that will be correct or undershoot + * by one. This optimization is based on the paper "Printing Floating-Point + * Numbers Quickly and Accurately" by Burger and Dybvig + * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.4656 + * We perform an additional subtraction of 0.69 to increase the frequency of + * a failed estimate because that lets us take a faster branch in the code. + * 0.69 is chosen because 0.69 + log10(2) is less than one by a reasonable + * epsilon that will account for any floating point error. + * + * We want to set digitExponent to floor(log10(v)) + 1 + * v = mantissa*2^exponent + * log2(v) = log2(mantissa) + exponent; + * log10(v) = log2(v) * log10(2) + * floor(log2(v)) = mantissaBit + exponent; + * log10(v) - log10(2) < (mantissaBit + exponent) * log10(2) <= log10(v) + * log10(v) < (mantissaBit + exponent) * log10(2) + log10(2) + * <= log10(v) + log10(2) + * floor(log10(v)) < ceil((mantissaBit + exponent) * log10(2)) + * <= floor(log10(v)) + 1 + */ + digitExponent = (npy_int32)( + ceil((npy_float64)((npy_int32)mantissaBit + exponent) * log10_2 - 0.69)); + + /* + * if the digit exponent is smaller than the smallest desired digit for + * fractional cutoff, pull the digit back into legal range at which point we + * will round to the appropriate value. Note that while our value for + * digitExponent is still an estimate, this is safe because it only + * increases the number. This will either correct digitExponent to an + * accurate value or it will clamp it above the accurate value. + */ + if (cutoffNumber >= 0 && cutoffMode == CutoffMode_FractionLength && + digitExponent <= -cutoffNumber) { + digitExponent = -cutoffNumber + 1; + } + + + /* Divide value by 10^digitExponent. 
*/ + if (digitExponent > 0) { + /* A positive exponent creates a division so we multiply the scale. */ + BigInt temp; + BigInt_MultiplyPow10(&temp, &scale, digitExponent); + BigInt_Copy(&scale, &temp); + } + else if (digitExponent < 0) { + /* + * A negative exponent creates a multiplication so we multiply up the + * scaledValue, scaledMarginLow and scaledMarginHigh. + */ + BigInt pow10, temp; + BigInt_Pow10(&pow10, -digitExponent); + + BigInt_Multiply(&temp, &scaledValue, &pow10); + BigInt_Copy(&scaledValue, &temp); + + BigInt_Multiply(&temp, &scaledMarginLow, &pow10); + BigInt_Copy(&scaledMarginLow, &temp); + + if (scaledMarginHigh != &scaledMarginLow) { + BigInt_Multiply2(scaledMarginHigh, &scaledMarginLow); + } + } + + /* If (value >= 1), our estimate for digitExponent was too low */ + if (BigInt_Compare(&scaledValue, &scale) >= 0) { + /* + * The exponent estimate was incorrect. + * Increment the exponent and don't perform the premultiply needed + * for the first loop iteration. + */ + digitExponent = digitExponent + 1; + } + else { + /* + * The exponent estimate was correct. + * Multiply larger by the output base to prepare for the first loop + * iteration. + */ + BigInt_Multiply10(&scaledValue); + BigInt_Multiply10(&scaledMarginLow); + if (scaledMarginHigh != &scaledMarginLow) { + BigInt_Multiply2(scaledMarginHigh, &scaledMarginLow); + } + } + + /* + * Compute the cutoff exponent (the exponent of the final digit to print). + * Default to the maximum size of the output buffer. + */ + cutoffExponent = digitExponent - bufferSize; + if (cutoffNumber >= 0) { + npy_int32 desiredCutoffExponent; + + if (cutoffMode == CutoffMode_TotalLength) { + desiredCutoffExponent = digitExponent - cutoffNumber; + if (desiredCutoffExponent > cutoffExponent) { + cutoffExponent = desiredCutoffExponent; + } + } + /* Otherwise it's CutoffMode_FractionLength. 
Print cutoffNumber digits + * past the decimal point or until we reach the buffer size + */ + else { + desiredCutoffExponent = -cutoffNumber; + if (desiredCutoffExponent > cutoffExponent) { + cutoffExponent = desiredCutoffExponent; + } + } + } + + /* Output the exponent of the first digit we will print */ + *pOutExponent = digitExponent-1; + + /* + * In preparation for calling BigInt_DivideWithRemainder_MaxQuotient9(), we + * need to scale up our values such that the highest block of the + * denominator is greater than or equal to 8. We also need to guarantee that + * the numerator can never have a length greater than the denominator after + * each loop iteration. This requires the highest block of the denominator + * to be less than or equal to 429496729 which is the highest number that + * can be multiplied by 10 without overflowing to a new block. + */ + DEBUG_ASSERT(scale.length > 0); + hiBlock = scale.blocks[scale.length - 1]; + if (hiBlock < 8 || hiBlock > 429496729) { + npy_uint32 hiBlockLog2, shift; + + /* + * Perform a bit shift on all values to get the highest block of the + * denominator into the range [8,429496729]. We are more likely to make + * accurate quotient estimations in + * BigInt_DivideWithRemainder_MaxQuotient9() with higher denominator + * values so we shift the denominator to place the highest bit at index + * 27 of the highest block. This is safe because (2^28 - 1) = 268435455 + * which is less than 429496729. This means that all values with a + * highest bit at index 27 are within range. 
+ */ + hiBlockLog2 = LogBase2_32(hiBlock); + DEBUG_ASSERT(hiBlockLog2 < 3 || hiBlockLog2 > 27); + shift = (32 + 27 - hiBlockLog2) % 32; + + BigInt_ShiftLeft(&scale, shift); + BigInt_ShiftLeft(&scaledValue, shift); + BigInt_ShiftLeft(&scaledMarginLow, shift); + if (scaledMarginHigh != &scaledMarginLow) { + BigInt_Multiply2(scaledMarginHigh, &scaledMarginLow); + } + } + + if (digitMode == DigitMode_Unique) { + /* + * For the unique cutoff mode, we will try to print until we have + * reached a level of precision that uniquely distinguishes this value + * from its neighbors. If we run out of space in the output buffer, we + * terminate early. + */ + for (;;) { + BigInt scaledValueHigh; + + digitExponent = digitExponent-1; + + /* divide out the scale to extract the digit */ + outputDigit = + BigInt_DivideWithRemainder_MaxQuotient9(&scaledValue, &scale); + DEBUG_ASSERT(outputDigit < 10); + + /* update the high end of the value */ + BigInt_Add(&scaledValueHigh, &scaledValue, scaledMarginHigh); + + /* + * stop looping if we are far enough away from our neighboring + * values or if we have reached the cutoff digit + */ + cmp = BigInt_Compare(&scaledValue, &scaledMarginLow); + low = isEven ? (cmp <= 0) : (cmp < 0); + cmp = BigInt_Compare(&scaledValueHigh, &scale); + high = isEven ? (cmp >= 0) : (cmp > 0); + if (low | high | (digitExponent == cutoffExponent)) + break; + + /* store the output digit */ + *curDigit = (char)('0' + outputDigit); + ++curDigit; + + /* multiply larger by the output base */ + BigInt_Multiply10(&scaledValue); + BigInt_Multiply10(&scaledMarginLow); + if (scaledMarginHigh != &scaledMarginLow) { + BigInt_Multiply2(scaledMarginHigh, &scaledMarginLow); + } + } + } + else { + /* + * For exact digit mode, we will try to print until we + * have exhausted all precision (i.e. all remaining digits are zeros) or + * until we reach the desired cutoff digit. 
+ */ + low = NPY_FALSE; + high = NPY_FALSE; + + for (;;) { + digitExponent = digitExponent-1; + + /* divide out the scale to extract the digit */ + outputDigit = + BigInt_DivideWithRemainder_MaxQuotient9(&scaledValue, &scale); + DEBUG_ASSERT(outputDigit < 10); + + if ((scaledValue.length == 0) | (digitExponent == cutoffExponent)) { + break; + } + + /* store the output digit */ + *curDigit = (char)('0' + outputDigit); + ++curDigit; + + /* multiply larger by the output base */ + BigInt_Multiply10(&scaledValue); + } + } + + /* default to rounding down the final digit if value got too close to 0 */ + roundDown = low; + + /* if it is legal to round up and down */ + if (low == high) { + npy_int32 compare; + + /* + * round to the closest digit by comparing value with 0.5. To do this we + * need to convert the inequality to large integer values. + * compare( value, 0.5 ) + * compare( scale * value, scale * 0.5 ) + * compare( 2 * scale * value, scale ) + */ + BigInt_Multiply2_inplace(&scaledValue); + compare = BigInt_Compare(&scaledValue, &scale); + roundDown = compare < 0; + + /* + * if we are directly in the middle, round towards the even digit (i.e. 
+ * IEEE rounding rules) + */ + if (compare == 0) { + roundDown = (outputDigit & 1) == 0; + } + } + + /* print the rounded digit */ + if (roundDown) { + *curDigit = (char)('0' + outputDigit); + ++curDigit; + } + else { + /* handle rounding up */ + if (outputDigit == 9) { + /* find the first non-nine prior digit */ + for (;;) { + /* if we are at the first digit */ + if (curDigit == pOutBuffer) { + /* output 1 at the next highest exponent */ + *curDigit = '1'; + ++curDigit; + *pOutExponent += 1; + break; + } + + --curDigit; + if (*curDigit != '9') { + /* increment the digit */ + *curDigit += 1; + ++curDigit; + break; + } + } + } + else { + /* values in the range [0,8] can perform a simple round up */ + *curDigit = (char)('0' + outputDigit + 1); + ++curDigit; + } + } + + /* return the number of digits output */ + outputLen = (npy_uint32)(curDigit - pOutBuffer); + DEBUG_ASSERT(outputLen <= bufferSize); + return outputLen; +} + + +/* + * Helper union to decompose a 16-bit IEEE float. + * sign: 1 bit + * exponent: 5 bits + * mantissa: 10 bits + */ +typedef union FloatUnion16 +{ + npy_uint16 integer; +} FloatUnion16; + +npy_bool IsNegative_F16(FloatUnion16 *v) { return (v->integer >> 15) != 0; } +npy_uint32 GetExponent_F16(FloatUnion16 *v) { return (v->integer >> 10) & 0x1F;} +npy_uint32 GetMantissa_F16(FloatUnion16 *v) { return v->integer & 0x3FF; } + + +/* + * Helper union to decompose a 32-bit IEEE float. + * sign: 1 bit + * exponent: 8 bits + * mantissa: 23 bits + */ +typedef union FloatUnion32 +{ + npy_float32 floatingPoint; + npy_uint32 integer; +} FloatUnion32; + +npy_bool IsNegative_F32(FloatUnion32 *v) { return (v->integer >> 31) != 0; } +npy_uint32 GetExponent_F32(FloatUnion32 *v) { return (v->integer >> 23) & 0xFF;} +npy_uint32 GetMantissa_F32(FloatUnion32 *v) { return v->integer & 0x7FFFFF; } + +/* + * Helper union to decompose a 64-bit IEEE float. 
+ * sign: 1 bit + * exponent: 11 bits + * mantissa: 52 bits + */ +typedef union FloatUnion64 +{ + npy_float64 floatingPoint; + npy_uint64 integer; +} FloatUnion64; +npy_bool IsNegative_F64(FloatUnion64 *v) { return (v->integer >> 63) != 0; } +npy_uint32 GetExponent_F64(FloatUnion64 *v) { return (v->integer >> 52) & 0x7FF; } +npy_uint64 GetMantissa_F64(FloatUnion64 *v) { return v->integer & 0xFFFFFFFFFFFFFull; } + +/* + * Helper unions and datatype to decompose a 80-bit IEEE float + * sign: 1 bit, second u64 + * exponent: 15 bits, second u64 + * intbit 1 bit, first u64 + * mantissa: 63 bits, first u64 + */ + +/* + * Since systems have different types of long doubles, and may not necessarily + * have a 128-byte format we can use to pass values around, here we create + * our own 128-bit storage type for convenience. + */ +typedef struct FloatVal128 { + npy_uint64 integer[2]; +} FloatVal128; +npy_bool IsNegative_F128(FloatVal128 *v) { + return ((v->integer[1] >> 15) & 0x1) != 0; +} +npy_uint32 GetExponent_F128(FloatVal128 *v) { return v->integer[1] & 0x7FFF; } +npy_uint64 GetMantissa_F128(FloatVal128 *v) { + return v->integer[0] & 0x7FFFFFFFFFFFFFFFull; +} + +/* + * then for each different definition of long double, we create a union to + * unpack the float data safely. We can then copy these integers to a + * FloatVal128. + */ +#ifdef NPY_FLOAT128 +typedef union FloatUnion128 +{ + npy_float128 floatingPoint; + struct { + npy_uint64 a; + npy_uint16 b; + } integer; +} FloatUnion128; +#endif + +#ifdef NPY_FLOAT96 +typedef union FloatUnion96 +{ + npy_float96 floatingPoint; + struct { + npy_uint64 a; + npy_uint32 b; + } integer; +} FloatUnion96; +#endif + +#ifdef NPY_FLOAT80 +typedef union FloatUnion80 +{ + npy_float80 floatingPoint; + struct { + npy_uint64 a; + npy_uint16 b; + } integer; +} FloatUnion80; +#endif + + +/* + * The main changes above this point, relative to Ryan Juckett's code, are: + * 1. 
fixed overflow problems when mantissa was 64 bits (in float128 types), + * by replacing multiplication by 2 or 4 by BigInt_ShiftLeft calls. + * 2. Increased c_BigInt_MaxBlocks + * 3. Added more entries to the g_PowerOf10_Big table + * 4. Added unbiased rounding calculation with isEven + * + * Below this point, the FormatPositional and FormatScientific functions have + * been more significantly rewritten. The Dragon4_PrintFloat16 and + * Dragon4_PrintFloat128 functions are new, and were adapted from the 64 and 32 + * bit versions. The python interfacing functions (in the header) are new. + */ + + +/* + * Outputs the positive number with positional notation: ddddd.dddd + * The output is always NUL terminated and the output length (not including the + * NUL) is returned. + * Arguments: + * buffer - buffer to output into + * bufferSize - maximum characters that can be printed to buffer + * mantissa - value significand + * exponent - value exponent in base 2 + * signbit - value of the sign position. Should be '+', '-' or '' + * mantissaBit - index of the highest set mantissa bit + * hasUnequalMargins - is the high margin twice as large as the low margin + * precision - Negative prints as many digits as are needed for a unique + * number. Positive specifies the maximum number of significant + * digits to print past the decimal point. + * trim_mode - how to treat trailing 0s and '.'. See TrimMode comments. + * digits_left - pad characters to left of decimal point. -1 for no padding + * digits_right - pad characters to right of decimal point. -1 for no padding + * padding adds whitespace until there are the specified + * number characters to sides of decimal point. Applies after + * trim_mode characters were removed. If digits_right is + * positive and the decimal point was trimmed, decimal point + * will be replaced by a whitespace character. 
+ */ +static npy_uint32 +FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, + npy_int32 exponent, char signbit, npy_uint32 mantissaBit, + npy_bool hasUnequalMargins, DigitMode digit_mode, + CutoffMode cutoff_mode, npy_int32 precision, + TrimMode trim_mode, npy_int32 digits_left, + npy_int32 digits_right) +{ + npy_int32 printExponent; + npy_int32 numDigits, numWholeDigits=0, has_sign=0; + + npy_int32 maxPrintLen = (npy_int32)bufferSize - 1, pos = 0; + + /* track the # of digits past the decimal point that have been printed */ + npy_int32 numFractionDigits = 0, desiredFractionalDigits; + + DEBUG_ASSERT(bufferSize > 0); + + if (digit_mode != DigitMode_Unique) { + DEBUG_ASSERT(precision >= 0); + } + + if (signbit == '+' && pos < maxPrintLen) { + buffer[pos++] = '+'; + has_sign = 1; + } + else if (signbit == '-' && pos < maxPrintLen) { + buffer[pos++] = '-'; + has_sign = 1; + } + + numDigits = Dragon4(mantissa, exponent, mantissaBit, hasUnequalMargins, + digit_mode, cutoff_mode, precision, buffer + has_sign, + maxPrintLen - has_sign, &printExponent); + + DEBUG_ASSERT(numDigits > 0); + DEBUG_ASSERT(numDigits <= bufferSize); + + /* if output has a whole number */ + if (printExponent >= 0) { + /* leave the whole number at the start of the buffer */ + numWholeDigits = printExponent+1; + if (numDigits <= numWholeDigits) { + npy_int32 count = numWholeDigits - numDigits; + pos += numDigits; + + /* don't overflow the buffer */ + if (pos + count > maxPrintLen) { + count = maxPrintLen - pos; + } + + /* add trailing zeros up to the decimal point */ + numDigits += count; + for ( ; count > 0; count--) { + buffer[pos++] = '0'; + } + } + /* insert the decimal point prior to the fraction */ + else if (numDigits > numWholeDigits) { + npy_int32 maxFractionDigits; + + numFractionDigits = numDigits - numWholeDigits; + maxFractionDigits = maxPrintLen - numWholeDigits - 1 - pos; + if (numFractionDigits > maxFractionDigits) { + numFractionDigits = maxFractionDigits; + 
} + + memmove(buffer + pos + numWholeDigits + 1, + buffer + pos + numWholeDigits, numFractionDigits); + pos += numWholeDigits; + buffer[pos] = '.'; + numDigits = numWholeDigits + 1 + numFractionDigits; + pos += 1 + numFractionDigits; + } + } + else { + /* shift out the fraction to make room for the leading zeros */ + npy_int32 numFractionZeros = 0; + if (pos + 2 < maxPrintLen) { + npy_int32 maxFractionZeros, digitsStartIdx, maxFractionDigits, i; + + maxFractionZeros = maxPrintLen - 2 - pos; + numFractionZeros = -(printExponent + 1); + if (numFractionZeros > maxFractionZeros) { + numFractionZeros = maxFractionZeros; + } + + digitsStartIdx = 2 + numFractionZeros; + + /* + * shift the significant digits right such that there is room for + * leading zeros + */ + numFractionDigits = numDigits; + maxFractionDigits = maxPrintLen - digitsStartIdx - pos; + if (numFractionDigits > maxFractionDigits) { + numFractionDigits = maxFractionDigits; + } + + memmove(buffer + pos + digitsStartIdx, buffer + pos, + numFractionDigits); + + /* insert the leading zeros */ + for (i = 2; i < digitsStartIdx; ++i) { + buffer[pos + i] = '0'; + } + + /* update the counts */ + numFractionDigits += numFractionZeros; + numDigits = numFractionDigits; + } + + /* add the decimal point */ + if (pos + 1 < maxPrintLen) { + buffer[pos+1] = '.'; + } + + /* add the initial zero */ + if (pos < maxPrintLen) { + buffer[pos] = '0'; + numDigits += 1; + } + numWholeDigits = 1; + pos += 2 + numFractionDigits; + } + + /* always add decimal point, except for DprZeros mode */ + if (trim_mode != TrimMode_DptZeros && numFractionDigits == 0 && + pos < maxPrintLen){ + buffer[pos++] = '.'; + } + + desiredFractionalDigits = precision; + if (cutoff_mode == CutoffMode_TotalLength && precision >= 0) { + desiredFractionalDigits = precision - numWholeDigits; + } + + if (trim_mode == TrimMode_LeaveOneZero) { + /* if we didn't print any fractional digits, add a trailing 0 */ + if (numFractionDigits == 0 && pos < maxPrintLen) { + 
buffer[pos++] = '0'; + numFractionDigits++; + } + } + else if (trim_mode == TrimMode_None && + digit_mode != DigitMode_Unique && + desiredFractionalDigits > numFractionDigits && + pos < maxPrintLen) { + /* add trailing zeros up to precision length */ + /* compute the number of trailing zeros needed */ + npy_int32 count = desiredFractionalDigits - numFractionDigits; + if (pos + count > maxPrintLen) { + count = maxPrintLen - pos; + } + numFractionDigits += count; + + for ( ; count > 0; count--) { + buffer[pos++] = '0'; + } + } + /* else, for trim_mode Zeros or DptZeros, there is nothing more to add */ + + /* + * when rounding, we may still end up with trailing zeros. Remove them + * depending on trim settings. + */ + if (precision >= 0 && trim_mode != TrimMode_None && numFractionDigits > 0){ + while (buffer[pos-1] == '0') { + pos--; + numFractionDigits--; + } + if (trim_mode == TrimMode_LeaveOneZero && buffer[pos-1] == '.') { + buffer[pos++] = '0'; + numFractionDigits++; + } + } + + /* add any whitespace padding to right side */ + if (digits_right >= numFractionDigits) { + npy_int32 count = digits_right - numFractionDigits; + + /* in trim_mode DptZeros, if right padding, add a space for the . 
*/ + if (trim_mode == TrimMode_DptZeros && numFractionDigits == 0 + && pos < maxPrintLen) { + buffer[pos++] = ' '; + } + + if (pos + count > maxPrintLen) { + count = maxPrintLen - pos; + } + + for ( ; count > 0; count--) { + buffer[pos++] = ' '; + } + } + /* add any whitespace padding to left side */ + if (digits_left > numWholeDigits + has_sign) { + npy_int32 shift = digits_left - (numWholeDigits + has_sign); + npy_int32 count = pos; + + if (count + shift > maxPrintLen){ + count = maxPrintLen - shift; + } + + if (count > 0) { + memmove(buffer + shift, buffer, count); + } + pos = shift + count; + for ( ; shift > 0; shift--) { + buffer[shift - 1] = ' '; + } + } + + /* terminate the buffer */ + DEBUG_ASSERT(pos <= maxPrintLen); + buffer[pos] = '\0'; + + return pos; +} + +/* + * Outputs the positive number with scientific notation: d.dddde[sign]ddd + * The output is always NUL terminated and the output length (not including the + * NUL) is returned. + * Arguments: + * buffer - buffer to output into + * bufferSize - maximum characters that can be printed to buffer + * mantissa - value significand + * exponent - value exponent in base 2 + * signbit - value of the sign position. Should be '+', '-' or '' + * mantissaBit - index of the highest set mantissa bit + * hasUnequalMargins - is the high margin twice as large as the low margin + * precision - Negative prints as many digits as are needed for a unique + * number. Positive specifies the maximum number of significant + * digits to print past the decimal point. + * trim_mode - how to treat trailing 0s and '.'. See TrimMode comments. + * digits_left - pad characters to left of decimal point. 
-1 for no padding + * exp_digits - pads exponent with zeros until it has this many digits + */ +static npy_uint32 +FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, + npy_int32 exponent, char signbit, npy_uint32 mantissaBit, + npy_bool hasUnequalMargins, DigitMode digit_mode, + npy_int32 precision, TrimMode trim_mode, + npy_int32 digits_left, npy_int32 exp_digits) +{ + npy_int32 printExponent; + npy_int32 numDigits; + char *pCurOut; + npy_int32 numFractionDigits; + npy_int32 leftchars; + + if (digit_mode != DigitMode_Unique) { + DEBUG_ASSERT(precision >= 0); + } + + + DEBUG_ASSERT(bufferSize > 0); + + pCurOut = buffer; + + /* add any whitespace padding to left side */ + leftchars = 1 + (signbit == '-' || signbit == '+'); + if (digits_left > leftchars) { + int i; + for (i = 0; i < digits_left - leftchars && bufferSize > 1; i++){ + *pCurOut = ' '; + pCurOut++; + --bufferSize; + } + } + + if (signbit == '+' && bufferSize > 1) { + *pCurOut = '+'; + pCurOut++; + --bufferSize; + } + else if (signbit == '-' && bufferSize > 1) { + *pCurOut = '-'; + pCurOut++; + --bufferSize; + } + + numDigits = Dragon4(mantissa, exponent, mantissaBit, hasUnequalMargins, + digit_mode, CutoffMode_TotalLength, precision + 1, + pCurOut, bufferSize, &printExponent); + + DEBUG_ASSERT(numDigits > 0); + DEBUG_ASSERT(numDigits <= bufferSize); + + /* keep the whole number as the first digit */ + if (bufferSize > 1) { + pCurOut += 1; + bufferSize -= 1; + } + + /* insert the decimal point prior to the fractional number */ + numFractionDigits = numDigits-1; + if (numFractionDigits > 0 && bufferSize > 1) { + npy_int32 maxFractionDigits = (npy_int32)bufferSize - 2; + + if (numFractionDigits > maxFractionDigits) { + numFractionDigits = maxFractionDigits; + } + + memmove(pCurOut + 1, pCurOut, numFractionDigits); + pCurOut[0] = '.'; + pCurOut += (1 + numFractionDigits); + bufferSize -= (1 + numFractionDigits); + } + + /* always add decimal point, except for DprZeros mode */ + if 
(trim_mode != TrimMode_DptZeros && numFractionDigits == 0 && + bufferSize > 1){ + *pCurOut = '.'; + ++pCurOut; + --bufferSize; + } + + if (trim_mode == TrimMode_LeaveOneZero) { + /* if we didn't print any fractional digits, add the 0 */ + if (numFractionDigits == 0 && bufferSize > 1) { + *pCurOut = '0'; + ++pCurOut; + --bufferSize; + ++numFractionDigits; + } + } + else if (trim_mode == TrimMode_None && + digit_mode != DigitMode_Unique) { + /* add trailing zeros up to precision length */ + if (precision > (npy_int32)numFractionDigits) { + char *pEnd; + /* compute the number of trailing zeros needed */ + npy_int32 numZeros = (precision - numFractionDigits); + + if (numZeros > (npy_int32)bufferSize - 1) { + numZeros = (npy_int32)bufferSize - 1; + } + + for (pEnd = pCurOut + numZeros; pCurOut < pEnd; ++pCurOut) { + *pCurOut = '0'; + ++numFractionDigits; + } + } + } + /* else, for trim_mode Zeros or DptZeros, there is nothing more to add */ + + /* + * when rounding, we may still end up with trailing zeros. Remove them + * depending on trim settings. 
+ */ + if (precision >= 0 && trim_mode != TrimMode_None && numFractionDigits > 0){ + --pCurOut; + while (*pCurOut == '0') { + --pCurOut; + ++bufferSize; + --numFractionDigits; + } + if (trim_mode == TrimMode_LeaveOneZero && *pCurOut == '.') { + ++pCurOut; + *pCurOut = '0'; + --bufferSize; + ++numFractionDigits; + } + ++pCurOut; + } + + /* print the exponent into a local buffer and copy into output buffer */ + if (bufferSize > 1) { + char exponentBuffer[7]; + npy_int32 digits[5]; + npy_int32 i, exp_size, count; + + if (exp_digits > 5) { + exp_digits = 5; + } + if (exp_digits < 0) { + exp_digits = 2; + } + + exponentBuffer[0] = 'e'; + if (printExponent >= 0) { + exponentBuffer[1] = '+'; + } + else { + exponentBuffer[1] = '-'; + printExponent = -printExponent; + } + + DEBUG_ASSERT(printExponent < 100000); + + /* get exp digits */ + for (i = 0; i < 5; i++){ + digits[i] = printExponent % 10; + printExponent /= 10; + } + /* count back over leading zeros */ + for (i = 5; i > exp_digits && digits[i-1] == 0; i--) { + } + exp_size = i; + /* write remaining digits to tmp buf */ + for (i = exp_size; i > 0; i--){ + exponentBuffer[2 + (exp_size-i)] = (char)('0' + digits[i-1]); + } + + /* copy the exponent buffer into the output */ + count = exp_size + 2; + if (count > (npy_int32)bufferSize - 1) { + count = (npy_int32)bufferSize - 1; + } + memcpy(pCurOut, exponentBuffer, count); + pCurOut += count; + bufferSize -= count; + } + + + DEBUG_ASSERT(bufferSize > 0); + pCurOut[0] = '\0'; + + return pCurOut - buffer; +} + +/* + * Print a hexadecimal value with a given width. + * The output string is always NUL terminated and the string length (not + * including the NUL) is returned. 
+ */ +/* Unused for now +static npy_uint32 +PrintHex(char * buffer, npy_uint32 bufferSize, npy_uint64 value, + npy_uint32 width) +{ + const char digits[] = "0123456789abcdef"; + char *pCurOut; + + DEBUG_ASSERT(bufferSize > 0); + + npy_uint32 maxPrintLen = bufferSize-1; + if (width > maxPrintLen) { + width = maxPrintLen; + } + + pCurOut = buffer; + while (width > 0) { + --width; + + npy_uint8 digit = (npy_uint8)((value >> 4ull*(npy_uint64)width) & 0xF); + *pCurOut = digits[digit]; + + ++pCurOut; + } + + *pCurOut = '\0'; + return pCurOut - buffer; +} +*/ + +/* + * Print special case values for infinities and NaNs. + * The output string is always NUL terminated and the string length (not + * including the NUL) is returned. + */ +static npy_uint32 +PrintInfNan(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, + npy_uint32 mantissaHexWidth, char signbit) +{ + npy_uint32 maxPrintLen = bufferSize-1; + npy_uint32 pos = 0; + + DEBUG_ASSERT(bufferSize > 0); + + /* Check for infinity */ + if (mantissa == 0) { + npy_uint32 printLen; + + /* only print sign for inf values (though nan can have a sign set) */ + if (signbit == '+') { + if (pos < maxPrintLen-1){ + buffer[pos++] = '+'; + } + } + else if (signbit == '-') { + if (pos < maxPrintLen-1){ + buffer[pos++] = '-'; + } + } + + /* copy and make sure the buffer is terminated */ + printLen = (3 < maxPrintLen - pos) ? 3 : maxPrintLen - pos; + memcpy(buffer + pos, "inf", printLen); + buffer[pos + printLen] = '\0'; + return pos + printLen; + } + else { + /* copy and make sure the buffer is terminated */ + npy_uint32 printLen = (3 < maxPrintLen - pos) ? 3 : maxPrintLen - pos; + memcpy(buffer + pos, "nan", printLen); + buffer[pos + printLen] = '\0'; + + /* + * // XXX: Should we change this for numpy? 
+ * // append HEX value + * if (maxPrintLen > 3) { + * printLen += PrintHex(buffer+3, bufferSize-3, mantissa, + * mantissaHexWidth); + * } + */ + + return pos + printLen; + } +} + +/* + * These functions print a floating-point number as a decimal string. + * The output string is always NUL terminated and the string length (not + * including the NUL) is returned. + * + * Arguments are: + * buffer - buffer to output into + * bufferSize - maximum characters that can be printed to buffer + * value - value significand + * scientific - boolean controlling whether scientific notation is used + * precision - If positive, specifies the number of decimals to show after + * decimal point. If negative, sufficient digits to uniquely + * specify the float will be output. + * trim_mode - how to treat trailing zeros and decimal point. See TrimMode. + * digits_right - pad the result with '' on the right past the decimal point + * digits_left - pad the result with '' on the right past the decimal point + * exp_digits - Only affects scientific output. If positive, pads the + * exponent with 0s until there are this many digits. If + * negative, only use sufficient digits. 
+ */ +static npy_uint32 +Dragon4_PrintFloat16(char *buffer, npy_uint32 bufferSize, npy_uint16 value, + npy_bool scientific, DigitMode digit_mode, + CutoffMode cutoff_mode, npy_int32 precision, + npy_bool sign, TrimMode trim_mode, npy_int32 digits_left, + npy_int32 digits_right, npy_int32 exp_digits) +{ + FloatUnion16 floatUnion; + npy_uint32 floatExponent, floatMantissa; + + npy_uint32 mantissa; + npy_int32 exponent; + npy_uint32 mantissaBit; + npy_bool hasUnequalMargins; + char signbit = '\0'; + + if (bufferSize == 0) { + return 0; + } + + if (bufferSize == 1) { + buffer[0] = '\0'; + return 0; + } + + /* deconstruct the floating point value */ + floatUnion.integer = value; + floatExponent = GetExponent_F16(&floatUnion); + floatMantissa = GetMantissa_F16(&floatUnion); + + /* output the sign */ + if (IsNegative_F16(&floatUnion)) { + signbit = '-'; + } + else if (sign) { + signbit = '+'; + } + + /* if this is a special value */ + if (floatExponent == 0x1F) { + return PrintInfNan(buffer, bufferSize, floatMantissa, 3, signbit); + } + /* else this is a number */ + + /* factor the value into its parts */ + if (floatExponent != 0) { + /* + * normalized + * The floating point equation is: + * value = (1 + mantissa/2^10) * 2 ^ (exponent-15) + * We convert the integer equation by factoring a 2^10 out of the + * exponent + * value = (1 + mantissa/2^10) * 2^10 * 2 ^ (exponent-15-10) + * value = (2^10 + mantissa) * 2 ^ (exponent-15-10) + * Because of the implied 1 in front of the mantissa we have 10 bits of + * precision. 
+ * m = (2^10 + mantissa) + * e = (exponent-15-10) + */ + mantissa = (1UL << 10) | floatMantissa; + exponent = floatExponent - 15 - 10; + mantissaBit = 10; + hasUnequalMargins = (floatExponent != 1) && (floatMantissa == 0); + } + else { + /* + * denormalized + * The floating point equation is: + * value = (mantissa/2^10) * 2 ^ (1-15) + * We convert the integer equation by factoring a 2^23 out of the + * exponent + * value = (mantissa/2^10) * 2^10 * 2 ^ (1-15-10) + * value = mantissa * 2 ^ (1-15-10) + * We have up to 10 bits of precision. + * m = (mantissa) + * e = (1-15-10) + */ + mantissa = floatMantissa; + exponent = 1 - 15 - 10; + mantissaBit = LogBase2_32(mantissa); + hasUnequalMargins = NPY_FALSE; + } + + /* format the value */ + if (scientific) { + return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit, + mantissaBit, hasUnequalMargins, digit_mode, + precision, trim_mode, digits_left, exp_digits); + } + else { + return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit, + mantissaBit, hasUnequalMargins, digit_mode, + cutoff_mode, precision, trim_mode, + digits_left, digits_right); + } +} + +static npy_uint32 +Dragon4_PrintFloat32(char *buffer, npy_uint32 bufferSize, npy_float32 value, + npy_bool scientific, DigitMode digit_mode, + CutoffMode cutoff_mode, npy_int32 precision, + npy_bool sign, TrimMode trim_mode, npy_int32 digits_left, + npy_int32 digits_right, npy_int32 exp_digits) +{ + FloatUnion32 floatUnion; + npy_uint32 floatExponent, floatMantissa; + + npy_uint32 mantissa; + npy_int32 exponent; + npy_uint32 mantissaBit; + npy_bool hasUnequalMargins; + char signbit = '\0'; + + if (bufferSize == 0) { + return 0; + } + + if (bufferSize == 1) { + buffer[0] = '\0'; + return 0; + } + + /* deconstruct the floating point value */ + floatUnion.floatingPoint = value; + floatExponent = GetExponent_F32(&floatUnion); + floatMantissa = GetMantissa_F32(&floatUnion); + + /* output the sign */ + if (IsNegative_F32(&floatUnion)) { + signbit = 
'-'; + } + else if (sign) { + signbit = '+'; + } + + /* if this is a special value */ + if (floatExponent == 0xFF) { + return PrintInfNan(buffer, bufferSize, floatMantissa, 6, signbit); + } + /* else this is a number */ + + /* factor the value into its parts */ + if (floatExponent != 0) { + /* + * normalized + * The floating point equation is: + * value = (1 + mantissa/2^23) * 2 ^ (exponent-127) + * We convert the integer equation by factoring a 2^23 out of the + * exponent + * value = (1 + mantissa/2^23) * 2^23 * 2 ^ (exponent-127-23) + * value = (2^23 + mantissa) * 2 ^ (exponent-127-23) + * Because of the implied 1 in front of the mantissa we have 24 bits of + * precision. + * m = (2^23 + mantissa) + * e = (exponent-127-23) + */ + mantissa = (1UL << 23) | floatMantissa; + exponent = floatExponent - 127 - 23; + mantissaBit = 23; + hasUnequalMargins = (floatExponent != 1) && (floatMantissa == 0); + } + else { + /* + * denormalized + * The floating point equation is: + * value = (mantissa/2^23) * 2 ^ (1-127) + * We convert the integer equation by factoring a 2^23 out of the + * exponent + * value = (mantissa/2^23) * 2^23 * 2 ^ (1-127-23) + * value = mantissa * 2 ^ (1-127-23) + * We have up to 23 bits of precision. 
+ * m = (mantissa) + * e = (1-127-23) + */ + mantissa = floatMantissa; + exponent = 1 - 127 - 23; + mantissaBit = LogBase2_32(mantissa); + hasUnequalMargins = NPY_FALSE; + } + + /* format the value */ + if (scientific) { + return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit, + mantissaBit, hasUnequalMargins, digit_mode, + precision, trim_mode, digits_left, exp_digits); + } + else { + return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit, + mantissaBit, hasUnequalMargins, digit_mode, + cutoff_mode, precision, trim_mode, + digits_left, digits_right); + } +} + +static npy_uint32 +Dragon4_PrintFloat64(char *buffer, npy_uint32 bufferSize, npy_float64 value, + npy_bool scientific, DigitMode digit_mode, + CutoffMode cutoff_mode, npy_int32 precision, + npy_bool sign, TrimMode trim_mode, npy_int32 digits_left, + npy_int32 digits_right, npy_int32 exp_digits) +{ + FloatUnion64 floatUnion; + npy_uint32 floatExponent; + npy_uint64 floatMantissa; + + npy_uint64 mantissa; + npy_int32 exponent; + npy_uint32 mantissaBit; + npy_bool hasUnequalMargins; + char signbit = '\0'; + + if (bufferSize == 0) { + return 0; + } + + if (bufferSize == 1) { + buffer[0] = '\0'; + return 0; + } + + /* deconstruct the floating point value */ + floatUnion.floatingPoint = value; + floatExponent = GetExponent_F64(&floatUnion); + floatMantissa = GetMantissa_F64(&floatUnion); + + /* output the sign */ + if (IsNegative_F64(&floatUnion)) { + signbit = '-'; + } + else if (sign) { + signbit = '+'; + } + + /* if this is a special value */ + if (floatExponent == 0x7FF) { + return PrintInfNan(buffer, bufferSize, floatMantissa, 13, signbit); + } + /* else this is a number */ + + /* factor the value into its parts */ + if (floatExponent != 0) { + /* + * normal + * The floating point equation is: + * value = (1 + mantissa/2^52) * 2 ^ (exponent-1023) + * We convert the integer equation by factoring a 2^52 out of the + * exponent + * value = (1 + mantissa/2^52) * 2^52 * 2 ^ 
(exponent-1023-52) + * value = (2^52 + mantissa) * 2 ^ (exponent-1023-52) + * Because of the implied 1 in front of the mantissa we have 53 bits of + * precision. + * m = (2^52 + mantissa) + * e = (exponent-1023+1-53) + */ + mantissa = (1ull << 52) | floatMantissa; + exponent = floatExponent - 1023 - 52; + mantissaBit = 52; + hasUnequalMargins = (floatExponent != 1) && (floatMantissa == 0); + } + else { + /* + * subnormal + * The floating point equation is: + * value = (mantissa/2^52) * 2 ^ (1-1023) + * We convert the integer equation by factoring a 2^52 out of the + * exponent + * value = (mantissa/2^52) * 2^52 * 2 ^ (1-1023-52) + * value = mantissa * 2 ^ (1-1023-52) + * We have up to 52 bits of precision. + * m = (mantissa) + * e = (1-1023-52) + */ + mantissa = floatMantissa; + exponent = 1 - 1023 - 52; + mantissaBit = LogBase2_64(mantissa); + hasUnequalMargins = NPY_FALSE; + } + + /* format the value */ + if (scientific) { + return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit, + mantissaBit, hasUnequalMargins, digit_mode, + precision, trim_mode, digits_left, exp_digits); + } + else { + return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit, + mantissaBit, hasUnequalMargins, digit_mode, + cutoff_mode, precision, trim_mode, + digits_left, digits_right); + } +} + +#if !(defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_BE) || \ + defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_LE)) +static npy_uint32 +Dragon4_PrintFloat128(char *buffer, npy_uint32 bufferSize, FloatVal128 value, + npy_bool scientific, DigitMode digit_mode, + CutoffMode cutoff_mode, npy_int32 precision, + npy_bool sign, TrimMode trim_mode, npy_int32 digits_left, + npy_int32 digits_right, npy_int32 exp_digits) +{ + npy_uint32 floatExponent; + npy_uint64 floatMantissa; + + npy_uint64 mantissa; + npy_int32 exponent; + npy_uint32 mantissaBit; + npy_bool hasUnequalMargins; + char signbit = '\0'; + + if (bufferSize == 0) { + return 0; + } + + if (bufferSize == 1) { + buffer[0] = '\0'; + return 0; + 
} + + /* deconstruct the floating point value */ + floatExponent = GetExponent_F128(&value); + floatMantissa = GetMantissa_F128(&value); + + /* output the sign */ + if (IsNegative_F128(&value)) { + signbit = '-'; + } + else if (sign) { + signbit = '+'; + } + + /* if this is a special value */ + if (floatExponent == 0x7FFF) { + return PrintInfNan(buffer, bufferSize, floatMantissa, 16, signbit); + } + /* else this is a number */ + + /* factor the value into its parts */ + if (floatExponent != 0) { + /* + * normal + * The floating point equation is: + * value = (1 + mantissa/2^63) * 2 ^ (exponent-16383) + * We convert the integer equation by factoring a 2^63 out of the + * exponent + * value = (1 + mantissa/2^63) * 2^63 * 2 ^ (exponent-16383-63) + * value = (2^63 + mantissa) * 2 ^ (exponent-16383-63) + * Because of the implied 1 in front of the mantissa we have 64 bits of + * precision. + * m = (2^63 + mantissa) + * e = (exponent-16383+1-64) + */ + mantissa = (1ull << 63) | floatMantissa; + exponent = floatExponent - 16383 - 63; + mantissaBit = 63; + hasUnequalMargins = (floatExponent != 1) && (floatMantissa == 0); + } + else { + /* + * subnormal + * The floating point equation is: + * value = (mantissa/2^63) * 2 ^ (1-16383) + * We convert the integer equation by factoring a 2^52 out of the + * exponent + * value = (mantissa/2^63) * 2^52 * 2 ^ (1-16383-63) + * value = mantissa * 2 ^ (1-16383-63) + * We have up to 63 bits of precision. 
+ * m = (mantissa) + * e = (1-16383-63) + */ + mantissa = floatMantissa; + exponent = 1 - 16383 - 63; + mantissaBit = LogBase2_64(mantissa); + hasUnequalMargins = NPY_FALSE; + } + + /* format the value */ + if (scientific) { + return FormatScientific(buffer, bufferSize, mantissa, exponent, signbit, + mantissaBit, hasUnequalMargins, digit_mode, + precision, trim_mode, digits_left, exp_digits); + } + else { + return FormatPositional(buffer, bufferSize, mantissa, exponent, signbit, + mantissaBit, hasUnequalMargins, digit_mode, + cutoff_mode, precision, trim_mode, + digits_left, digits_right); + } +} +#endif /* DOUBLE_DOUBLE */ + +PyObject * +Dragon4_Positional_AnySize(void *val, size_t size, DigitMode digit_mode, + CutoffMode cutoff_mode, int precision, int sign, + TrimMode trim, int pad_left, int pad_right) +{ + /* + * Use a very large buffer in case anyone tries to output a large numberG. + * 16384 should be enough to uniquely print any float128, which goes up + * to about 10^4932 */ + static char repr[16384]; +#if !(defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_BE) || \ + defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_LE)) + FloatVal128 val128; +#ifdef NPY_FLOAT128 + FloatUnion128 buf128; +#endif +#else /* DOUBLE_DOUBLE */ + PyObject *out, *ret; +#endif /* DOUBLE_DOUBLE */ + +#ifdef NPY_FLOAT80 + FloatUnion80 buf80;; +#endif +#ifdef NPY_FLOAT96 + FloatUnion96 buf96; +#endif + + switch (size) { + case 2: + Dragon4_PrintFloat16(repr, sizeof(repr), *(npy_float16*)val, + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); + break; + case 4: + Dragon4_PrintFloat32(repr, sizeof(repr), *(npy_float32*)val, + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); + break; + case 8: + Dragon4_PrintFloat64(repr, sizeof(repr), *(npy_float64*)val, + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); + break; +#ifdef NPY_FLOAT80 + case 10: + buf80.floatingPoint = *(npy_float80*)val; + val128.integer[0] = buf80.integer.a; + 
val128.integer[1] = buf80.integer.b; + Dragon4_PrintFloat128(repr, sizeof(repr), val128, + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); + break; +#endif +#ifdef NPY_FLOAT96 + case 12: + buf96.floatingPoint = *(npy_float96*)val; + val128.integer[0] = buf96.integer.a; + val128.integer[1] = buf96.integer.b; + Dragon4_PrintFloat128(repr, sizeof(repr), val128, + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); + break; +#endif +#ifdef NPY_FLOAT128 + case 16: +/* Numpy 1.14 does not support the DOUBLE_DOUBLE format properly */ +#if defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_BE) || \ + defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_LE) + PyOS_snprintf(repr, sizeof(repr), "%.*Lf", precision, + *(npy_float128*)val); + out = PyUString_FromString(repr); + if (out == NULL) { + return out; + } + /* strip trailing zeros to roughly emulate normal behavior */ + ret = PyObject_CallMethod(out, "rstrip", "s", "0"); + Py_DECREF(out); + return ret; +#else + buf128.floatingPoint = *(npy_float128*)val; + val128.integer[0] = buf128.integer.a; + val128.integer[1] = buf128.integer.b; + Dragon4_PrintFloat128(repr, sizeof(repr), val128, + 0, digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right, -1); +#endif /* DOUBLE_DOUBLE */ + break; +#endif /* NPY_FLOAT128 */ + default: + PyErr_Format(PyExc_ValueError, "unexpected itemsize %zu", size); + return NULL; + } + + return PyUString_FromString(repr); +} + +PyObject * +Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode, + int precision, int sign, TrimMode trim, int pad_left, + int pad_right) +{ + double val; + + if (PyArray_IsScalar(obj, Half)) { + npy_half x = ((PyHalfScalarObject *)obj)->obval; + return Dragon4_Positional_AnySize(&x, sizeof(npy_half), + digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right); + } + else if (PyArray_IsScalar(obj, Float)) { + npy_float x = ((PyFloatScalarObject *)obj)->obval; + return 
Dragon4_Positional_AnySize(&x, sizeof(npy_float), + digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right); + } + else if (PyArray_IsScalar(obj, Double)) { + npy_double x = ((PyDoubleScalarObject *)obj)->obval; + return Dragon4_Positional_AnySize(&x, sizeof(npy_double), + digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right); + } + else if (PyArray_IsScalar(obj, LongDouble)) { + npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval; + return Dragon4_Positional_AnySize(&x, sizeof(npy_longdouble), + digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right); + } + + val = PyFloat_AsDouble(obj); + if (PyErr_Occurred()) { + return NULL; + } + return Dragon4_Positional_AnySize(&val, sizeof(double), + digit_mode, cutoff_mode, precision, + sign, trim, pad_left, pad_right); +} + +PyObject * +Dragon4_Scientific_AnySize(void *val, size_t size, DigitMode digit_mode, + int precision, int sign, TrimMode trim, + int pad_left, int exp_digits) +{ + /* use a very large buffer in case anyone tries to output a large precision */ + static char repr[4096]; +#if !(defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_BE) || \ + defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_LE)) + FloatVal128 val128; +#ifdef NPY_FLOAT128 + FloatUnion128 buf128; +#endif +#endif /* DOUBLE_DOUBLE */ + +#ifdef NPY_FLOAT80 + FloatUnion80 buf80;; +#endif +#ifdef NPY_FLOAT96 + FloatUnion96 buf96; +#endif + + /* dummy, is ignored in scientific mode */ + CutoffMode cutoff_mode = CutoffMode_TotalLength; + + switch (size) { + case 2: + Dragon4_PrintFloat16(repr, sizeof(repr), *(npy_float16*)val, + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); + break; + case 4: + Dragon4_PrintFloat32(repr, sizeof(repr), *(npy_float32*)val, + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); + break; + case 8: + Dragon4_PrintFloat64(repr, sizeof(repr), *(npy_float64*)val, + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, 
exp_digits); + break; +#ifdef NPY_FLOAT80 + case 10: + buf80.floatingPoint = *(npy_float80*)val; + val128.integer[0] = buf80.integer.a; + val128.integer[1] = buf80.integer.b; + Dragon4_PrintFloat128(repr, sizeof(repr), val128, + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); + break; +#endif +#ifdef NPY_FLOAT96 + case 12: + buf96.floatingPoint = *(npy_float96*)val; + val128.integer[0] = buf96.integer.a; + val128.integer[1] = buf96.integer.b; + Dragon4_PrintFloat128(repr, sizeof(repr), val128, + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); + break; +#endif +#ifdef NPY_FLOAT128 + case 16: +/* Numpy 1.14 does not support the DOUBLE_DOUBLE format properly */ +#if defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_BE) || \ + defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_LE) + PyOS_snprintf(repr, sizeof(repr), "%.*Le", precision, + *(npy_float128*)val); +#else + buf128.floatingPoint = *(npy_float128*)val; + val128.integer[0] = buf128.integer.a; + val128.integer[1] = buf128.integer.b; + Dragon4_PrintFloat128(repr, sizeof(repr), val128, + 1, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, -1, exp_digits); +#endif /* DOUBLE_DOUBLE */ + break; +#endif /* NPY_FLOAT128 */ + default: + PyErr_Format(PyExc_ValueError, "unexpected itemsize %zu", size); + return NULL; + } + + return PyUString_FromString(repr); +} + +PyObject * +Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision, + int sign, TrimMode trim, int pad_left, int exp_digits) +{ + double val; + + if (PyArray_IsScalar(obj, Half)) { + npy_half x = ((PyHalfScalarObject *)obj)->obval; + return Dragon4_Scientific_AnySize(&x, sizeof(npy_half), + digit_mode, precision, + sign, trim, pad_left, exp_digits); + } + else if (PyArray_IsScalar(obj, Float)) { + npy_float x = ((PyFloatScalarObject *)obj)->obval; + return Dragon4_Scientific_AnySize(&x, sizeof(npy_float), + digit_mode, precision, + sign, trim, pad_left, exp_digits); + } + else if (PyArray_IsScalar(obj, 
Double)) { + npy_double x = ((PyDoubleScalarObject *)obj)->obval; + return Dragon4_Scientific_AnySize(&x, sizeof(npy_double), + digit_mode, precision, + sign, trim, pad_left, exp_digits); + } + else if (PyArray_IsScalar(obj, LongDouble)) { + npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval; + return Dragon4_Scientific_AnySize(&x, sizeof(npy_longdouble), + digit_mode, precision, + sign, trim, pad_left, exp_digits); + } + + val = PyFloat_AsDouble(obj); + if (PyErr_Occurred()) { + return NULL; + } + return Dragon4_Scientific_AnySize(&val, sizeof(double), + digit_mode, precision, + sign, trim, pad_left, exp_digits); +} diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/dragon4.h python-numpy-1.14.5/numpy/core/src/multiarray/dragon4.h --- python-numpy-1.13.3/numpy/core/src/multiarray/dragon4.h 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/dragon4.h 2018-06-12 17:35:36.000000000 +0000 @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2014 Ryan Juckett + * http://www.ryanjuckett.com/ + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would be + * appreciated but is not required. + * + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * + * 3. This notice may not be removed or altered from any source + * distribution. 
+ */ + +/* + * This file contains a modified version of Ryan Juckett's Dragon4 + * implementation, which has been ported from C++ to C and which has + * modifications specific to printing floats in numpy. + */ + +#ifndef _NPY_DRAGON4_H_ +#define _NPY_DRAGON4_H_ + +#include "Python.h" +#include "structmember.h" +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE +#include "numpy/arrayobject.h" +#include "npy_config.h" +#include "npy_pycompat.h" +#include "numpy/arrayscalars.h" + +typedef enum DigitMode +{ + /* Round digits to print shortest uniquely identifiable number. */ + DigitMode_Unique, + /* Output the digits of the number as if with infinite precision */ + DigitMode_Exact, +} DigitMode; + +typedef enum CutoffMode +{ + /* up to cutoffNumber significant digits */ + CutoffMode_TotalLength, + /* up to cutoffNumber significant digits past the decimal point */ + CutoffMode_FractionLength, +} CutoffMode; + +typedef enum TrimMode +{ + TrimMode_None, /* don't trim zeros, always leave a decimal point */ + TrimMode_LeaveOneZero, /* trim all but the zero before the decimal point */ + TrimMode_Zeros, /* trim all trailing zeros, leave decimal point */ + TrimMode_DptZeros, /* trim trailing zeros & trailing decimal point */ +} TrimMode; + +PyObject * +Dragon4_Positional_AnySize(void *val, size_t size, DigitMode digit_mode, + CutoffMode cutoff_mode, int precision, int sign, + TrimMode trim, int pad_left, int pad_right); + +PyObject * +Dragon4_Scientific_AnySize(void *val, size_t size, DigitMode digit_mode, + int precision, int sign, TrimMode trim, + int pad_left, int pad_right); + +PyObject * +Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode, + int precision, int sign, TrimMode trim, int pad_left, + int pad_right); + +PyObject * +Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision, + int sign, TrimMode trim, int pad_left, int exp_digits); + +#endif + diff -Nru 
python-numpy-1.13.3/numpy/core/src/multiarray/dtype_transfer.c python-numpy-1.14.5/numpy/core/src/multiarray/dtype_transfer.c --- python-numpy-1.13.3/numpy/core/src/multiarray/dtype_transfer.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/dtype_transfer.c 2018-06-12 18:28:52.000000000 +0000 @@ -25,9 +25,11 @@ #include "ctors.h" #include "_datetime.h" #include "datetime_strings.h" +#include "descriptor.h" #include "shape.h" #include "lowlevel_strided_loops.h" +#include "alloc.h" #define NPY_LOWLEVEL_BUFFER_BLOCKSIZE 128 @@ -2342,7 +2344,7 @@ if (PyDataType_HASSUBARRAY(dst_dtype)) { if (!(PyArray_IntpConverter(dst_dtype->subarray->shape, &dst_shape))) { - PyDimMem_FREE(src_shape.ptr); + npy_free_cache_dim_obj(src_shape); PyErr_SetString(PyExc_ValueError, "invalid subarray shape"); return NPY_FAIL; @@ -2355,8 +2357,8 @@ * Just a straight one-element copy. */ if (dst_size == 1 && src_size == 1) { - PyDimMem_FREE(src_shape.ptr); - PyDimMem_FREE(dst_shape.ptr); + npy_free_cache_dim_obj(src_shape); + npy_free_cache_dim_obj(dst_shape); return PyArray_GetDTypeTransferFunction(aligned, src_stride, dst_stride, @@ -2367,8 +2369,8 @@ } /* Copy the src value to all the dst values */ else if (src_size == 1) { - PyDimMem_FREE(src_shape.ptr); - PyDimMem_FREE(dst_shape.ptr); + npy_free_cache_dim_obj(src_shape); + npy_free_cache_dim_obj(dst_shape); return get_one_to_n_transfer_function(aligned, src_stride, dst_stride, @@ -2382,8 +2384,8 @@ else if (src_shape.len == dst_shape.len && PyArray_CompareLists(src_shape.ptr, dst_shape.ptr, src_shape.len)) { - PyDimMem_FREE(src_shape.ptr); - PyDimMem_FREE(dst_shape.ptr); + npy_free_cache_dim_obj(src_shape); + npy_free_cache_dim_obj(dst_shape); return get_n_to_n_transfer_function(aligned, src_stride, dst_stride, @@ -2407,8 +2409,8 @@ out_stransfer, out_transferdata, out_needs_api); - PyDimMem_FREE(src_shape.ptr); - PyDimMem_FREE(dst_shape.ptr); + npy_free_cache_dim_obj(src_shape); + 
npy_free_cache_dim_obj(dst_shape); return ret; } } @@ -2520,7 +2522,7 @@ /* * Handles fields transfer. To call this, at least one of the dtypes - * must have fields + * must have fields. Does not take care of object<->structure conversion */ static int get_fields_transfer_function(int aligned, @@ -2531,22 +2533,26 @@ NpyAuxData **out_transferdata, int *out_needs_api) { - PyObject *names, *key, *tup, *title; + PyObject *key, *tup, *title; PyArray_Descr *src_fld_dtype, *dst_fld_dtype; - npy_int i, names_size, field_count, structsize; + npy_int i, field_count, structsize; int src_offset, dst_offset; _field_transfer_data *data; _single_field_transfer *fields; + int failed = 0; + + /* + * There are three cases to take care of: 1. src is non-structured, + * 2. dst is non-structured, or 3. both are structured. + */ - /* Copy the src value to all the fields of dst */ + /* 1. src is non-structured. Copy the src value to all the fields of dst */ if (!PyDataType_HASFIELDS(src_dtype)) { - names = dst_dtype->names; - names_size = PyTuple_GET_SIZE(dst_dtype->names); + field_count = PyTuple_GET_SIZE(dst_dtype->names); - field_count = names_size; + /* Allocate the field-data structure and populate it */ structsize = sizeof(_field_transfer_data) + (field_count + 1) * sizeof(_single_field_transfer); - /* Allocate the data and populate it */ data = (_field_transfer_data *)PyArray_malloc(structsize); if (data == NULL) { PyErr_NoMemory(); @@ -2556,8 +2562,8 @@ data->base.clone = &_field_transfer_data_clone; fields = &data->fields; - for (i = 0; i < names_size; ++i) { - key = PyTuple_GET_ITEM(names, i); + for (i = 0; i < field_count; ++i) { + key = PyTuple_GET_ITEM(dst_dtype->names, i); tup = PyDict_GetItem(dst_dtype->fields, key); if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, &dst_offset, &title)) { @@ -2583,7 +2589,7 @@ } /* - * If the references should be removed from src, add + * If references should be decrefd in src, add * another transfer function to do that. 
*/ if (move_references && PyDataType_REFCHK(src_dtype)) { @@ -2611,24 +2617,19 @@ return NPY_SUCCEED; } - /* Copy the value of the first field to dst */ - else if (!PyDataType_HASFIELDS(dst_dtype)) { - names = src_dtype->names; - names_size = PyTuple_GET_SIZE(src_dtype->names); - /* - * If DECREF is needed on source fields, may need - * to process all the fields - */ - if (move_references && PyDataType_REFCHK(src_dtype)) { - field_count = names_size + 1; - } - else { - field_count = 1; + /* 2. dst is non-structured. Allow transfer from single-field src to dst */ + if (!PyDataType_HASFIELDS(dst_dtype)) { + if (PyTuple_GET_SIZE(src_dtype->names) != 1) { + PyErr_SetString(PyExc_ValueError, + "Can't cast from structure to non-structure, except if the " + "structure only has a single field."); + return NPY_FAIL; } + + /* Allocate the field-data structure and populate it */ structsize = sizeof(_field_transfer_data) + - field_count * sizeof(_single_field_transfer); - /* Allocate the data and populate it */ + 1 * sizeof(_single_field_transfer); data = (_field_transfer_data *)PyArray_malloc(structsize); if (data == NULL) { PyErr_NoMemory(); @@ -2638,286 +2639,102 @@ data->base.clone = &_field_transfer_data_clone; fields = &data->fields; - key = PyTuple_GET_ITEM(names, 0); + key = PyTuple_GET_ITEM(src_dtype->names, 0); tup = PyDict_GetItem(src_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, - &src_offset, &title)) { - PyArray_free(data); + if (!PyArg_ParseTuple(tup, "Oi|O", + &src_fld_dtype, &src_offset, &title)) { return NPY_FAIL; } - field_count = 0; - /* - * Special case bool type, the existence of fields implies True - * - * TODO: Perhaps a better behavior would be to combine all the - * input fields with an OR? The same would apply to subarrays. 
- */ - if (dst_dtype->type_num == NPY_BOOL) { - if (get_bool_setdstone_transfer_function(dst_stride, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = 0; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = 0; - field_count++; - - /* If the src field has references, may need to clear them */ - if (move_references && PyDataType_REFCHK(src_fld_dtype)) { - if (get_decsrcref_transfer_function(0, - src_stride, - src_fld_dtype, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - NPY_AUXDATA_FREE(fields[0].data); - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = src_offset; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = src_fld_dtype->elsize; - field_count++; - } - } - /* Transfer the first field to the output */ - else { - if (PyArray_GetDTypeTransferFunction(0, - src_stride, dst_stride, - src_fld_dtype, dst_dtype, - move_references, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = src_offset; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = src_fld_dtype->elsize; - field_count++; - } - /* - * If the references should be removed from src, add - * more transfer functions to decrement the references - * for all the other fields. 
- */ - if (move_references && PyDataType_REFCHK(src_dtype)) { - for (i = 1; i < names_size; ++i) { - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(src_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, - &src_offset, &title)) { - return NPY_FAIL; - } - if (PyDataType_REFCHK(src_fld_dtype)) { - if (get_decsrcref_transfer_function(0, - src_stride, - src_fld_dtype, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - for (i = field_count-1; i >= 0; --i) { - NPY_AUXDATA_FREE(fields[i].data); - } - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = src_offset; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = src_fld_dtype->elsize; - field_count++; - } - } + if (PyArray_GetDTypeTransferFunction(0, + src_stride, dst_stride, + src_fld_dtype, dst_dtype, + move_references, + &fields[0].stransfer, + &fields[0].data, + out_needs_api) != NPY_SUCCEED) { + PyArray_free(data); + return NPY_FAIL; } + fields[0].src_offset = src_offset; + fields[0].dst_offset = 0; + fields[0].src_itemsize = src_fld_dtype->elsize; - data->field_count = field_count; + data->field_count = 1; *out_stransfer = &_strided_to_strided_field_transfer; *out_transferdata = (NpyAuxData *)data; return NPY_SUCCEED; } - /* Match up the fields to copy */ - else { - /* Keeps track of the names we already used */ - PyObject *used_names_dict = NULL; - int cmpval; - - const char *msg = - "Assignment between structured arrays with different field names " - "will change in numpy 1.14.\n\n" - "Previously fields in the dst would be set to the value of the " - "identically-named field in the src. 
In numpy 1.14 fields will " - "instead be assigned 'by position': The Nth field of the dst " - "will be set to the Nth field of the src array.\n\n" - "See the release notes for details"; - /* - * 2016-09-19, 1.12 - * Warn if the field names of the dst and src are not - * identical, since then behavior will change in 1.13. - */ - cmpval = PyObject_RichCompareBool(src_dtype->names, - dst_dtype->names, Py_EQ); - if (PyErr_Occurred()) { - return NPY_FAIL; - } - if (cmpval != 1) { - if (DEPRECATE_FUTUREWARNING(msg) < 0) { - return NPY_FAIL; - } - } - names = dst_dtype->names; - names_size = PyTuple_GET_SIZE(dst_dtype->names); + /* 3. Otherwise both src and dst are structured arrays */ + field_count = PyTuple_GET_SIZE(dst_dtype->names); - /* - * If DECREF is needed on source fields, will need - * to also go through its fields. - */ - if (move_references && PyDataType_REFCHK(src_dtype)) { - field_count = names_size + PyTuple_GET_SIZE(src_dtype->names); - used_names_dict = PyDict_New(); - if (used_names_dict == NULL) { - return NPY_FAIL; - } - } - else { - field_count = names_size; - } - structsize = sizeof(_field_transfer_data) + - field_count * sizeof(_single_field_transfer); - /* Allocate the data and populate it */ - data = (_field_transfer_data *)PyArray_malloc(structsize); - if (data == NULL) { - PyErr_NoMemory(); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - data->base.free = &_field_transfer_data_free; - data->base.clone = &_field_transfer_data_clone; - fields = &data->fields; + /* Match up the fields to copy (field-by-field transfer) */ + if (PyTuple_GET_SIZE(src_dtype->names) != field_count) { + PyErr_SetString(PyExc_ValueError, "structures must have the same size"); + return NPY_FAIL; + } - for (i = 0; i < names_size; ++i) { - key = PyTuple_GET_ITEM(names, i); - tup = PyDict_GetItem(dst_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, - &dst_offset, &title)) { - for (i = i-1; i >= 0; --i) { - NPY_AUXDATA_FREE(fields[i].data); - 
} - PyArray_free(data); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - tup = PyDict_GetItem(src_dtype->fields, key); - if (tup != NULL) { - if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, - &src_offset, &title)) { - for (i = i-1; i >= 0; --i) { - NPY_AUXDATA_FREE(fields[i].data); - } - PyArray_free(data); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - if (PyArray_GetDTypeTransferFunction(0, - src_stride, dst_stride, - src_fld_dtype, dst_fld_dtype, - move_references, - &fields[i].stransfer, - &fields[i].data, - out_needs_api) != NPY_SUCCEED) { - for (i = i-1; i >= 0; --i) { - NPY_AUXDATA_FREE(fields[i].data); - } - PyArray_free(data); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - fields[i].src_offset = src_offset; - fields[i].dst_offset = dst_offset; - fields[i].src_itemsize = src_fld_dtype->elsize; + /* Allocate the field-data structure and populate it */ + structsize = sizeof(_field_transfer_data) + + field_count * sizeof(_single_field_transfer); + data = (_field_transfer_data *)PyArray_malloc(structsize); + if (data == NULL) { + PyErr_NoMemory(); + return NPY_FAIL; + } + data->base.free = &_field_transfer_data_free; + data->base.clone = &_field_transfer_data_clone; + fields = &data->fields; - if (used_names_dict != NULL) { - PyDict_SetItem(used_names_dict, key, Py_True); - } - } - else { - if (get_setdstzero_transfer_function(0, - dst_stride, - dst_fld_dtype, - &fields[i].stransfer, - &fields[i].data, - out_needs_api) != NPY_SUCCEED) { - for (i = i-1; i >= 0; --i) { - NPY_AUXDATA_FREE(fields[i].data); - } - PyArray_free(data); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - fields[i].src_offset = 0; - fields[i].dst_offset = dst_offset; - fields[i].src_itemsize = 0; - } + /* set up the transfer function for each field */ + for (i = 0; i < field_count; ++i) { + key = PyTuple_GET_ITEM(dst_dtype->names, i); + tup = PyDict_GetItem(dst_dtype->fields, key); + if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype, + &dst_offset, 
&title)) { + failed = 1; + break; + } + key = PyTuple_GET_ITEM(src_dtype->names, i); + tup = PyDict_GetItem(src_dtype->fields, key); + if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, + &src_offset, &title)) { + failed = 1; + break; } - if (move_references && PyDataType_REFCHK(src_dtype)) { - /* Use field_count to track additional functions added */ - field_count = names_size; - - names = src_dtype->names; - names_size = PyTuple_GET_SIZE(src_dtype->names); - for (i = 0; i < names_size; ++i) { - key = PyTuple_GET_ITEM(names, i); - if (PyDict_GetItem(used_names_dict, key) == NULL) { - tup = PyDict_GetItem(src_dtype->fields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype, - &src_offset, &title)) { - for (i = field_count-1; i >= 0; --i) { - NPY_AUXDATA_FREE(fields[i].data); - } - PyArray_free(data); - Py_XDECREF(used_names_dict); - return NPY_FAIL; - } - if (PyDataType_REFCHK(src_fld_dtype)) { - if (get_decsrcref_transfer_function(0, - src_stride, - src_fld_dtype, - &fields[field_count].stransfer, - &fields[field_count].data, - out_needs_api) != NPY_SUCCEED) { - for (i = field_count-1; i >= 0; --i) { - NPY_AUXDATA_FREE(fields[i].data); - } - PyArray_free(data); - return NPY_FAIL; - } - fields[field_count].src_offset = src_offset; - fields[field_count].dst_offset = 0; - fields[field_count].src_itemsize = - src_fld_dtype->elsize; - field_count++; - } - } - } + if (PyArray_GetDTypeTransferFunction(0, + src_stride, dst_stride, + src_fld_dtype, dst_fld_dtype, + move_references, + &fields[i].stransfer, + &fields[i].data, + out_needs_api) != NPY_SUCCEED) { + failed = 1; + break; } + fields[i].src_offset = src_offset; + fields[i].dst_offset = dst_offset; + fields[i].src_itemsize = src_fld_dtype->elsize; + } - Py_XDECREF(used_names_dict); + if (failed) { + for (i = i-1; i >= 0; --i) { + NPY_AUXDATA_FREE(fields[i].data); + } + PyArray_free(data); + return NPY_FAIL; + } - data->field_count = field_count; + data->field_count = field_count; - *out_stransfer = 
&_strided_to_strided_field_transfer; - *out_transferdata = (NpyAuxData *)data; + *out_stransfer = &_strided_to_strided_field_transfer; + *out_transferdata = (NpyAuxData *)data; - return NPY_SUCCEED; - } + return NPY_SUCCEED; } static int @@ -3371,7 +3188,7 @@ return NPY_FAIL; } dst_size = PyArray_MultiplyList(dst_shape.ptr, dst_shape.len); - PyDimMem_FREE(dst_shape.ptr); + npy_free_cache_dim_obj(dst_shape); /* Get a function for contiguous dst of the subarray type */ if (get_setdstzero_transfer_function(aligned, @@ -3484,7 +3301,7 @@ return NPY_FAIL; } src_size = PyArray_MultiplyList(src_shape.ptr, src_shape.len); - PyDimMem_FREE(src_shape.ptr); + npy_free_cache_dim_obj(src_shape); /* Get a function for contiguous src of the subarray type */ if (get_decsrcref_transfer_function(aligned, @@ -3648,8 +3465,10 @@ * If there are no references and the data types are equivalent, * return a simple copy */ - if (!PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) && - PyArray_EquivTypes(src_dtype, dst_dtype)) { + if (PyArray_EquivTypes(src_dtype, dst_dtype) && + !PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) && + ( !PyDataType_HASFIELDS(dst_dtype) || + is_dtype_struct_simple_unaligned_layout(dst_dtype)) ) { /* * We can't pass through the aligned flag because it's not * appropriate. 
Consider a size-8 string, it will say it's diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/einsum.c.src python-numpy-1.14.5/numpy/core/src/multiarray/einsum.c.src --- python-numpy-1.13.3/numpy/core/src/multiarray/einsum.c.src 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/einsum.c.src 2018-06-12 18:28:52.000000000 +0000 @@ -2333,6 +2333,7 @@ npy_intp coord, shape[2], strides[2][2]; char *ptrs[2][2], *ptr; sum_of_products_fn sop; + NPY_BEGIN_THREADS_DEF; #if NPY_EINSUM_DBG_TRACING NpyIter_DebugPrint(iter); @@ -2363,6 +2364,7 @@ * Since the iterator wasn't tracking coordinates, the * loop provided by the iterator is in Fortran-order. */ + NPY_BEGIN_THREADS_THRESHOLDED(shape[1] * shape[0]); for (coord = shape[1]; coord > 0; --coord) { sop(1, ptrs[0], strides[0], shape[0]); @@ -2371,6 +2373,7 @@ ptr = ptrs[1][1] + strides[1][1]; ptrs[0][1] = ptrs[1][1] = ptr; } + NPY_END_THREADS; return 0; } @@ -2381,6 +2384,7 @@ npy_intp coords[2], shape[3], strides[3][2]; char *ptrs[3][2], *ptr; sum_of_products_fn sop; + NPY_BEGIN_THREADS_DEF; #if NPY_EINSUM_DBG_TRACING NpyIter_DebugPrint(iter); @@ -2414,6 +2418,7 @@ * Since the iterator wasn't tracking coordinates, the * loop provided by the iterator is in Fortran-order. */ + NPY_BEGIN_THREADS_THRESHOLDED(shape[2] * shape[1] * shape[0]); for (coords[1] = shape[2]; coords[1] > 0; --coords[1]) { for (coords[0] = shape[1]; coords[0] > 0; --coords[0]) { sop(1, ptrs[0], strides[0], shape[0]); @@ -2428,6 +2433,7 @@ ptr = ptrs[2][1] + strides[2][1]; ptrs[0][1] = ptrs[1][1] = ptrs[2][1] = ptr; } + NPY_END_THREADS; return 0; } @@ -2438,6 +2444,7 @@ npy_intp coord, shape[2], strides[2][3]; char *ptrs[2][3], *ptr; sum_of_products_fn sop; + NPY_BEGIN_THREADS_DEF; #if NPY_EINSUM_DBG_TRACING NpyIter_DebugPrint(iter); @@ -2468,6 +2475,7 @@ * Since the iterator wasn't tracking coordinates, the * loop provided by the iterator is in Fortran-order. 
*/ + NPY_BEGIN_THREADS_THRESHOLDED(shape[1] * shape[0]); for (coord = shape[1]; coord > 0; --coord) { sop(2, ptrs[0], strides[0], shape[0]); @@ -2478,6 +2486,7 @@ ptr = ptrs[1][2] + strides[1][2]; ptrs[0][2] = ptrs[1][2] = ptr; } + NPY_END_THREADS; return 0; } @@ -2488,6 +2497,7 @@ npy_intp coords[2], shape[3], strides[3][3]; char *ptrs[3][3], *ptr; sum_of_products_fn sop; + NPY_BEGIN_THREADS_DEF; #if NPY_EINSUM_DBG_TRACING NpyIter_DebugPrint(iter); @@ -2521,6 +2531,7 @@ * Since the iterator wasn't tracking coordinates, the * loop provided by the iterator is in Fortran-order. */ + NPY_BEGIN_THREADS_THRESHOLDED(shape[2] * shape[1] * shape[0]); for (coords[1] = shape[2]; coords[1] > 0; --coords[1]) { for (coords[0] = shape[1]; coords[0] > 0; --coords[0]) { sop(2, ptrs[0], strides[0], shape[0]); @@ -2539,6 +2550,7 @@ ptr = ptrs[2][2] + strides[2][2]; ptrs[0][2] = ptrs[1][2] = ptrs[2][2] = ptr; } + NPY_END_THREADS; return 0; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/flagsobject.c python-numpy-1.14.5/numpy/core/src/multiarray/flagsobject.c --- python-numpy-1.13.3/numpy/core/src/multiarray/flagsobject.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/flagsobject.c 2018-06-12 18:28:52.000000000 +0000 @@ -208,11 +208,10 @@ _define_get(NPY_ARRAY_C_CONTIGUOUS, contiguous) _define_get(NPY_ARRAY_F_CONTIGUOUS, fortran) -_define_get(NPY_ARRAY_UPDATEIFCOPY, updateifcopy) +_define_get(NPY_ARRAY_WRITEBACKIFCOPY, writebackifcopy) _define_get(NPY_ARRAY_OWNDATA, owndata) _define_get(NPY_ARRAY_ALIGNED, aligned) _define_get(NPY_ARRAY_WRITEABLE, writeable) - _define_get(NPY_ARRAY_ALIGNED| NPY_ARRAY_WRITEABLE, behaved) _define_get(NPY_ARRAY_ALIGNED| @@ -220,6 +219,25 @@ NPY_ARRAY_C_CONTIGUOUS, carray) static PyObject * +arrayflags_updateifcopy_get(PyArrayFlagsObject *self) +{ + PyObject *item; + /* 2017-Nov-10 1.14 */ + if(DEPRECATE("UPDATEIFCOPY deprecated, use WRITEBACKIFCOPY instead") < 0) { + return NULL; + } + if ((self->flags & 
(NPY_ARRAY_UPDATEIFCOPY)) == (NPY_ARRAY_UPDATEIFCOPY)) { + item = Py_True; + } + else { + item = Py_False; + } + Py_INCREF(item); + return item; +} + + +static PyObject * arrayflags_forc_get(PyArrayFlagsObject *self) { PyObject *item; @@ -291,6 +309,35 @@ "Cannot set flags on array scalars."); return -1; } + /* 2017-Nov-10 1.14 */ + if(DEPRECATE("UPDATEIFCOPY deprecated, use WRITEBACKIFCOPY instead") < 0) { + return -1; + } + res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None, + (PyObject_IsTrue(obj) ? Py_True : Py_False)); + if (res == NULL) { + return -1; + } + Py_DECREF(res); + return 0; +} + +/* relies on setflags order being write, align, uic */ +static int +arrayflags_writebackifcopy_set(PyArrayFlagsObject *self, PyObject *obj) +{ + PyObject *res; + + if (obj == NULL) { + PyErr_SetString(PyExc_AttributeError, + "Cannot delete flags writebackifcopy attribute"); + return -1; + } + if (self->arr == NULL) { + PyErr_SetString(PyExc_ValueError, + "Cannot set flags on array scalars."); + return -1; + } res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None, (PyObject_IsTrue(obj) ? 
Py_True : Py_False)); if (res == NULL) { @@ -372,6 +419,10 @@ (getter)arrayflags_updateifcopy_get, (setter)arrayflags_updateifcopy_set, NULL, NULL}, + {"writebackifcopy", + (getter)arrayflags_writebackifcopy_get, + (setter)arrayflags_writebackifcopy_set, + NULL, NULL}, {"owndata", (getter)arrayflags_owndata_get, NULL, @@ -455,6 +506,8 @@ return arrayflags_owndata_get(self); case 'A': return arrayflags_aligned_get(self); + case 'X': + return arrayflags_writebackifcopy_get(self); case 'U': return arrayflags_updateifcopy_get(self); default: @@ -522,6 +575,11 @@ return arrayflags_fortran_get(self); } break; + case 15: + if (strncmp(key, "WRITEBACKIFCOPY", n) == 0) { + return arrayflags_writebackifcopy_get(self); + } + break; } fail: @@ -564,6 +622,10 @@ ((n==1) && (strncmp(key, "U", n) == 0))) { return arrayflags_updateifcopy_set(self, item); } + else if (((n==14) && (strncmp(key, "WRITEBACKIFCOPY", n) == 0)) || + ((n==1) && (strncmp(key, "X", n) == 0))) { + return arrayflags_writebackifcopy_set(self, item); + } fail: PyErr_SetString(PyExc_KeyError, "Unknown flag"); @@ -589,16 +651,17 @@ return PyUString_FromFormat( " %s : %s\n %s : %s\n" " %s : %s\n %s : %s\n" - " %s : %s\n %s : %s", - "C_CONTIGUOUS", _torf_(fl, NPY_ARRAY_C_CONTIGUOUS), - "F_CONTIGUOUS", _torf_(fl, NPY_ARRAY_F_CONTIGUOUS), - "OWNDATA", _torf_(fl, NPY_ARRAY_OWNDATA), - "WRITEABLE", _torf_(fl, NPY_ARRAY_WRITEABLE), - "ALIGNED", _torf_(fl, NPY_ARRAY_ALIGNED), - "UPDATEIFCOPY", _torf_(fl, NPY_ARRAY_UPDATEIFCOPY)); + " %s : %s\n %s : %s\n" + " %s : %s", + "C_CONTIGUOUS", _torf_(fl, NPY_ARRAY_C_CONTIGUOUS), + "F_CONTIGUOUS", _torf_(fl, NPY_ARRAY_F_CONTIGUOUS), + "OWNDATA", _torf_(fl, NPY_ARRAY_OWNDATA), + "WRITEABLE", _torf_(fl, NPY_ARRAY_WRITEABLE), + "ALIGNED", _torf_(fl, NPY_ARRAY_ALIGNED), + "WRITEBACKIFCOPY", _torf_(fl, NPY_ARRAY_WRITEBACKIFCOPY), + "UPDATEIFCOPY", _torf_(fl, NPY_ARRAY_UPDATEIFCOPY)); } - static int arrayflags_compare(PyArrayFlagsObject *self, PyArrayFlagsObject *other) { diff -Nru 
python-numpy-1.13.3/numpy/core/src/multiarray/getset.c python-numpy-1.14.5/numpy/core/src/multiarray/getset.c --- python-numpy-1.13.3/numpy/core/src/multiarray/getset.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/getset.c 2018-06-12 18:28:52.000000000 +0000 @@ -18,6 +18,7 @@ #include "getset.h" #include "arrayobject.h" #include "mem_overlap.h" +#include "alloc.h" /******************* array attribute get and set routines ******************/ @@ -65,12 +66,12 @@ } /* Free old dimensions and strides */ - PyDimMem_FREE(PyArray_DIMS(self)); + npy_free_cache_dim_array(self); nd = PyArray_NDIM(ret); ((PyArrayObject_fields *)self)->nd = nd; if (nd > 0) { /* create new dimensions and strides */ - ((PyArrayObject_fields *)self)->dimensions = PyDimMem_NEW(3*nd); + ((PyArrayObject_fields *)self)->dimensions = npy_alloc_cache_dim(3*nd); if (PyArray_DIMS(self) == NULL) { Py_DECREF(ret); PyErr_SetString(PyExc_MemoryError,""); @@ -158,11 +159,11 @@ memcpy(PyArray_STRIDES(self), newstrides.ptr, sizeof(npy_intp)*newstrides.len); PyArray_UpdateFlags(self, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS | NPY_ARRAY_ALIGNED); - PyDimMem_FREE(newstrides.ptr); + npy_free_cache_dim_obj(newstrides); return 0; fail: - PyDimMem_FREE(newstrides.ptr); + npy_free_cache_dim_obj(newstrides); return -1; } @@ -364,9 +365,11 @@ PyDataMem_FREE(PyArray_DATA(self)); } if (PyArray_BASE(self)) { - if (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY) { + if ((PyArray_FLAGS(self) & NPY_ARRAY_WRITEBACKIFCOPY) || + (PyArray_FLAGS(self) & NPY_ARRAY_UPDATEIFCOPY)) { PyArray_ENABLEFLAGS((PyArrayObject *)PyArray_BASE(self), NPY_ARRAY_WRITEABLE); + PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); } Py_DECREF(PyArray_BASE(self)); @@ -436,12 +439,6 @@ array_descr_set(PyArrayObject *self, PyObject *arg) { PyArray_Descr *newtype = NULL; - npy_intp newdim; - int i; - char *msg = "new type not compatible with array."; - PyObject 
*safe; - static PyObject *checkfunc = NULL; - if (arg == NULL) { PyErr_SetString(PyExc_AttributeError, @@ -458,91 +455,107 @@ /* check that we are not reinterpreting memory containing Objects. */ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(newtype)) { + static PyObject *checkfunc = NULL; + PyObject *safe; + npy_cache_import("numpy.core._internal", "_view_is_safe", &checkfunc); if (checkfunc == NULL) { - return -1; + goto fail; } safe = PyObject_CallFunction(checkfunc, "OO", PyArray_DESCR(self), newtype); if (safe == NULL) { - Py_DECREF(newtype); - return -1; + goto fail; } Py_DECREF(safe); } - if (newtype->elsize == 0) { - /* Allow a void view */ - if (newtype->type_num == NPY_VOID) { - PyArray_DESCR_REPLACE(newtype); - if (newtype == NULL) { - return -1; - } - newtype->elsize = PyArray_DESCR(self)->elsize; - } - /* But no other flexible types */ - else { - PyErr_SetString(PyExc_TypeError, - "data-type must not be 0-sized"); - Py_DECREF(newtype); + /* + * Viewing as an unsized void implies a void dtype matching the size of the + * current dtype. + */ + if (newtype->type_num == NPY_VOID && + PyDataType_ISUNSIZED(newtype) && + newtype->elsize != PyArray_DESCR(self)->elsize) { + PyArray_DESCR_REPLACE(newtype); + if (newtype == NULL) { return -1; } + newtype->elsize = PyArray_DESCR(self)->elsize; } - - if ((newtype->elsize != PyArray_DESCR(self)->elsize) && - (PyArray_NDIM(self) == 0 || - !PyArray_ISONESEGMENT(self) || - PyDataType_HASSUBARRAY(newtype))) { - goto fail; - } - - /* Deprecate not C contiguous and a dimension changes */ - if (newtype->elsize != PyArray_DESCR(self)->elsize && - !PyArray_IS_C_CONTIGUOUS(self)) { - /* 11/27/2015 1.11.0 */ - if (DEPRECATE("Changing the shape of non-C contiguous array by\n" - "descriptor assignment is deprecated. 
To maintain\n" - "the Fortran contiguity of a multidimensional Fortran\n" - "array, use 'a.T.view(...).T' instead") < 0) { - return -1; + /* Changing the size of the dtype results in a shape change */ + if (newtype->elsize != PyArray_DESCR(self)->elsize) { + int axis; + npy_intp newdim; + + /* forbidden cases */ + if (PyArray_NDIM(self) == 0) { + PyErr_SetString(PyExc_ValueError, + "Changing the dtype of a 0d array is only supported " + "if the itemsize is unchanged"); + goto fail; } - } - - if (PyArray_IS_C_CONTIGUOUS(self)) { - i = PyArray_NDIM(self) - 1; - } - else { - i = 0; - } - if (newtype->elsize < PyArray_DESCR(self)->elsize) { - /* - * if it is compatible increase the size of the - * dimension at end (or at the front for NPY_ARRAY_F_CONTIGUOUS) - */ - if (PyArray_DESCR(self)->elsize % newtype->elsize != 0) { + else if (PyDataType_HASSUBARRAY(newtype)) { + PyErr_SetString(PyExc_ValueError, + "Changing the dtype to a subarray type is only supported " + "if the total itemsize is unchanged"); goto fail; } - newdim = PyArray_DESCR(self)->elsize / newtype->elsize; - PyArray_DIMS(self)[i] *= newdim; - PyArray_STRIDES(self)[i] = newtype->elsize; - } - else if (newtype->elsize > PyArray_DESCR(self)->elsize) { - /* - * Determine if last (or first if NPY_ARRAY_F_CONTIGUOUS) dimension - * is compatible - */ - newdim = PyArray_DIMS(self)[i] * PyArray_DESCR(self)->elsize; - if ((newdim % newtype->elsize) != 0) { + + /* determine which axis to resize */ + if (PyArray_IS_C_CONTIGUOUS(self)) { + axis = PyArray_NDIM(self) - 1; + } + else if (PyArray_IS_F_CONTIGUOUS(self)) { + /* 2015-11-27 1.11.0, gh-6747 */ + if (DEPRECATE( + "Changing the shape of an F-contiguous array by " + "descriptor assignment is deprecated. 
To maintain the " + "Fortran contiguity of a multidimensional Fortran " + "array, use 'a.T.view(...).T' instead") < 0) { + goto fail; + } + axis = 0; + } + else { + /* Don't mention the deprecated F-contiguous support */ + PyErr_SetString(PyExc_ValueError, + "To change to a dtype of a different size, the array must " + "be C-contiguous"); goto fail; } - PyArray_DIMS(self)[i] = newdim / newtype->elsize; - PyArray_STRIDES(self)[i] = newtype->elsize; + + if (newtype->elsize < PyArray_DESCR(self)->elsize) { + /* if it is compatible, increase the size of the relevant axis */ + if (newtype->elsize == 0 || + PyArray_DESCR(self)->elsize % newtype->elsize != 0) { + PyErr_SetString(PyExc_ValueError, + "When changing to a smaller dtype, its size must be a " + "divisor of the size of original dtype"); + goto fail; + } + newdim = PyArray_DESCR(self)->elsize / newtype->elsize; + PyArray_DIMS(self)[axis] *= newdim; + PyArray_STRIDES(self)[axis] = newtype->elsize; + } + else if (newtype->elsize > PyArray_DESCR(self)->elsize) { + /* if it is compatible, decrease the size of the relevant axis */ + newdim = PyArray_DIMS(self)[axis] * PyArray_DESCR(self)->elsize; + if ((newdim % newtype->elsize) != 0) { + PyErr_SetString(PyExc_ValueError, + "When changing to a larger dtype, its size must be a " + "divisor of the total size in bytes of the last axis " + "of the array."); + goto fail; + } + PyArray_DIMS(self)[axis] = newdim / newtype->elsize; + PyArray_STRIDES(self)[axis] = newtype->elsize; + } } - /* fall through -- adjust type*/ - Py_DECREF(PyArray_DESCR(self)); + /* Viewing as a subarray increases the number of dimensions */ if (PyDataType_HASSUBARRAY(newtype)) { /* * create new array object from data and update @@ -560,7 +573,7 @@ if (temp == NULL) { return -1; } - PyDimMem_FREE(PyArray_DIMS(self)); + npy_free_cache_dim_array(self); ((PyArrayObject_fields *)self)->dimensions = PyArray_DIMS(temp); ((PyArrayObject_fields *)self)->nd = PyArray_NDIM(temp); ((PyArrayObject_fields 
*)self)->strides = PyArray_STRIDES(temp); @@ -572,12 +585,12 @@ Py_DECREF(temp); } + Py_DECREF(PyArray_DESCR(self)); ((PyArrayObject_fields *)self)->descr = newtype; PyArray_UpdateFlags(self, NPY_ARRAY_UPDATE_ALL); return 0; fail: - PyErr_SetString(PyExc_ValueError, msg); Py_DECREF(newtype); return -1; } @@ -603,7 +616,7 @@ inter->itemsize = PyArray_DESCR(self)->elsize; inter->flags = PyArray_FLAGS(self); /* reset unused flags */ - inter->flags &= ~(NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_OWNDATA); + inter->flags &= ~(NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_UPDATEIFCOPY |NPY_ARRAY_OWNDATA); if (PyArray_ISNOTSWAPPED(self)) inter->flags |= NPY_ARRAY_NOTSWAPPED; /* * Copy shape and strides over since these can be reset diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/item_selection.c python-numpy-1.14.5/numpy/core/src/multiarray/item_selection.c --- python-numpy-1.13.3/numpy/core/src/multiarray/item_selection.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/item_selection.c 2018-06-12 18:28:52.000000000 +0000 @@ -23,6 +23,7 @@ #include "npy_sort.h" #include "npy_partition.h" #include "npy_binsearch.h" +#include "alloc.h" /*NUMPY_API * Take @@ -86,8 +87,7 @@ } else { - int flags = NPY_ARRAY_CARRAY | - NPY_ARRAY_UPDATEIFCOPY; + int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY; if ((PyArray_NDIM(out) != nd) || !PyArray_CompareLists(PyArray_DIMS(out), shape, nd)) { @@ -234,13 +234,15 @@ Py_XDECREF(self); if (out != NULL && out != obj) { Py_INCREF(out); + PyArray_ResolveWritebackIfCopy(obj); Py_DECREF(obj); obj = out; } return (PyObject *)obj; fail: - PyArray_XDECREF_ERR(obj); + PyArray_DiscardWritebackIfCopy(obj); + Py_XDECREF(obj); Py_XDECREF(indices); Py_XDECREF(self); return NULL; @@ -272,7 +274,7 @@ if (!PyArray_ISCONTIGUOUS(self)) { PyArrayObject *obj; - int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY; + int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY; if (clipmode == NPY_RAISE) { flags |= 
NPY_ARRAY_ENSURECOPY; @@ -406,6 +408,7 @@ Py_XDECREF(values); Py_XDECREF(indices); if (copied) { + PyArray_ResolveWritebackIfCopy(self); Py_DECREF(self); } Py_RETURN_NONE; @@ -414,7 +417,8 @@ Py_XDECREF(indices); Py_XDECREF(values); if (copied) { - PyArray_XDECREF_ERR(self); + PyArray_DiscardWritebackIfCopy(self); + Py_XDECREF(self); } return NULL; } @@ -447,7 +451,7 @@ dtype = PyArray_DESCR(self); Py_INCREF(dtype); obj = (PyArrayObject *)PyArray_FromArray(self, dtype, - NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY); + NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY); if (obj != self) { copied = 1; } @@ -523,6 +527,7 @@ Py_XDECREF(values); Py_XDECREF(mask); if (copied) { + PyArray_ResolveWritebackIfCopy(self); Py_DECREF(self); } Py_RETURN_NONE; @@ -531,7 +536,8 @@ Py_XDECREF(mask); Py_XDECREF(values); if (copied) { - PyArray_XDECREF_ERR(self); + PyArray_DiscardWritebackIfCopy(self); + Py_XDECREF(self); } return NULL; } @@ -693,7 +699,7 @@ } else { int flags = NPY_ARRAY_CARRAY | - NPY_ARRAY_UPDATEIFCOPY | + NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_FORCECAST; if ((PyArray_NDIM(out) != multi->nd) @@ -765,9 +771,10 @@ Py_XDECREF(mps[i]); } Py_DECREF(ap); - PyDataMem_FREE(mps); + npy_free_cache(mps, n * sizeof(mps[0])); if (out != NULL && out != obj) { Py_INCREF(out); + PyArray_ResolveWritebackIfCopy(obj); Py_DECREF(obj); obj = out; } @@ -779,8 +786,9 @@ Py_XDECREF(mps[i]); } Py_XDECREF(ap); - PyDataMem_FREE(mps); - PyArray_XDECREF_ERR(obj); + npy_free_cache(mps, n * sizeof(mps[0])); + PyArray_DiscardWritebackIfCopy(obj); + Py_XDECREF(obj); return NULL; } @@ -827,7 +835,7 @@ NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(op)); if (needcopy) { - buffer = PyDataMem_NEW(N * elsize); + buffer = npy_alloc_cache(N * elsize); if (buffer == NULL) { ret = -1; goto fail; @@ -908,7 +916,7 @@ } fail: - PyDataMem_FREE(buffer); + npy_free_cache(buffer, N * elsize); NPY_END_THREADS_DESCR(PyArray_DESCR(op)); if (ret < 0 && !PyErr_Occurred()) { /* Out of memory during sorting or buffer creation */ @@ 
-972,7 +980,7 @@ NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(op)); if (needcopy) { - valbuffer = PyDataMem_NEW(N * elsize); + valbuffer = npy_alloc_cache(N * elsize); if (valbuffer == NULL) { ret = -1; goto fail; @@ -980,7 +988,7 @@ } if (needidxbuffer) { - idxbuffer = (npy_intp *)PyDataMem_NEW(N * sizeof(npy_intp)); + idxbuffer = (npy_intp *)npy_alloc_cache(N * sizeof(npy_intp)); if (idxbuffer == NULL) { ret = -1; goto fail; @@ -1070,8 +1078,8 @@ } fail: - PyDataMem_FREE(valbuffer); - PyDataMem_FREE(idxbuffer); + npy_free_cache(valbuffer, N * elsize); + npy_free_cache(idxbuffer, N * sizeof(npy_intp)); NPY_END_THREADS_DESCR(PyArray_DESCR(op)); if (ret < 0) { if (!PyErr_Occurred()) { @@ -1487,13 +1495,13 @@ char *valbuffer, *indbuffer; int *swaps; - valbuffer = PyDataMem_NEW(N*maxelsize); + valbuffer = npy_alloc_cache(N * maxelsize); if (valbuffer == NULL) { goto fail; } - indbuffer = PyDataMem_NEW(N*sizeof(npy_intp)); + indbuffer = npy_alloc_cache(N * sizeof(npy_intp)); if (indbuffer == NULL) { - PyDataMem_FREE(indbuffer); + npy_free_cache(indbuffer, N * sizeof(npy_intp)); goto fail; } swaps = malloc(n*sizeof(int)); @@ -1525,8 +1533,8 @@ #else if (rcode < 0) { #endif - PyDataMem_FREE(valbuffer); - PyDataMem_FREE(indbuffer); + npy_free_cache(valbuffer, N * maxelsize); + npy_free_cache(indbuffer, N * sizeof(npy_intp)); free(swaps); goto fail; } @@ -1536,8 +1544,8 @@ sizeof(npy_intp), N, sizeof(npy_intp)); PyArray_ITER_NEXT(rit); } - PyDataMem_FREE(valbuffer); - PyDataMem_FREE(indbuffer); + npy_free_cache(valbuffer, N * maxelsize); + npy_free_cache(indbuffer, N * sizeof(npy_intp)); free(swaps); } else { @@ -2406,7 +2414,7 @@ data += ind * strides[idim]; } - return PyArray_DESCR(self)->f->getitem(data, self); + return PyArray_GETITEM(self, data); } /* @@ -2435,5 +2443,5 @@ data += ind * strides[idim]; } - return PyArray_DESCR(self)->f->setitem(obj, data, self); + return PyArray_SETITEM(self, data, obj); } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/iterators.c 
python-numpy-1.14.5/numpy/core/src/multiarray/iterators.c --- python-numpy-1.13.3/numpy/core/src/multiarray/iterators.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/iterators.c 2018-06-12 18:28:52.000000000 +0000 @@ -243,7 +243,9 @@ it->ao = ao; it->size = PyArray_SIZE(ao); it->nd_m1 = nd - 1; - it->factors[nd-1] = 1; + if (nd != 0) { + it->factors[nd-1] = 1; + } for (i = 0; i < nd; i++) { it->dims_m1[i] = PyArray_DIMS(ao)[i] - 1; it->strides[i] = PyArray_STRIDES(ao)[i]; @@ -340,7 +342,9 @@ it->ao = ao; it->size = PyArray_MultiplyList(dims, nd); it->nd_m1 = nd - 1; - it->factors[nd-1] = 1; + if (nd != 0) { + it->factors[nd-1] = 1; + } for (i = 0; i < nd; i++) { it->dims_m1[i] = dims[i] - 1; k = i - diff; @@ -917,7 +921,7 @@ if (PyBool_Check(ind)) { retval = 0; if (PyObject_IsTrue(ind)) { - retval = type->f->setitem(val, self->dataptr, self->ao); + retval = PyArray_SETITEM(self->ao, self->dataptr, val); } goto finish; } @@ -926,7 +930,7 @@ goto skip; } start = PyArray_PyIntAsIntp(ind); - if (start==-1 && PyErr_Occurred()) { + if (error_converting(start)) { PyErr_Clear(); } else { @@ -1055,7 +1059,28 @@ }; - +/* Two options: + * 1) underlying array is contiguous + * -- return 1-d wrapper around it + * 2) underlying array is not contiguous + * -- make new 1-d contiguous array with updateifcopy flag set + * to copy back to the old array + * + * If underlying array is readonly, then we make the output array readonly + * and updateifcopy does not apply. + * + * Changed 2017-07-21, 1.14.0. + * + * In order to start the process of removing UPDATEIFCOPY, see gh-7054, the + * behavior is changed to always return an non-writeable copy when the base + * array is non-contiguous. Doing that will hopefully smoke out those few + * folks who assign to the result with the expectation that the base array + * will be changed. At a later date non-contiguous arrays will always return + * writeable copies. 
+ * + * Note that the type and argument expected for the __array__ method is + * ignored. + */ static PyArrayObject * iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op)) { @@ -1063,27 +1088,14 @@ PyArrayObject *ret; npy_intp size; - /* Any argument ignored */ - - /* Two options: - * 1) underlying array is contiguous - * -- return 1-d wrapper around it - * 2) underlying array is not contiguous - * -- make new 1-d contiguous array with updateifcopy flag set - * to copy back to the old array - * - * If underlying array is readonly, then we make the output array readonly - * and updateifcopy does not apply. - */ size = PyArray_SIZE(it->ao); Py_INCREF(PyArray_DESCR(it->ao)); + if (PyArray_ISCONTIGUOUS(it->ao)) { - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - PyArray_DESCR(it->ao), - 1, &size, - NULL, PyArray_DATA(it->ao), - PyArray_FLAGS(it->ao), - (PyObject *)it->ao); + ret = (PyArrayObject *)PyArray_NewFromDescr( + &PyArray_Type, PyArray_DESCR(it->ao), 1, &size, + NULL, PyArray_DATA(it->ao), PyArray_FLAGS(it->ao), + (PyObject *)it->ao); if (ret == NULL) { return NULL; } @@ -1094,11 +1106,10 @@ } } else { - ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, - PyArray_DESCR(it->ao), - 1, &size, - NULL, NULL, - 0, (PyObject *)it->ao); + ret = (PyArrayObject *)PyArray_NewFromDescr( + &PyArray_Type, PyArray_DESCR(it->ao), 1, &size, + NULL, NULL, 0, + (PyObject *)it->ao); if (ret == NULL) { return NULL; } @@ -1106,16 +1117,7 @@ Py_DECREF(ret); return NULL; } - if (PyArray_ISWRITEABLE(it->ao)) { - Py_INCREF(it->ao); - if (PyArray_SetUpdateIfCopyBase(ret, it->ao) < 0) { - Py_DECREF(ret); - return NULL; - } - } - else { - PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE); - } + PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE); } return ret; @@ -1151,6 +1153,7 @@ return NULL; } ret = array_richcompare(new, other, cmp_op); + PyArray_ResolveWritebackIfCopy(new); Py_DECREF(new); return ret; } @@ -1323,7 +1326,9 @@ it->nd_m1 = mit->nd - 1; it->size = tmp; nd 
= PyArray_NDIM(it->ao); - it->factors[mit->nd-1] = 1; + if (nd != 0) { + it->factors[mit->nd-1] = 1; + } for (j = 0; j < mit->nd; j++) { it->dims_m1[j] = mit->dimensions[j] - 1; k = j + nd - mit->nd; @@ -1805,7 +1810,7 @@ storeflags = PyArray_FLAGS(ar->ao); PyArray_ENABLEFLAGS(ar->ao, NPY_ARRAY_BEHAVED); - st = PyArray_DESCR(ar->ao)->f->setitem((PyObject*)fill, ret, ar->ao); + st = PyArray_SETITEM(ar->ao, ret, (PyObject*)fill); ((PyArrayObject_fields *)ar->ao)->flags = storeflags; if (st < 0) { diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/mapping.c python-numpy-1.14.5/numpy/core/src/multiarray/mapping.c --- python-numpy-1.13.3/numpy/core/src/multiarray/mapping.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/mapping.c 2018-06-12 18:28:52.000000000 +0000 @@ -139,6 +139,196 @@ *ret = (PyArrayObject *)new; } +static NPY_INLINE void +multi_DECREF(PyObject **objects, npy_intp n) +{ + npy_intp i; + for (i = 0; i < n; i++) { + Py_DECREF(objects[i]); + } +} + +/** + * Unpack a tuple into an array of new references. Returns the number of objects + * unpacked. + * + * Useful if a tuple is being iterated over multiple times, or for a code path + * that doesn't always want the overhead of allocating a tuple. + */ +static NPY_INLINE npy_intp +unpack_tuple(PyTupleObject *index, PyObject **result, npy_intp result_n) +{ + npy_intp n, i; + n = PyTuple_GET_SIZE(index); + if (n > result_n) { + PyErr_SetString(PyExc_IndexError, + "too many indices for array"); + return -1; + } + for (i = 0; i < n; i++) { + result[i] = PyTuple_GET_ITEM(index, i); + Py_INCREF(result[i]); + } + return n; +} + +/* Unpack a single scalar index, taking a new reference to match unpack_tuple */ +static NPY_INLINE npy_intp +unpack_scalar(PyObject *index, PyObject **result, npy_intp result_n) +{ + Py_INCREF(index); + result[0] = index; + return 1; +} + +/** + * Turn an index argument into a c-array of `PyObject *`s, one for each index. 
+ * + * When a scalar is passed, this is written directly to the buffer. When a + * tuple is passed, the tuple elements are unpacked into the buffer. + * + * When some other sequence is passed, this implements the following section + * from the advanced indexing docs to decide whether to unpack or just write + * one element: + * + * > In order to remain backward compatible with a common usage in Numeric, + * > basic slicing is also initiated if the selection object is any non-ndarray + * > sequence (such as a list) containing slice objects, the Ellipsis object, + * > or the newaxis object, but not for integer arrays or other embedded + * > sequences. + * + * It might be worth deprecating this behaviour (gh-4434), in which case the + * entire function should become a simple check of PyTuple_Check. + * + * @param index The index object, which may or may not be a tuple. This is + * a borrowed reference. + * @param result An empty buffer of PyObject* to write each index component + * to. The references written are new. + * @param result_n The length of the result buffer + * + * @returns The number of items in `result`, or -1 if an error occured. + * The entries in `result` at and beyond this index should be + * assumed to contain garbage, even if they were initialized + * to NULL, so are not safe to Py_XDECREF. Use multi_DECREF to + * dispose of them. 
+ */ +NPY_NO_EXPORT npy_intp +unpack_indices(PyObject *index, PyObject **result, npy_intp result_n) +{ + npy_intp n, i; + npy_bool commit_to_unpack; + + /* Fast route for passing a tuple */ + if (PyTuple_CheckExact(index)) { + return unpack_tuple((PyTupleObject *)index, result, result_n); + } + + /* Obvious single-entry cases */ + if (0 /* to aid macros below */ +#if !defined(NPY_PY3K) + || PyInt_CheckExact(index) +#else + || PyLong_CheckExact(index) +#endif + || index == Py_None + || PySlice_Check(index) + || PyArray_Check(index) + || !PySequence_Check(index)) { + + return unpack_scalar(index, result, result_n); + } + + /* + * Passing a tuple subclass - coerce to the base type. This incurs an + * allocation, but doesn't need to be a fast path anyway + */ + if (PyTuple_Check(index)) { + PyTupleObject *tup = (PyTupleObject *) PySequence_Tuple(index); + if (tup == NULL) { + return -1; + } + n = unpack_tuple(tup, result, result_n); + Py_DECREF(tup); + return n; + } + + /* + * At this point, we're left with a non-tuple, non-array, sequence: + * typically, a list. We use some somewhat-arbitrary heuristics from here + * onwards to decided whether to treat that list as a single index, or a + * list of indices. + */ + + /* if len fails, treat like a scalar */ + n = PySequence_Size(index); + if (n < 0) { + PyErr_Clear(); + return unpack_scalar(index, result, result_n); + } + + /* + * Backwards compatibility only takes effect for short sequences - otherwise + * we treat it like any other scalar. + * + * Sequences < NPY_MAXDIMS with any slice objects + * or newaxis, Ellipsis or other arrays or sequences + * embedded, are considered equivalent to an indexing + * tuple. 
(`a[[[1,2], [3,4]]] == a[[1,2], [3,4]]`) + */ + if (n >= NPY_MAXDIMS) { + return unpack_scalar(index, result, result_n); + } + + /* In case we change result_n elsewhere */ + assert(n <= result_n); + + /* + * Some other type of short sequence - assume we should unpack it like a + * tuple, and then decide whether that was actually necessary. + */ + commit_to_unpack = 0; + for (i = 0; i < n; i++) { + PyObject *tmp_obj = result[i] = PySequence_GetItem(index, i); + + if (commit_to_unpack) { + /* propagate errors */ + if (tmp_obj == NULL) { + multi_DECREF(result, i); + return -1; + } + } + else { + /* + * if getitem fails (unusual) before we've committed, then stop + * unpacking + */ + if (tmp_obj == NULL) { + PyErr_Clear(); + break; + } + + /* decide if we should treat this sequence like a tuple */ + if (PyArray_Check(tmp_obj) + || PySequence_Check(tmp_obj) + || PySlice_Check(tmp_obj) + || tmp_obj == Py_Ellipsis + || tmp_obj == Py_None) { + commit_to_unpack = 1; + } + } + } + + /* unpacking was the right thing to do, and we already did it */ + if (commit_to_unpack) { + return n; + } + /* got to the end, never found an indication that we should have unpacked */ + else { + /* we partially filled result, so empty it first */ + multi_DECREF(result, i); + return unpack_scalar(index, result, result_n); + } +} /** * Prepare an npy_index_object from the python slicing object. @@ -174,7 +364,6 @@ int i; npy_intp n; - npy_bool make_tuple = 0; PyObject *obj = NULL; PyArrayObject *arr; @@ -182,81 +371,16 @@ int ellipsis_pos = -1; /* - * The index might be a multi-dimensional index, but not yet a tuple - * this makes it a tuple in that case. - * - * TODO: Refactor into its own function. + * The choice of only unpacking `2*NPY_MAXDIMS` items is historic. + * The longest "reasonable" index that produces a result of <= 32 dimensions + * is `(0,)*np.MAXDIMS + (None,)*np.MAXDIMS`. Longer indices can exist, but + * are uncommon. 
*/ - if (!PyTuple_CheckExact(index) - /* Next three are just to avoid slow checks */ -#if !defined(NPY_PY3K) - && (!PyInt_CheckExact(index)) -#else - && (!PyLong_CheckExact(index)) -#endif - && (index != Py_None) - && (!PySlice_Check(index)) - && (!PyArray_Check(index)) - && (PySequence_Check(index))) { - /* - * Sequences < NPY_MAXDIMS with any slice objects - * or newaxis, Ellipsis or other arrays or sequences - * embedded, are considered equivalent to an indexing - * tuple. (`a[[[1,2], [3,4]]] == a[[1,2], [3,4]]`) - */ - - if (PyTuple_Check(index)) { - /* If it is already a tuple, make it an exact tuple anyway */ - n = 0; - make_tuple = 1; - } - else { - n = PySequence_Size(index); - } - if (n < 0 || n >= NPY_MAXDIMS) { - n = 0; - } - for (i = 0; i < n; i++) { - PyObject *tmp_obj = PySequence_GetItem(index, i); - /* if getitem fails (unusual) treat this as a single index */ - if (tmp_obj == NULL) { - PyErr_Clear(); - make_tuple = 0; - break; - } - if (PyArray_Check(tmp_obj) || PySequence_Check(tmp_obj) - || PySlice_Check(tmp_obj) || tmp_obj == Py_Ellipsis - || tmp_obj == Py_None) { - make_tuple = 1; - Py_DECREF(tmp_obj); - break; - } - Py_DECREF(tmp_obj); - } - - if (make_tuple) { - /* We want to interpret it as a tuple, so make it one */ - index = PySequence_Tuple(index); - if (index == NULL) { - return -1; - } - } - } + PyObject *raw_indices[NPY_MAXDIMS*2]; - /* If the index is not a tuple, handle it the same as (index,) */ - if (!PyTuple_CheckExact(index)) { - obj = index; - index_ndim = 1; - } - else { - n = PyTuple_GET_SIZE(index); - if (n > NPY_MAXDIMS * 2) { - PyErr_SetString(PyExc_IndexError, - "too many indices for array"); - goto fail; - } - index_ndim = (int)n; - obj = NULL; + index_ndim = unpack_indices(index, raw_indices, NPY_MAXDIMS*2); + if (index_ndim == -1) { + return -1; } /* @@ -275,14 +399,7 @@ goto failed_building_indices; } - /* Check for single index. obj is already set then. 
*/ - if ((curr_idx != 0) || (obj == NULL)) { - obj = PyTuple_GET_ITEM(index, get_idx++); - } - else { - /* only one loop */ - get_idx += 1; - } + obj = raw_indices[get_idx++]; /**** Try the cascade of possible indices ****/ @@ -355,7 +472,7 @@ #endif npy_intp ind = PyArray_PyIntAsIntp(obj); - if ((ind == -1) && PyErr_Occurred()) { + if (error_converting(ind)) { PyErr_Clear(); } else { @@ -526,7 +643,7 @@ npy_intp ind = PyArray_PyIntAsIntp((PyObject *)arr); Py_DECREF(arr); - if ((ind == -1) && PyErr_Occurred()) { + if (error_converting(ind)) { goto failed_building_indices; } else { @@ -686,9 +803,7 @@ *ndim = new_ndim + fancy_ndim; *out_fancy_ndim = fancy_ndim; - if (make_tuple) { - Py_DECREF(index); - } + multi_DECREF(raw_indices, index_ndim); return index_type; @@ -696,10 +811,7 @@ for (i=0; i < curr_idx; i++) { Py_XDECREF(indices[i].object); } - fail: - if (make_tuple) { - Py_DECREF(index); - } + multi_DECREF(raw_indices, index_ndim); return -1; } @@ -1272,23 +1384,60 @@ } /* + * Helper function for _get_field_view which turns a multifield + * view into a "packed" copy, as done in numpy 1.14 and before. + * In numpy 1.15 this function is removed. + */ +NPY_NO_EXPORT int +_multifield_view_to_copy(PyArrayObject **view) { + static PyObject *copyfunc = NULL; + PyObject *viewcopy; + + /* return a repacked copy of the view */ + npy_cache_import("numpy.lib.recfunctions", "repack_fields", ©func); + if (copyfunc == NULL) { + goto view_fail; + } + + PyArray_CLEARFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE); + viewcopy = PyObject_CallFunction(copyfunc, "O", *view); + if (viewcopy == NULL) { + goto view_fail; + } + Py_DECREF(*view); + *view = (PyArrayObject*)viewcopy; + + /* warn when writing to the copy */ + PyArray_ENABLEFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE); + return 0; + +view_fail: + Py_DECREF(*view); + *view = NULL; + return 0; +} + + +/* * Attempts to subscript an array using a field name or list of field names. * * If an error occurred, return 0 and set view to NULL. 
If the subscript is not * a string or list of strings, return -1 and set view to NULL. Otherwise * return 0 and set view to point to a new view into arr for the given fields. + * + * In numpy 1.14 and before, in the case of a list of field names the returned + * view will actually be a copy by default, with fields packed together. + * The `force_view` argument causes a view to be returned. This argument can be + * removed in 1.15 when we plan to return a view always. */ NPY_NO_EXPORT int -_get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view) +_get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view, + int force_view) { *view = NULL; /* first check for a single field name */ -#if defined(NPY_PY3K) - if (PyUnicode_Check(ind)) { -#else - if (PyString_Check(ind) || PyUnicode_Check(ind)) { -#endif + if (PyBaseString_Check(ind)) { PyObject *tup; PyArray_Descr *fieldtype; npy_intp offset; @@ -1334,10 +1483,6 @@ PyObject *fields, *names; PyArray_Descr *view_dtype; - /* variables needed to make a copy, to remove in the future */ - static PyObject *copyfunc = NULL; - PyObject *viewcopy; - seqlen = PySequence_Size(ind); /* quit if have a 0-d array (seqlen==-1) or a 0-len array */ @@ -1369,11 +1514,7 @@ return -1; } -#if defined(NPY_PY3K) - if (!PyUnicode_Check(name)) { -#else - if (!PyString_Check(name) && !PyUnicode_Check(name)) { -#endif + if (!PyBaseString_Check(name)) { Py_DECREF(name); Py_DECREF(fields); Py_DECREF(names); @@ -1390,6 +1531,35 @@ Py_DECREF(names); return 0; } + /* disallow use of titles as index */ + if (PyTuple_Size(tup) == 3) { + PyObject *title = PyTuple_GET_ITEM(tup, 2); + int titlecmp = PyObject_RichCompareBool(title, name, Py_EQ); + if (titlecmp == 1) { + /* if title == name, we got a title, not a field name */ + PyErr_SetString(PyExc_KeyError, + "cannot use field titles in multi-field index"); + } + if (titlecmp != 0 || PyDict_SetItem(fields, title, tup) < 0) { + Py_DECREF(title); + Py_DECREF(name); + 
Py_DECREF(fields); + Py_DECREF(names); + return 0; + } + Py_DECREF(title); + } + /* disallow duplicate field indices */ + if (PyDict_Contains(fields, name)) { + PyObject *errmsg = PyUString_FromString( + "duplicate field of name "); + PyUString_ConcatAndDel(&errmsg, name); + PyErr_SetObject(PyExc_ValueError, errmsg); + Py_DECREF(errmsg); + Py_DECREF(fields); + Py_DECREF(names); + return 0; + } if (PyDict_SetItem(fields, name, tup) < 0) { Py_DECREF(name); Py_DECREF(fields); @@ -1433,30 +1603,11 @@ return 0; } - /* - * Return copy for now (future plan to return the view above). All the - * following code in this block can then be replaced by "return 0;" - */ - npy_cache_import("numpy.core._internal", "_copy_fields", ©func); - if (copyfunc == NULL) { - Py_DECREF(*view); - *view = NULL; - return 0; - } - - PyArray_CLEARFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE); - viewcopy = PyObject_CallFunction(copyfunc, "O", *view); - if (viewcopy == NULL) { - Py_DECREF(*view); - *view = NULL; + if (force_view) { return 0; } - Py_DECREF(*view); - *view = (PyArrayObject*)viewcopy; - /* warn when writing to the copy */ - PyArray_ENABLEFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE); - return 0; + return _multifield_view_to_copy(view); } return -1; } @@ -1484,16 +1635,11 @@ /* return fields if op is a string index */ if (PyDataType_HASFIELDS(PyArray_DESCR(self))) { PyArrayObject *view; - int ret = _get_field_view(self, op, &view); + int ret = _get_field_view(self, op, &view, 0); if (ret == 0){ if (view == NULL) { return NULL; } - - /* warn if writing to a copy. 
copies will have no base */ - if (PyArray_BASE(view) == NULL) { - PyArray_ENABLEFLAGS(view, NPY_ARRAY_WARN_ON_WRITE); - } return (PyObject*)view; } } @@ -1778,19 +1924,8 @@ /* field access */ if (PyDataType_HASFIELDS(PyArray_DESCR(self))){ PyArrayObject *view; - int ret = _get_field_view(self, ind, &view); + int ret = _get_field_view(self, ind, &view, 1); if (ret == 0){ - -#if defined(NPY_PY3K) - if (!PyUnicode_Check(ind)) { -#else - if (!PyString_Check(ind) && !PyUnicode_Check(ind)) { -#endif - PyErr_SetString(PyExc_ValueError, - "multi-field assignment is not supported"); - return -1; - } - if (view == NULL) { return -1; } @@ -3171,7 +3306,7 @@ * If copy_if_overlap != 0, check if `a` has memory overlap with any of the * arrays in `index` and with `extra_op`. If yes, make copies as appropriate * to avoid problems if `a` is modified during the iteration. - * `iter->array` may contain a copied array (with UPDATEIFCOPY set). + * `iter->array` may contain a copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set). 
*/ NPY_NO_EXPORT PyObject * PyArray_MapIterArrayCopyIfOverlap(PyArrayObject * a, PyObject * index, @@ -3205,7 +3340,7 @@ } Py_INCREF(a); - if (PyArray_SetUpdateIfCopyBase(a_copy, a) < 0) { + if (PyArray_SetWritebackIfCopyBase(a_copy, a) < 0) { goto fail; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/methods.c python-numpy-1.14.5/numpy/core/src/multiarray/methods.c --- python-numpy-1.13.3/numpy/core/src/multiarray/methods.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/methods.c 2018-06-12 18:28:52.000000000 +0000 @@ -13,14 +13,17 @@ #include "npy_import.h" #include "ufunc_override.h" #include "common.h" +#include "templ_common.h" /* for npy_mul_with_overflow_intp */ #include "ctors.h" #include "calculation.h" #include "convert_datatype.h" #include "item_selection.h" #include "conversion_utils.h" #include "shape.h" +#include "strfuncs.h" #include "methods.h" +#include "alloc.h" /* NpyArg_ParseKeywords @@ -201,11 +204,11 @@ } } ret = PyArray_Newshape(self, &newshape, order); - PyDimMem_FREE(newshape.ptr); + npy_free_cache_dim_obj(newshape); return ret; fail: - PyDimMem_FREE(newshape.ptr); + npy_free_cache_dim_obj(newshape); return NULL; } @@ -517,12 +520,13 @@ static PyObject * -array_byteswap(PyArrayObject *self, PyObject *args) +array_byteswap(PyArrayObject *self, PyObject *args, PyObject *kwds) { npy_bool inplace = NPY_FALSE; + static char *kwlist[] = {"inplace", NULL}; - if (!PyArg_ParseTuple(args, "|O&:byteswap", - PyArray_BoolConverter, &inplace)) { + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:byteswap", kwlist, + PyArray_BoolConverter, &inplace)) { return NULL; } return PyArray_Byteswap(self, inplace); @@ -637,7 +641,7 @@ npy_intp value, size = PyArray_SIZE(self); value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, 0)); - if (value == -1 && PyErr_Occurred()) { + if (error_converting(value)) { return NULL; } @@ -657,7 +661,7 @@ for (idim = 0; idim < ndim; ++idim) { value = 
PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, idim)); - if (value == -1 && PyErr_Occurred()) { + if (error_converting(value)) { return NULL; } multi_index[idim] = value; @@ -714,7 +718,7 @@ npy_intp value, size = PyArray_SIZE(self); value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, 0)); - if (value == -1 && PyErr_Occurred()) { + if (error_converting(value)) { return NULL; } @@ -734,7 +738,7 @@ for (idim = 0; idim < ndim; ++idim) { value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, idim)); - if (value == -1 && PyErr_Occurred()) { + if (error_converting(value)) { return NULL; } multi_index[idim] = value; @@ -971,20 +975,18 @@ /* convert to PyArray_Type */ if (!PyArray_CheckExact(self)) { PyArrayObject *new; - PyTypeObject *subtype = &PyArray_Type; - - if (!PyType_IsSubtype(Py_TYPE(self), &PyArray_Type)) { - subtype = &PyArray_Type; - } Py_INCREF(PyArray_DESCR(self)); - new = (PyArrayObject *)PyArray_NewFromDescr(subtype, - PyArray_DESCR(self), - PyArray_NDIM(self), - PyArray_DIMS(self), - PyArray_STRIDES(self), - PyArray_DATA(self), - PyArray_FLAGS(self), NULL); + new = (PyArrayObject *)PyArray_NewFromDescr( + &PyArray_Type, + PyArray_DESCR(self), + PyArray_NDIM(self), + PyArray_DIMS(self), + PyArray_STRIDES(self), + PyArray_DATA(self), + PyArray_FLAGS(self), + NULL + ); if (new == NULL) { return NULL; } @@ -1070,7 +1072,7 @@ /* Separate from array_copy to make __copy__ preserve Fortran contiguity. 
*/ static PyObject * -array_copy_keeporder(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_copy_keeporder(PyArrayObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, ":__copy__")) { return NULL; @@ -1111,7 +1113,7 @@ } ret = PyArray_Resize(self, &newshape, refcheck, NPY_CORDER); - PyDimMem_FREE(newshape.ptr); + npy_free_cache_dim_obj(newshape); if (ret == NULL) { return NULL; } @@ -1671,6 +1673,8 @@ Py_ssize_t len; npy_intp size, dimensions[NPY_MAXDIMS]; int nd; + npy_intp nbytes; + int overflowed; PyArrayObject_fields *fa = (PyArrayObject_fields *)self; @@ -1712,13 +1716,15 @@ return NULL; } size = PyArray_MultiplyList(dimensions, nd); - if (PyArray_DESCR(self)->elsize == 0) { - PyErr_SetString(PyExc_ValueError, "Invalid data-type size."); - return NULL; - } - if (size < 0 || size > NPY_MAX_INTP / PyArray_DESCR(self)->elsize) { - PyErr_NoMemory(); - return NULL; + if (size < 0) { + /* More items than are addressable */ + return PyErr_NoMemory(); + } + overflowed = npy_mul_with_overflow_intp( + &nbytes, size, PyArray_DESCR(self)->elsize); + if (overflowed) { + /* More bytes than are addressable */ + return PyErr_NoMemory(); } if (PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { @@ -1760,7 +1766,7 @@ return NULL; } - if ((len != (PyArray_DESCR(self)->elsize * size))) { + if (len != nbytes) { PyErr_SetString(PyExc_ValueError, "buffer size does not" \ " match array size"); @@ -1776,10 +1782,11 @@ Py_XDECREF(PyArray_BASE(self)); fa->base = NULL; + PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); if (PyArray_DIMS(self) != NULL) { - PyDimMem_FREE(PyArray_DIMS(self)); + npy_free_cache_dim_array(self); fa->dimensions = NULL; } @@ -1788,7 +1795,7 @@ fa->nd = nd; if (nd > 0) { - fa->dimensions = PyDimMem_NEW(3*nd); + fa->dimensions = npy_alloc_cache_dim(3*nd); if (fa->dimensions == NULL) { return PyErr_NoMemory(); } @@ -1802,7 +1809,7 @@ } if (!PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) { - int 
swap=!PyArray_ISNOTSWAPPED(self); + int swap = PyArray_ISBYTESWAPPED(self); fa->data = datastr; #ifndef NPY_PY3K /* Check that the string is not interned */ @@ -1816,13 +1823,13 @@ fa->data = PyDataMem_NEW(num); if (PyArray_DATA(self) == NULL) { fa->nd = 0; - PyDimMem_FREE(PyArray_DIMS(self)); + npy_free_cache_dim_array(self); Py_DECREF(rawdata); return PyErr_NoMemory(); } if (swap) { /* byte-swap on pickle-read */ - npy_intp numels = num / PyArray_DESCR(self)->elsize; + npy_intp numels = PyArray_SIZE(self); PyArray_DESCR(self)->f->copyswapn(PyArray_DATA(self), PyArray_DESCR(self)->elsize, datastr, PyArray_DESCR(self)->elsize, @@ -1860,7 +1867,7 @@ if (PyArray_DATA(self) == NULL) { fa->nd = 0; fa->data = PyDataMem_NEW(PyArray_DESCR(self)->elsize); - PyDimMem_FREE(PyArray_DIMS(self)); + npy_free_cache_dim_array(self); return PyErr_NoMemory(); } if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_NEEDS_INIT)) { @@ -2002,7 +2009,7 @@ return NULL; } ret = PyArray_Transpose(self, &permute); - PyDimMem_FREE(permute.ptr); + npy_free_cache_dim_obj(permute); } return ret; @@ -2317,11 +2324,12 @@ if (PyObject_IsTrue(uic)) { fa->flags = flagback; PyErr_SetString(PyExc_ValueError, - "cannot set UPDATEIFCOPY " \ + "cannot set WRITEBACKIFCOPY " \ "flag to True"); return NULL; } else { + PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEBACKIFCOPY); PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY); Py_XDECREF(fa->base); fa->base = NULL; @@ -2444,7 +2452,7 @@ static PyObject * array_getslice(PyArrayObject *self, PyObject *args) { - PyObject *start, *stop, *slice; + PyObject *start, *stop, *slice, *result; if (!PyArg_ParseTuple(args, "OO:__getslice__", &start, &stop)) { return NULL; } @@ -2455,7 +2463,9 @@ } /* Deliberately delegate to subclasses */ - return PyObject_GetItem((PyObject *)self, slice); + result = PyObject_GetItem((PyObject *)self, slice); + Py_DECREF(slice); + return result; } static PyObject * @@ -2473,9 +2483,10 @@ /* Deliberately delegate to subclasses */ if 
(PyObject_SetItem((PyObject *)self, slice, value) < 0) { + Py_DECREF(slice); return NULL; } - + Py_DECREF(slice); Py_RETURN_NONE; } @@ -2497,6 +2508,12 @@ (PyCFunction)array_ufunc, METH_VARARGS | METH_KEYWORDS, NULL}, +#ifndef NPY_PY3K + {"__unicode__", + (PyCFunction)array_unicode, + METH_NOARGS, NULL}, +#endif + /* for the sys module */ {"__sizeof__", (PyCFunction) array_sizeof, @@ -2528,6 +2545,10 @@ (PyCFunction) array_complex, METH_VARARGS, NULL}, + {"__format__", + (PyCFunction) array_format, + METH_VARARGS, NULL}, + #ifndef NPY_PY3K /* * While we could put these in `tp_sequence`, its' easier to define them @@ -2569,7 +2590,7 @@ METH_VARARGS | METH_KEYWORDS, NULL}, {"byteswap", (PyCFunction)array_byteswap, - METH_VARARGS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, {"choose", (PyCFunction)array_choose, METH_VARARGS | METH_KEYWORDS, NULL}, diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/multiarraymodule.c python-numpy-1.14.5/numpy/core/src/multiarray/multiarraymodule.c --- python-numpy-1.13.3/numpy/core/src/multiarray/multiarraymodule.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/multiarraymodule.c 2018-06-12 18:28:52.000000000 +0000 @@ -37,6 +37,7 @@ #include "arrayobject.h" #include "hashdescr.h" #include "descriptor.h" +#include "dragon4.h" #include "calculation.h" #include "number.h" #include "scalartypes.h" @@ -64,6 +65,24 @@ #include "get_attr_string.h" +/* + * global variable to determine if legacy printing is enabled, accessible from + * C. For simplicity the mode is encoded as an integer where '0' means no + * legacy mode, and '113' means 1.13 legacy mode. We can upgrade this if we + * have more complex requirements in the future. 
+ */ +int npy_legacy_print_mode = 0; + +static PyObject * +set_legacy_print_mode(PyObject *NPY_UNUSED(self), PyObject *args) +{ + if (!PyArg_ParseTuple(args, "i", &npy_legacy_print_mode)) { + return NULL; + } + Py_RETURN_NONE; +} + + /* Only here for API compatibility */ NPY_NO_EXPORT PyTypeObject PyBigArray_Type; @@ -315,20 +334,39 @@ return 0; } +/* + * Get the ndarray subclass with the highest priority + */ +NPY_NO_EXPORT PyTypeObject * +PyArray_GetSubType(int narrays, PyArrayObject **arrays) { + PyTypeObject *subtype = &PyArray_Type; + double priority = NPY_PRIORITY; + int i; + + /* Get the priority subtype for the array */ + for (i = 0; i < narrays; ++i) { + if (Py_TYPE(arrays[i]) != subtype) { + double pr = PyArray_GetPriority((PyObject *)(arrays[i]), 0.0); + if (pr > priority) { + priority = pr; + subtype = Py_TYPE(arrays[i]); + } + } + } + + return subtype; +} + /* * Concatenates a list of ndarrays. */ NPY_NO_EXPORT PyArrayObject * -PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis) +PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, + PyArrayObject* ret) { - PyTypeObject *subtype = &PyArray_Type; - double priority = NPY_PRIORITY; int iarrays, idim, ndim; - npy_intp shape[NPY_MAXDIMS], s, strides[NPY_MAXDIMS]; - int strideperm[NPY_MAXDIMS]; - PyArray_Descr *dtype = NULL; - PyArrayObject *ret = NULL; + npy_intp shape[NPY_MAXDIMS]; PyArrayObject_fields *sliding_view = NULL; if (narrays <= 0) { @@ -383,47 +421,57 @@ } } - /* Get the priority subtype for the array */ - for (iarrays = 0; iarrays < narrays; ++iarrays) { - if (Py_TYPE(arrays[iarrays]) != subtype) { - double pr = PyArray_GetPriority((PyObject *)(arrays[iarrays]), 0.0); - if (pr > priority) { - priority = pr; - subtype = Py_TYPE(arrays[iarrays]); - } + if (ret != NULL) { + if (PyArray_NDIM(ret) != ndim) { + PyErr_SetString(PyExc_ValueError, + "Output array has wrong dimensionality"); + return NULL; } + if (!PyArray_CompareLists(shape, PyArray_SHAPE(ret), 
ndim)) { + PyErr_SetString(PyExc_ValueError, + "Output array is the wrong shape"); + return NULL; + } + Py_INCREF(ret); } + else { + npy_intp s, strides[NPY_MAXDIMS]; + int strideperm[NPY_MAXDIMS]; - /* Get the resulting dtype from combining all the arrays */ - dtype = PyArray_ResultType(narrays, arrays, 0, NULL); - if (dtype == NULL) { - return NULL; - } + /* Get the priority subtype for the array */ + PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays); - /* - * Figure out the permutation to apply to the strides to match - * the memory layout of the input arrays, using ambiguity - * resolution rules matching that of the NpyIter. - */ - PyArray_CreateMultiSortedStridePerm(narrays, arrays, ndim, strideperm); - s = dtype->elsize; - for (idim = ndim-1; idim >= 0; --idim) { - int iperm = strideperm[idim]; - strides[iperm] = s; - s *= shape[iperm]; - } - - /* Allocate the array for the result. This steals the 'dtype' reference. */ - ret = (PyArrayObject *)PyArray_NewFromDescr(subtype, - dtype, - ndim, - shape, - strides, - NULL, - 0, - NULL); - if (ret == NULL) { - return NULL; + /* Get the resulting dtype from combining all the arrays */ + PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL); + if (dtype == NULL) { + return NULL; + } + + /* + * Figure out the permutation to apply to the strides to match + * the memory layout of the input arrays, using ambiguity + * resolution rules matching that of the NpyIter. + */ + PyArray_CreateMultiSortedStridePerm(narrays, arrays, ndim, strideperm); + s = dtype->elsize; + for (idim = ndim-1; idim >= 0; --idim) { + int iperm = strideperm[idim]; + strides[iperm] = s; + s *= shape[iperm]; + } + + /* Allocate the array for the result. This steals the 'dtype' reference. 
*/ + ret = (PyArrayObject *)PyArray_NewFromDescr(subtype, + dtype, + ndim, + shape, + strides, + NULL, + 0, + NULL); + if (ret == NULL) { + return NULL; + } } /* @@ -462,15 +510,10 @@ */ NPY_NO_EXPORT PyArrayObject * PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, - NPY_ORDER order) + NPY_ORDER order, PyArrayObject *ret) { - PyTypeObject *subtype = &PyArray_Type; - double priority = NPY_PRIORITY; int iarrays; - npy_intp stride; npy_intp shape = 0; - PyArray_Descr *dtype = NULL; - PyArrayObject *ret = NULL; PyArrayObject_fields *sliding_view = NULL; if (narrays <= 0) { @@ -494,36 +537,45 @@ } } - /* Get the priority subtype for the array */ - for (iarrays = 0; iarrays < narrays; ++iarrays) { - if (Py_TYPE(arrays[iarrays]) != subtype) { - double pr = PyArray_GetPriority((PyObject *)(arrays[iarrays]), 0.0); - if (pr > priority) { - priority = pr; - subtype = Py_TYPE(arrays[iarrays]); - } + if (ret != NULL) { + if (PyArray_NDIM(ret) != 1) { + PyErr_SetString(PyExc_ValueError, + "Output array must be 1D"); + return NULL; + } + if (shape != PyArray_SIZE(ret)) { + PyErr_SetString(PyExc_ValueError, + "Output array is the wrong size"); + return NULL; } + Py_INCREF(ret); } + else { + npy_intp stride; - /* Get the resulting dtype from combining all the arrays */ - dtype = PyArray_ResultType(narrays, arrays, 0, NULL); - if (dtype == NULL) { - return NULL; - } + /* Get the priority subtype for the array */ + PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays); - stride = dtype->elsize; + /* Get the resulting dtype from combining all the arrays */ + PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL); + if (dtype == NULL) { + return NULL; + } - /* Allocate the array for the result. This steals the 'dtype' reference. */ - ret = (PyArrayObject *)PyArray_NewFromDescr(subtype, - dtype, - 1, - &shape, - &stride, - NULL, - 0, - NULL); - if (ret == NULL) { - return NULL; + stride = dtype->elsize; + + /* Allocate the array for the result. 
This steals the 'dtype' reference. */ + ret = (PyArrayObject *)PyArray_NewFromDescr(subtype, + dtype, + 1, + &shape, + &stride, + NULL, + 0, + NULL); + if (ret == NULL) { + return NULL; + } } /* @@ -558,22 +610,11 @@ return ret; } - -/*NUMPY_API - * Concatenate - * - * Concatenate an arbitrary Python sequence into an array. - * op is a python object supporting the sequence interface. - * Its elements will be concatenated together to form a single - * multidimensional array. If axis is NPY_MAXDIMS or bigger, then - * each sequence object will be flattened before concatenation -*/ NPY_NO_EXPORT PyObject * -PyArray_Concatenate(PyObject *op, int axis) +PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret) { int iarrays, narrays; PyArrayObject **arrays; - PyArrayObject *ret; if (!PySequence_Check(op)) { PyErr_SetString(PyExc_TypeError, @@ -606,10 +647,10 @@ } if (axis >= NPY_MAXDIMS) { - ret = PyArray_ConcatenateFlattenedArrays(narrays, arrays, NPY_CORDER); + ret = PyArray_ConcatenateFlattenedArrays(narrays, arrays, NPY_CORDER, ret); } else { - ret = PyArray_ConcatenateArrays(narrays, arrays, axis); + ret = PyArray_ConcatenateArrays(narrays, arrays, axis, ret); } for (iarrays = 0; iarrays < narrays; ++iarrays) { @@ -629,6 +670,21 @@ return NULL; } +/*NUMPY_API + * Concatenate + * + * Concatenate an arbitrary Python sequence into an array. + * op is a python object supporting the sequence interface. + * Its elements will be concatenated together to form a single + * multidimensional array. 
If axis is NPY_MAXDIMS or bigger, then + * each sequence object will be flattened before concatenation +*/ +NPY_NO_EXPORT PyObject * +PyArray_Concatenate(PyObject *op, int axis) +{ + return PyArray_ConcatenateInto(op, axis, NULL); +} + static int _signbit_set(PyArrayObject *arr) { @@ -759,32 +815,17 @@ int nd, npy_intp dimensions[], int typenum, PyArrayObject **result) { PyArrayObject *out_buf; - PyTypeObject *subtype; - double prior1, prior2; - /* - * Need to choose an output array that can hold a sum - * -- use priority to determine which subtype. - */ - if (Py_TYPE(ap2) != Py_TYPE(ap1)) { - prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); - prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1)); - } - else { - prior1 = prior2 = 0.0; - subtype = Py_TYPE(ap1); - } + if (out) { int d; /* verify that out is usable */ - if (Py_TYPE(out) != subtype || - PyArray_NDIM(out) != nd || + if (PyArray_NDIM(out) != nd || PyArray_TYPE(out) != typenum || !PyArray_ISCARRAY(out)) { PyErr_SetString(PyExc_ValueError, - "output array is not acceptable " - "(must have the right type, nr dimensions, and be a C-Array)"); + "output array is not acceptable (must have the right datatype, " + "number of dimensions, and be a C-Array)"); return 0; } for (d = 0; d < nd; ++d) { @@ -807,7 +848,7 @@ /* set copy-back */ Py_INCREF(out); - if (PyArray_SetUpdateIfCopyBase(out_buf, out) < 0) { + if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) { Py_DECREF(out); Py_DECREF(out_buf); return NULL; @@ -825,18 +866,35 @@ return out_buf; } + else { + PyTypeObject *subtype; + double prior1, prior2; + /* + * Need to choose an output array that can hold a sum + * -- use priority to determine which subtype. + */ + if (Py_TYPE(ap2) != Py_TYPE(ap1)) { + prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); + prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); + subtype = (prior2 > prior1 ? 
Py_TYPE(ap2) : Py_TYPE(ap1)); + } + else { + prior1 = prior2 = 0.0; + subtype = Py_TYPE(ap1); + } - out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, - typenum, NULL, NULL, 0, 0, - (PyObject *) - (prior2 > prior1 ? ap2 : ap1)); + out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, + typenum, NULL, NULL, 0, 0, + (PyObject *) + (prior2 > prior1 ? ap2 : ap1)); + + if (out_buf != NULL && result) { + Py_INCREF(out_buf); + *result = out_buf; + } - if (out_buf != NULL && result) { - Py_INCREF(out_buf); - *result = out_buf; + return out_buf; } - - return out_buf; } /* Could perhaps be redone to not make contiguous arrays */ @@ -1062,6 +1120,7 @@ Py_DECREF(ap2); /* Trigger possible copy-back into `result` */ + PyArray_ResolveWritebackIfCopy(out_buf); Py_DECREF(out_buf); return (PyObject *)result; @@ -1418,29 +1477,34 @@ /* * Compare the field dictionaries for two types. * - * Return 1 if the contents are the same, 0 if not. + * Return 1 if the field types and field names of the two descrs are equal and + * in the same order, 0 if not. 
*/ static int -_equivalent_fields(PyObject *field1, PyObject *field2) { +_equivalent_fields(PyArray_Descr *type1, PyArray_Descr *type2) { - int same, val; + int val; - if (field1 == field2) { + if (type1->fields == type2->fields && type1->names == type2->names) { return 1; } - if (field1 == NULL || field2 == NULL) { + if (type1->fields == NULL || type2->fields == NULL) { return 0; } - val = PyObject_RichCompareBool(field1, field2, Py_EQ); + val = PyObject_RichCompareBool(type1->fields, type2->fields, Py_EQ); if (val != 1 || PyErr_Occurred()) { - same = 0; + PyErr_Clear(); + return 0; } - else { - same = 1; + + val = PyObject_RichCompareBool(type1->names, type2->names, Py_EQ); + if (val != 1 || PyErr_Occurred()) { + PyErr_Clear(); + return 0; } - PyErr_Clear(); - return same; + + return 1; } /* @@ -1499,10 +1563,8 @@ return ((type_num1 == type_num2) && _equivalent_subarrays(type1->subarray, type2->subarray)); } - if (type_num1 == NPY_VOID - || type_num2 == NPY_VOID) { - return ((type_num1 == type_num2) - && _equivalent_fields(type1->fields, type2->fields)); + if (type_num1 == NPY_VOID || type_num2 == NPY_VOID) { + return ((type_num1 == type_num2) && _equivalent_fields(type1, type2)); } if (type_num1 == NPY_DATETIME || type_num1 == NPY_TIMEDELTA @@ -1662,7 +1724,7 @@ ndmin_obj = PyDict_GetItem(kws, npy_ma_str_ndmin); if (ndmin_obj) { ndmin = PyLong_AsLong(ndmin_obj); - if (ndmin == -1 && PyErr_Occurred()) { + if (error_converting(ndmin)) { goto clean_type; } else if (ndmin > NPY_MAXDIMS) { @@ -1853,12 +1915,12 @@ ret = (PyArrayObject *)PyArray_Empty(shape.len, shape.ptr, typecode, is_f_order); - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); return (PyObject *)ret; fail: Py_XDECREF(typecode); - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); return NULL; } @@ -2007,12 +2069,12 @@ ret = (PyArrayObject *)PyArray_Zeros(shape.len, shape.ptr, typecode, (int) is_f_order); - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); return (PyObject 
*)ret; fail: Py_XDECREF(typecode); - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); return (PyObject *)ret; } @@ -2056,6 +2118,17 @@ Py_XDECREF(descr); return NULL; } + + /* binary mode, condition copied from PyArray_FromString */ + if (sep == NULL || strlen(sep) == 0) { + /* Numpy 1.14, 2017-10-19 */ + if (DEPRECATE( + "The binary mode of fromstring is deprecated, as it behaves " + "surprisingly on unicode inputs. Use frombuffer instead") < 0) { + Py_DECREF(descr); + return NULL; + } + } return PyArray_FromString(data, (npy_intp)s, descr, (npy_intp)nin, sep); } @@ -2156,14 +2229,24 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject *a0; + PyObject *out = NULL; int axis = 0; - static char *kwlist[] = {"seq", "axis", NULL}; + static char *kwlist[] = {"seq", "axis", "out", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&:concatenate", kwlist, - &a0, PyArray_AxisConverter, &axis)) { + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O:concatenate", kwlist, + &a0, PyArray_AxisConverter, &axis, &out)) { return NULL; } - return PyArray_Concatenate(a0, axis); + if (out != NULL) { + if (out == Py_None) { + out = NULL; + } + else if (!PyArray_Check(out)) { + PyErr_SetString(PyExc_TypeError, "'out' must be an array"); + return NULL; + } + } + return PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out); } static PyObject * @@ -2947,7 +3030,7 @@ } ret = PyArray_NewFromDescr(subtype, dtype, (int)shape.len, shape.ptr, NULL, NULL, 0, NULL); - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); evil_global_disable_warn_O4O8_flag = 0; @@ -2957,7 +3040,7 @@ evil_global_disable_warn_O4O8_flag = 0; Py_XDECREF(dtype); - PyDimMem_FREE(shape.ptr); + npy_free_cache_dim_obj(shape); return NULL; } @@ -3224,7 +3307,7 @@ npy_bool ret; PyObject *retobj = NULL; NPY_CASTING casting = NPY_SAFE_CASTING; - static char *kwlist[] = {"from", "to", "casting", NULL}; + static char *kwlist[] = {"from_", "to", "casting", NULL}; 
if(!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|O&:can_cast", kwlist, &from_obj, @@ -3520,14 +3603,128 @@ #undef _test_code + +/* + * Prints floating-point scalars usign the Dragon4 algorithm, scientific mode. + * See docstring of `np.format_float_scientific` for description of arguments. + * The differences is that a value of -1 is valid for pad_left, exp_digits, + * precision, which is equivalent to `None`. + */ +static PyObject * +dragon4_scientific(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +{ + PyObject *obj; + static char *kwlist[] = {"x", "precision", "unique", "sign", "trim", + "pad_left", "exp_digits", NULL}; + int precision=-1, pad_left=-1, exp_digits=-1; + char *trimstr=NULL; + DigitMode digit_mode; + TrimMode trim = TrimMode_None; + int sign=0, unique=1; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iiisii:dragon4_scientific", + kwlist, &obj, &precision, &unique, &sign, &trimstr, &pad_left, + &exp_digits)) { + return NULL; + } + + if (trimstr != NULL) { + if (strcmp(trimstr, "k") == 0) { + trim = TrimMode_None; + } + else if (strcmp(trimstr, ".") == 0) { + trim = TrimMode_Zeros; + } + else if (strcmp(trimstr, "0") == 0) { + trim = TrimMode_LeaveOneZero; + } + else if (strcmp(trimstr, "-") == 0) { + trim = TrimMode_DptZeros; + } + else { + PyErr_SetString(PyExc_TypeError, + "if supplied, trim must be 'k', '.', '0' or '-'"); + return NULL; + } + } + + digit_mode = unique ? DigitMode_Unique : DigitMode_Exact; + + if (unique == 0 && precision < 0) { + PyErr_SetString(PyExc_TypeError, + "in non-unique mode `precision` must be supplied"); + return NULL; + } + + return Dragon4_Scientific(obj, digit_mode, precision, sign, trim, + pad_left, exp_digits); +} + +/* + * Prints floating-point scalars usign the Dragon4 algorithm, positional mode. + * See docstring of `np.format_float_positional` for description of arguments. 
+ * The differences is that a value of -1 is valid for pad_left, pad_right, + * precision, which is equivalent to `None`. + */ +static PyObject * +dragon4_positional(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +{ + PyObject *obj; + static char *kwlist[] = {"x", "precision", "unique", "fractional", + "sign", "trim", "pad_left", "pad_right", NULL}; + int precision=-1, pad_left=-1, pad_right=-1; + char *trimstr=NULL; + CutoffMode cutoff_mode; + DigitMode digit_mode; + TrimMode trim = TrimMode_None; + int sign=0, unique=1, fractional=0; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iiiisii:dragon4_positional", + kwlist, &obj, &precision, &unique, &fractional, &sign, &trimstr, + &pad_left, &pad_right)) { + return NULL; + } + + if (trimstr != NULL) { + if (strcmp(trimstr, "k") == 0) { + trim = TrimMode_None; + } + else if (strcmp(trimstr, ".") == 0) { + trim = TrimMode_Zeros; + } + else if (strcmp(trimstr, "0") == 0) { + trim = TrimMode_LeaveOneZero; + } + else if (strcmp(trimstr, "-") == 0) { + trim = TrimMode_DptZeros; + } + else { + PyErr_SetString(PyExc_TypeError, + "if supplied, trim must be 'k', '.', '0' or '-'"); + return NULL; + } + } + + digit_mode = unique ? DigitMode_Unique : DigitMode_Exact; + cutoff_mode = fractional ? 
CutoffMode_FractionLength : + CutoffMode_TotalLength; + + if (unique == 0 && precision < 0) { + PyErr_SetString(PyExc_TypeError, + "in non-unique mode `precision` must be supplied"); + return NULL; + } + + return Dragon4_Positional(obj, digit_mode, cutoff_mode, precision, sign, + trim, pad_left, pad_right); +} + static PyObject * format_longfloat(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject *obj; unsigned int precision; - npy_longdouble x; static char *kwlist[] = {"x", "precision", NULL}; - static char repr[100]; if (!PyArg_ParseTupleAndKeywords(args, kwds, "OI:format_longfloat", kwlist, &obj, &precision)) { @@ -3538,12 +3735,8 @@ "not a longfloat"); return NULL; } - x = ((PyLongDoubleScalarObject *)obj)->obval; - if (precision > 70) { - precision = 70; - } - format_longdouble(repr, 100, x, precision); - return PyUString_FromString(repr); + return Dragon4_Scientific(obj, DigitMode_Unique, precision, 0, + TrimMode_LeaveOneZero, -1, -1); } static PyObject * @@ -4226,6 +4419,12 @@ {"format_longfloat", (PyCFunction)format_longfloat, METH_VARARGS | METH_KEYWORDS, NULL}, + {"dragon4_positional", + (PyCFunction)dragon4_positional, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"dragon4_scientific", + (PyCFunction)dragon4_scientific, + METH_VARARGS | METH_KEYWORDS, NULL}, {"compare_chararrays", (PyCFunction)compare_chararrays, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -4259,6 +4458,8 @@ METH_VARARGS | METH_KEYWORDS, NULL}, {"normalize_axis_index", (PyCFunction)normalize_axis_index, METH_VARARGS | METH_KEYWORDS, NULL}, + {"set_legacy_print_mode", (PyCFunction)set_legacy_print_mode, + METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -4465,6 +4666,7 @@ _addnew(CONTIGUOUS, NPY_ARRAY_C_CONTIGUOUS, C); _addnew(ALIGNED, NPY_ARRAY_ALIGNED, A); _addnew(UPDATEIFCOPY, NPY_ARRAY_UPDATEIFCOPY, U); + _addnew(WRITEBACKIFCOPY, NPY_ARRAY_WRITEBACKIFCOPY, X); _addnew(WRITEABLE, NPY_ARRAY_WRITEABLE, W); _addone(C_CONTIGUOUS, NPY_ARRAY_C_CONTIGUOUS); 
_addone(F_CONTIGUOUS, NPY_ARRAY_F_CONTIGUOUS); @@ -4607,15 +4809,13 @@ if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) { return RETVAL; } -/* FIXME - * There is no error handling here - */ + c_api = NpyCapsule_FromVoidPtr((void *)PyArray_API, NULL); - PyDict_SetItemString(d, "_ARRAY_API", c_api); - Py_DECREF(c_api); - if (PyErr_Occurred()) { + if (c_api == NULL) { goto err; } + PyDict_SetItemString(d, "_ARRAY_API", c_api); + Py_DECREF(c_api); /* * PyExc_Exception should catch all the standard errors that are @@ -4633,10 +4833,10 @@ PyDict_SetItemString(d, "__version__", s); Py_DECREF(s); -/* FIXME - * There is no error handling here - */ s = NpyCapsule_FromVoidPtr((void *)_datetime_strings, NULL); + if (s == NULL) { + goto err; + } PyDict_SetItemString(d, "DATETIMEUNITS", s); Py_DECREF(s); @@ -4666,23 +4866,15 @@ ADDCONST(MAY_SHARE_EXACT); #undef ADDCONST - Py_INCREF(&PyArray_Type); PyDict_SetItemString(d, "ndarray", (PyObject *)&PyArray_Type); - Py_INCREF(&PyArrayIter_Type); PyDict_SetItemString(d, "flatiter", (PyObject *)&PyArrayIter_Type); - Py_INCREF(&PyArrayMultiIter_Type); PyDict_SetItemString(d, "nditer", (PyObject *)&NpyIter_Type); - Py_INCREF(&NpyIter_Type); PyDict_SetItemString(d, "broadcast", (PyObject *)&PyArrayMultiIter_Type); - Py_INCREF(&PyArrayDescr_Type); PyDict_SetItemString(d, "dtype", (PyObject *)&PyArrayDescr_Type); - - Py_INCREF(&PyArrayFlags_Type); PyDict_SetItemString(d, "flagsobj", (PyObject *)&PyArrayFlags_Type); /* Business day calendar object */ - Py_INCREF(&NpyBusDayCalendar_Type); PyDict_SetItemString(d, "busdaycalendar", (PyObject *)&NpyBusDayCalendar_Type); set_flaginfo(d); diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/multiarray_tests.c.src python-numpy-1.14.5/numpy/core/src/multiarray/multiarray_tests.c.src --- python-numpy-1.13.3/numpy/core/src/multiarray/multiarray_tests.c.src 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/multiarray_tests.c.src 2018-06-12 18:28:52.000000000 +0000 
@@ -3,8 +3,10 @@ #include #define _NPY_NO_DEPRECATIONS /* for NPY_CHAR */ #include "numpy/arrayobject.h" +#include "numpy/npy_math.h" #include "mem_overlap.h" #include "npy_extint128.h" +#include "common.h" /* test PyArray_IsPythonScalar, before including private py3 compat header */ static PyObject * @@ -617,6 +619,66 @@ return (PyObject *)descr; } +/* used to test UPDATEIFCOPY usage emits deprecation warning */ +static PyObject* +npy_updateifcopy_deprecation(PyObject* NPY_UNUSED(self), PyObject* args) +{ + int flags; + PyObject* array; + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_TypeError, "test needs ndarray input"); + return NULL; + } + flags = NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY; + array = PyArray_FromArray((PyArrayObject*)args, NULL, flags); + if (array == NULL) + return NULL; + PyArray_ResolveWritebackIfCopy((PyArrayObject*)array); + Py_DECREF(array); + Py_RETURN_NONE; +} + +/* used to create array with WRITEBACKIFCOPY flag */ +static PyObject* +npy_create_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args) +{ + int flags; + PyObject* array; + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_TypeError, "test needs ndarray input"); + return NULL; + } + flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY; + array = PyArray_FromArray((PyArrayObject*)args, NULL, flags); + if (array == NULL) + return NULL; + return array; +} + +/* resolve WRITEBACKIFCOPY */ +static PyObject* +npy_resolve(PyObject* NPY_UNUSED(self), PyObject* args) +{ + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_TypeError, "test needs ndarray input"); + return NULL; + } + PyArray_ResolveWritebackIfCopy((PyArrayObject*)args); + Py_RETURN_NONE; +} + +/* resolve WRITEBACKIFCOPY */ +static PyObject* +npy_discard(PyObject* NPY_UNUSED(self), PyObject* args) +{ + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_TypeError, "test needs ndarray input"); + return NULL; + } + PyArray_DiscardWritebackIfCopy((PyArrayObject*)args); + Py_RETURN_NONE; +} + #if 
!defined(NPY_PY3K) static PyObject * int_subclass(PyObject *dummy, PyObject *args) @@ -1000,11 +1062,11 @@ for (j = 0; j < nterms; ++j) { terms[j].a = (npy_int64)PyInt_AsSsize_t(PyTuple_GET_ITEM(A, j)); - if (terms[j].a == -1 && PyErr_Occurred()) { + if (error_converting(terms[j].a)) { goto fail; } terms[j].ub = (npy_int64)PyInt_AsSsize_t(PyTuple_GET_ITEM(U, j)); - if (terms[j].ub == -1 && PyErr_Occurred()) { + if (error_converting(terms[j].ub)) { goto fail; } } @@ -1559,6 +1621,125 @@ } +static char get_fpu_mode_doc[] = ( + "get_fpu_mode()\n" + "\n" + "Get the current FPU control word, in a platform-dependent format.\n" + "Returns None if not implemented on current platform."); + +static PyObject * +get_fpu_mode(PyObject *NPY_UNUSED(self), PyObject *args) +{ + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } + +#if defined(_MSC_VER) + { + unsigned int result = 0; + result = _controlfp(0, 0); + return PyLong_FromLongLong(result); + } +#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) + { + unsigned short cw = 0; + __asm__("fstcw %w0" : "=m" (cw)); + return PyLong_FromLongLong(cw); + } +#else + Py_RETURN_NONE; +#endif +} + +/* + * npymath wrappers + */ + +/**begin repeat + * #name = cabs, carg# + */ + +/**begin repeat1 + * #itype = npy_cfloat, npy_cdouble, npy_clongdouble# + * #ITYPE = NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE# + * #otype = npy_float, npy_double, npy_longdouble# + * #OTYPE = NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE# + * #suffix= f, , l# + */ + +static PyObject * +call_npy_@name@@suffix@(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *z_py = NULL, *z_arr = NULL, *w_arr = NULL; + + if (!PyArg_ParseTuple(args, "O", &z_py)) { + return NULL; + } + + z_arr = PyArray_FROMANY(z_py, @ITYPE@, 0, 0, NPY_ARRAY_CARRAY_RO); + if (z_arr == NULL) { + return NULL; + } + + w_arr = PyArray_SimpleNew(0, NULL, @OTYPE@); + if (w_arr == NULL) { + Py_DECREF(z_arr); + return NULL; + } + + *(@otype@*)PyArray_DATA((PyArrayObject *)w_arr) = + 
npy_@name@@suffix@(*(@itype@*)PyArray_DATA((PyArrayObject *)z_arr)); + + Py_DECREF(z_arr); + return w_arr; +} + +/**end repeat1**/ + +/**end repeat**/ + +/**begin repeat + * #name = log10, cosh, sinh, tan, tanh# + */ + +/**begin repeat1 + * #type = npy_float, npy_double, npy_longdouble# + * #TYPE = NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE# + * #suffix= f, , l# + */ + +static PyObject * +call_npy_@name@@suffix@(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *z_py = NULL, *z_arr = NULL, *w_arr = NULL; + + if (!PyArg_ParseTuple(args, "O", &z_py)) { + return NULL; + } + + z_arr = PyArray_FROMANY(z_py, @TYPE@, 0, 0, NPY_ARRAY_CARRAY_RO); + if (z_arr == NULL) { + return NULL; + } + + w_arr = PyArray_SimpleNew(0, NULL, @TYPE@); + if (w_arr == NULL) { + Py_DECREF(z_arr); + return NULL; + } + + *(@type@*)PyArray_DATA((PyArrayObject *)w_arr) = + npy_@name@@suffix@(*(@type@*)PyArray_DATA((PyArrayObject *)z_arr)); + + Py_DECREF(z_arr); + return w_arr; +} + +/**end repeat1**/ + +/**end repeat**/ + + static PyMethodDef Multiarray_TestsMethods[] = { {"IsPythonScalar", IsPythonScalar, @@ -1587,6 +1768,18 @@ {"npy_char_deprecation", npy_char_deprecation, METH_NOARGS, NULL}, + {"npy_updateifcopy_deprecation", + npy_updateifcopy_deprecation, + METH_O, NULL}, + {"npy_create_writebackifcopy", + npy_create_writebackifcopy, + METH_O, NULL}, + {"npy_resolve", + npy_resolve, + METH_O, NULL}, + {"npy_discard", + npy_discard, + METH_O, NULL}, #if !defined(NPY_PY3K) {"test_int_subclass", int_subclass, @@ -1649,6 +1842,37 @@ {"extint_ceildiv_128_64", extint_ceildiv_128_64, METH_VARARGS, NULL}, + {"get_fpu_mode", + get_fpu_mode, + METH_VARARGS, get_fpu_mode_doc}, +/**begin repeat + * #name = cabs, carg# + */ + +/**begin repeat1 + * #suffix = f, , l# + */ + {"npy_@name@@suffix@", + call_npy_@name@@suffix@, + METH_VARARGS, NULL}, +/**end repeat1**/ + +/**end repeat**/ + +/**begin repeat + * #name = log10, cosh, sinh, tan, tanh# + */ + +/**begin repeat1 + * #suffix= f, , l# + */ + 
{"npy_@name@@suffix@", + call_npy_@name@@suffix@, + METH_VARARGS, NULL}, +/**end repeat1**/ + +/**end repeat**/ + {NULL, NULL, 0, NULL} /* Sentinel */ }; diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/nditer_pywrap.c python-numpy-1.14.5/numpy/core/src/multiarray/nditer_pywrap.c --- python-numpy-1.13.3/numpy/core/src/multiarray/nditer_pywrap.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/nditer_pywrap.c 2018-06-12 18:28:52.000000000 +0000 @@ -15,6 +15,8 @@ #include #include "npy_config.h" #include "npy_pycompat.h" +#include "alloc.h" +#include "common.h" typedef struct NewNpyArrayIterObject_tag NewNpyArrayIterObject; @@ -692,7 +694,7 @@ int fromanyflags = 0; if (op_flags[iop]&(NPY_ITER_READWRITE|NPY_ITER_WRITEONLY)) { - fromanyflags |= NPY_ARRAY_UPDATEIFCOPY; + fromanyflags |= NPY_ARRAY_WRITEBACKIFCOPY; } ao = (PyArrayObject *)PyArray_FROM_OF((PyObject *)op[iop], fromanyflags); @@ -758,7 +760,7 @@ &op_axes_in, PyArray_IntpConverter, &itershape, &buffersize)) { - PyDimMem_FREE(itershape.ptr); + npy_free_cache_dim_obj(itershape); return -1; } @@ -804,7 +806,7 @@ } } else if (itershape.ptr != NULL) { - PyDimMem_FREE(itershape.ptr); + npy_free_cache_dim_obj(itershape); itershape.ptr = NULL; } @@ -832,7 +834,7 @@ self->finished = 0; } - PyDimMem_FREE(itershape.ptr); + npy_free_cache_dim_obj(itershape); /* Release the references we got to the ops and dtypes */ for (iop = 0; iop < nop; ++iop) { @@ -843,7 +845,7 @@ return 0; fail: - PyDimMem_FREE(itershape.ptr); + npy_free_cache_dim_obj(itershape); for (iop = 0; iop < nop; ++iop) { Py_XDECREF(op[iop]); Py_XDECREF(op_request_dtypes[iop]); @@ -1618,7 +1620,7 @@ for (idim = 0; idim < ndim; ++idim) { PyObject *v = PySequence_GetItem(value, idim); multi_index[idim] = PyInt_AsLong(v); - if (multi_index[idim]==-1 && PyErr_Occurred()) { + if (error_converting(multi_index[idim])) { Py_XDECREF(v); return -1; } @@ -1678,7 +1680,7 @@ if (NpyIter_HasIndex(self->iter)) { npy_intp ind; ind 
= PyInt_AsLong(value); - if (ind==-1 && PyErr_Occurred()) { + if (error_converting(ind)) { return -1; } if (NpyIter_GotoIndex(self->iter, ind) != NPY_SUCCEED) { @@ -1728,7 +1730,7 @@ } iterindex = PyInt_AsLong(value); - if (iterindex==-1 && PyErr_Occurred()) { + if (error_converting(iterindex)) { return -1; } if (NpyIter_GotoIterIndex(self->iter, iterindex) != NPY_SUCCEED) { @@ -2256,7 +2258,7 @@ if (PyInt_Check(op) || PyLong_Check(op) || (PyIndex_Check(op) && !PySequence_Check(op))) { npy_intp i = PyArray_PyIntAsIntp(op); - if (i == -1 && PyErr_Occurred()) { + if (error_converting(i)) { return NULL; } return npyiter_seq_item(self, i); @@ -2305,7 +2307,7 @@ if (PyInt_Check(op) || PyLong_Check(op) || (PyIndex_Check(op) && !PySequence_Check(op))) { npy_intp i = PyArray_PyIntAsIntp(op); - if (i == -1 && PyErr_Occurred()) { + if (error_converting(i)) { return -1; } return npyiter_seq_ass_item(self, i, value); diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/number.c python-numpy-1.14.5/numpy/core/src/multiarray/number.c --- python-numpy-1.13.3/numpy/core/src/multiarray/number.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/number.c 2018-06-12 18:28:52.000000000 +0000 @@ -16,6 +16,15 @@ #include "binop_override.h" +/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */ +#if (PY_VERSION_HEX < 0x02070B00) || \ + ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400)) + #define _Py_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x)) +#else + #define _Py_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) +#endif + + /************************************************************************* **************** Implement Number Protocol **************************** *************************************************************************/ @@ -445,7 +454,7 @@ return NPY_NOSCALAR; } val = PyInt_AsSsize_t(value); - if (val == -1 && PyErr_Occurred()) { + if (error_converting(val)) { PyErr_Clear(); 
return NPY_NOSCALAR; } @@ -785,7 +794,7 @@ n = PyArray_SIZE(mp); if (n == 1) { int res; - if (Py_EnterRecursiveCall(" while converting array to bool")) { + if (_Py_EnterRecursiveCall(" while converting array to bool")) { return -1; } res = PyArray_DESCR(mp)->f->nonzero(PyArray_DATA(mp), mp); @@ -797,6 +806,12 @@ return res; } else if (n == 0) { + /* 2017-09-25, 1.14 */ + if (DEPRECATE("The truth value of an empty array is ambiguous. " + "Returning False, but in future this will result in an error. " + "Use `array.size > 0` to check that an array is not empty.") < 0) { + return -1; + } return 0; } else { @@ -808,213 +823,112 @@ } } - +/* + * Convert the array to a scalar if allowed, and apply the builtin function + * to it. The where argument is passed onto Py_EnterRecursiveCall when the + * array contains python objects. + */ NPY_NO_EXPORT PyObject * -array_int(PyArrayObject *v) +array_scalar_forward(PyArrayObject *v, + PyObject *(*builtin_func)(PyObject *), + const char *where) { - PyObject *pv, *pv2; + PyObject *scalar; if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can be"\ + PyErr_SetString(PyExc_TypeError, "only size-1 arrays can be"\ " converted to Python scalars"); return NULL; } - pv = PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - Py_DECREF(pv); + + scalar = PyArray_GETITEM(v, PyArray_DATA(v)); + if (scalar == NULL) { return NULL; } - if (Py_TYPE(pv)->tp_as_number->nb_int == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to int"); - Py_DECREF(pv); - return NULL; + + /* Need to guard against recursion if our array holds references */ + if (PyDataType_REFCHK(PyArray_DESCR(v))) { + PyObject *res; + if (_Py_EnterRecursiveCall(where) != 0) { + Py_DECREF(scalar); + return NULL; + } + res = 
builtin_func(scalar); + Py_DECREF(scalar); + Py_LeaveRecursiveCall(); + return res; } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. - */ - if (PyArray_Check(pv) && - PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - Py_DECREF(pv); - return NULL; + else { + PyObject *res; + res = builtin_func(scalar); + Py_DECREF(scalar); + return res; } - - pv2 = Py_TYPE(pv)->tp_as_number->nb_int(pv); - Py_DECREF(pv); - return pv2; } -static PyObject * + +NPY_NO_EXPORT PyObject * array_float(PyArrayObject *v) { - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to a "\ - "float; scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_float == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to float"); - Py_DECREF(pv); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && - PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - Py_DECREF(pv); - return NULL; - } - pv2 = Py_TYPE(pv)->tp_as_number->nb_float(pv); - Py_DECREF(pv); - return pv2; + return array_scalar_forward(v, &PyNumber_Float, " in ndarray.__float__"); } -#if !defined(NPY_PY3K) +#if defined(NPY_PY3K) -static PyObject * +NPY_NO_EXPORT PyObject * +array_int(PyArrayObject *v) +{ + return array_scalar_forward(v, &PyNumber_Long, " in ndarray.__int__"); +} + +#else + +NPY_NO_EXPORT PyObject * +array_int(PyArrayObject *v) +{ + return array_scalar_forward(v, &PyNumber_Int, " in ndarray.__int__"); +} + +NPY_NO_EXPORT PyObject * array_long(PyArrayObject *v) { - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_long == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to long"); - Py_DECREF(pv); + return array_scalar_forward(v, &PyNumber_Long, " in ndarray.__long__"); +} + +/* hex and oct aren't exposed to the C api, but we need a function pointer */ +static PyObject * +_PyNumber_Oct(PyObject *o) { + PyObject *res; + PyObject *mod = PyImport_ImportModule("__builtin__"); + if (mod == NULL) { return NULL; } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && - PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - Py_DECREF(pv); + res = PyObject_CallMethod(mod, "oct", "(O)", o); + Py_DECREF(mod); + return res; +} + +static PyObject * +_PyNumber_Hex(PyObject *o) { + PyObject *res; + PyObject *mod = PyImport_ImportModule("__builtin__"); + if (mod == NULL) { return NULL; } - pv2 = Py_TYPE(pv)->tp_as_number->nb_long(pv); - Py_DECREF(pv); - return pv2; + res = PyObject_CallMethod(mod, "hex", "(O)", o); + Py_DECREF(mod); + return res; } -static PyObject * +NPY_NO_EXPORT PyObject * array_oct(PyArrayObject *v) { - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_oct == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to oct"); - Py_DECREF(pv); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && - PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - Py_DECREF(pv); - return NULL; - } - pv2 = Py_TYPE(pv)->tp_as_number->nb_oct(pv); - Py_DECREF(pv); - return pv2; + return array_scalar_forward(v, &_PyNumber_Oct, " in ndarray.__oct__"); } -static PyObject * +NPY_NO_EXPORT PyObject * array_hex(PyArrayObject *v) { - PyObject *pv, *pv2; - if (PyArray_SIZE(v) != 1) { - PyErr_SetString(PyExc_TypeError, "only length-1 arrays can "\ - "be converted to Python scalars"); - return NULL; - } - pv = PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v); - if (pv == NULL) { - return NULL; - } - if (Py_TYPE(pv)->tp_as_number == 0) { - PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ - "scalar object is not a number"); - Py_DECREF(pv); - return NULL; - } - if (Py_TYPE(pv)->tp_as_number->nb_hex == 0) { - PyErr_SetString(PyExc_TypeError, "don't know how to convert "\ - "scalar number to hex"); - Py_DECREF(pv); - return NULL; - } - /* - * If we still got an array which can hold references, stop - * because it could point back at 'v'. 
- */ - if (PyArray_Check(pv) && - PyDataType_REFCHK(PyArray_DESCR((PyArrayObject *)pv))) { - PyErr_SetString(PyExc_TypeError, - "object array may be self-referencing"); - Py_DECREF(pv); - return NULL; - } - pv2 = Py_TYPE(pv)->tp_as_number->nb_hex(pv); - Py_DECREF(pv); - return pv2; + return array_scalar_forward(v, &_PyNumber_Hex, " in ndarray.__hex__"); } #endif @@ -1033,7 +947,7 @@ "only integer scalar arrays can be converted to a scalar index"); return NULL; } - return PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v); + return PyArray_GETITEM(v, PyArray_DATA(v)); } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/numpyos.c python-numpy-1.14.5/numpy/core/src/multiarray/numpyos.c --- python-numpy-1.13.3/numpy/core/src/multiarray/numpyos.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/numpyos.c 2018-06-12 18:28:52.000000000 +0000 @@ -570,12 +570,11 @@ errno = 0; result = strtold_l(s, endptr, clocale); freelocale(clocale); - if (errno) { - *endptr = (char*)s; - } } else { - *endptr = (char*)s; + if (endptr != NULL) { + *endptr = (char*)s; + } result = 0; } return result; diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/scalarapi.c python-numpy-1.14.5/numpy/core/src/multiarray/scalarapi.c --- python-numpy-1.13.3/numpy/core/src/multiarray/scalarapi.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/scalarapi.c 2018-06-12 17:35:36.000000000 +0000 @@ -415,7 +415,7 @@ else if (PyLong_Check(object)) { npy_longlong val; val = PyLong_AsLongLong(object); - if (val==-1 && PyErr_Occurred()) { + if (error_converting(val)) { PyErr_Clear(); return NULL; } @@ -567,7 +567,7 @@ } descr = PyArray_DescrFromTypeObject((PyObject *)Py_TYPE(sc)); - if (descr->elsize == 0) { + if (PyDataType_ISUNSIZED(descr)) { PyArray_DESCR_REPLACE(descr); type_num = descr->type_num; if (type_num == NPY_STRING) { diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/scalartypes.c.src 
python-numpy-1.14.5/numpy/core/src/multiarray/scalartypes.c.src --- python-numpy-1.13.3/numpy/core/src/multiarray/scalartypes.c.src 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/scalartypes.c.src 2018-06-12 18:28:52.000000000 +0000 @@ -24,6 +24,10 @@ #include "scalartypes.h" #include "_datetime.h" #include "datetime_strings.h" +#include "alloc.h" +#include "npy_import.h" +#include "dragon4.h" +#include "npy_longdouble.h" #include @@ -194,9 +198,21 @@ } } +static PyObject * +gentype_add(PyObject *m1, PyObject* m2) +{ + /* special case str.__radd__, which should not call array_add */ + if (PyString_Check(m1) || PyUnicode_Check(m1)) { + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_add, gentype_add); + return PyArray_Type.tp_as_number->nb_add(m1, m2); +} + /**begin repeat * - * #name = add, subtract, remainder, divmod, lshift, rshift, + * #name = subtract, remainder, divmod, lshift, rshift, * and, xor, or, floor_divide, true_divide# */ static PyObject * @@ -243,7 +259,7 @@ (Py_TYPE(m1)->tp_as_number->nb_multiply == NULL))) { /* Try to convert m2 to an int and try sequence repeat */ repeat = PyArray_PyIntAsIntp(m2); - if (repeat == -1 && PyErr_Occurred()) { + if (error_converting(repeat)) { return NULL; } /* Note that npy_intp is compatible to Py_Ssize_t */ @@ -256,7 +272,7 @@ (Py_TYPE(m2)->tp_as_number->nb_multiply == NULL))) { /* Try to convert m1 to an int and try sequence repeat */ repeat = PyArray_PyIntAsIntp(m1); - if (repeat == -1 && PyErr_Occurred()) { + if (error_converting(repeat)) { return NULL; } return PySequence_Repeat(m2, repeat); @@ -326,31 +342,17 @@ } static PyObject * -gentype_str(PyObject *self) +genint_type_str(PyObject *self) { - PyObject *arr, *ret = NULL; - - arr = PyArray_FromScalar(self, NULL); - if (arr != NULL) { - ret = PyObject_Str((PyObject *)arr); - Py_DECREF(arr); + PyObject *item, *item_str; + item = gentype_generic_method(self, NULL, NULL, 
"item"); + if (item == NULL) { + return NULL; } - return ret; -} - -static PyObject * -gentype_repr(PyObject *self) -{ - PyObject *arr, *ret = NULL; - - arr = PyArray_FromScalar(self, NULL); - if (arr != NULL) { - /* XXX: Why are we using str here? */ - ret = PyObject_Str((PyObject *)arr); - Py_DECREF(arr); - } - return ret; + item_str = PyObject_Str(item); + Py_DECREF(item); + return item_str; } /* @@ -429,146 +431,32 @@ #endif /**begin repeat - * #name = float, double, longdouble# - * #NAME = FLOAT, DOUBLE, LONGDOUBLE# - * #type = npy_float, npy_double, npy_longdouble# - * #suff = f, d, l# - */ - -#define _FMT1 "%%.%i" NPY_@NAME@_FMT -#define _FMT2 "%%+.%i" NPY_@NAME@_FMT - -NPY_NO_EXPORT void -format_@name@(char *buf, size_t buflen, @type@ val, unsigned int prec) -{ - /* XXX: Find a correct size here for format string */ - char format[64], *res; - size_t i, cnt; - - PyOS_snprintf(format, sizeof(format), _FMT1, prec); - res = NumPyOS_ascii_format@suff@(buf, buflen, format, val, 0); - if (res == NULL) { - fprintf(stderr, "Error while formatting\n"); - return; - } - - /* If nothing but digits after sign, append ".0" */ - cnt = strlen(buf); - for (i = (buf[0] == '-') ? 
1 : 0; i < cnt; ++i) { - if (!isdigit(Py_CHARMASK(buf[i]))) { - break; - } - } - if (i == cnt && buflen >= cnt + 3) { - strcpy(&buf[cnt],".0"); - } -} - -#undef _FMT1 -#undef _FMT2 - -/**end repeat**/ - -/**begin repeat - * #name = cfloat, cdouble, clongdouble# - * #NAME = FLOAT, DOUBLE, LONGDOUBLE# - * #type = npy_cfloat, npy_cdouble, npy_clongdouble# - * #suff = f, d, l# + * #name = half, float, double, longdouble# + * #NAME = HALF, FLOAT, DOUBLE, LONGDOUBLE# + * #type = npy_half, npy_float, npy_double, npy_longdouble# + * #suff = h, f, d, l# */ -#define _FMT1 "%%.%i" NPY_@NAME@_FMT -#define _FMT2 "%%+.%i" NPY_@NAME@_FMT - -static void -format_@name@(char *buf, size_t buflen, @type@ val, unsigned int prec) -{ - /* XXX: Find a correct size here for format string */ - char format[64]; - char *res; - - /* - * Ideally, we should handle this nan/inf stuff in NumpyOS_ascii_format* - */ - if (val.real == 0.0 && npy_signbit(val.real) == 0) { - PyOS_snprintf(format, sizeof(format), _FMT1, prec); - res = NumPyOS_ascii_format@suff@(buf, buflen - 1, format, val.imag, 0); - if (res == NULL) { - /* FIXME - * We need a better way to handle the error message - */ - fprintf(stderr, "Error while formatting\n"); - return; - } - if (!npy_isfinite(val.imag)) { - strncat(buf, "*", 1); - } - strncat(buf, "j", 1); +NPY_NO_EXPORT PyObject * +format_@name@(@type@ val, npy_bool scientific, + int precision, int sign, TrimMode trim, + int pad_left, int pad_right, int exp_digits) +{ + if (scientific) { + return Dragon4_Scientific_AnySize(&val, sizeof(@type@), + DigitMode_Unique, precision, + sign, trim, pad_left, exp_digits); } else { - char re[64], im[64]; - if (npy_isfinite(val.real)) { - PyOS_snprintf(format, sizeof(format), _FMT1, prec); - res = NumPyOS_ascii_format@suff@(re, sizeof(re), format, - val.real, 0); - if (res == NULL) { - /* FIXME - * We need a better way to handle the error message - */ - fprintf(stderr, "Error while formatting\n"); - return; - } - } - else { - if 
(npy_isnan(val.real)) { - strcpy(re, "nan"); - } - else if (val.real > 0){ - strcpy(re, "inf"); - } - else { - strcpy(re, "-inf"); - } - } - - - if (npy_isfinite(val.imag)) { - PyOS_snprintf(format, sizeof(format), _FMT2, prec); - res = NumPyOS_ascii_format@suff@(im, sizeof(im), format, - val.imag, 0); - if (res == NULL) { - fprintf(stderr, "Error while formatting\n"); - return; - } - } - else { - if (npy_isnan(val.imag)) { - strcpy(im, "+nan"); - } - else if (val.imag > 0){ - strcpy(im, "+inf"); - } - else { - strcpy(im, "-inf"); - } - if (!npy_isfinite(val.imag)) { - strncat(im, "*", 1); - } - } - PyOS_snprintf(buf, buflen, "(%s%sj)", re, im); + return Dragon4_Positional_AnySize(&val, sizeof(@type@), + DigitMode_Unique, CutoffMode_TotalLength, precision, + sign, trim, pad_left, pad_right); } } -#undef _FMT1 -#undef _FMT2 /**end repeat**/ -NPY_NO_EXPORT void -format_half(char *buf, size_t buflen, npy_half val, unsigned int prec) -{ - format_float(buf, buflen, npy_half_to_float(val), prec); -} - /* * over-ride repr and str of array-scalar strings and unicode to * remove NULL bytes and then call the corresponding functions @@ -607,6 +495,92 @@ } /**end repeat**/ + +/* + * Convert array of bytes to a string representation much like bytes.__repr__, + * but convert all bytes (including ASCII) to the `\x00` notation with + * uppercase hex codes (FF not ff). 
+ * + * Largely copied from _Py_strhex_impl in CPython implementation + */ +static NPY_INLINE PyObject * +_void_to_hex(const char* argbuf, const Py_ssize_t arglen, + const char *schars, const char *bprefix, const char *echars) +{ + PyObject *retval; + int extrachars, slen; + char *retbuf; + Py_ssize_t i, j; + char const *hexdigits = "0123456789ABCDEF"; + + extrachars = strlen(schars) + strlen(echars); + slen = extrachars + arglen*(2 + strlen(bprefix)); + + if (arglen > (PY_SSIZE_T_MAX / 2) - extrachars) { + return PyErr_NoMemory(); + } + + retbuf = (char *)PyMem_Malloc(slen); + if (!retbuf) { + return PyErr_NoMemory(); + } + + memcpy(retbuf, schars, strlen(schars)); + j = strlen(schars); + + for (i = 0; i < arglen; i++) { + unsigned char c; + memcpy(&retbuf[j], bprefix, strlen(bprefix)); + j += strlen(bprefix); + c = (argbuf[i] >> 4) & 0xf; + retbuf[j++] = hexdigits[c]; + c = argbuf[i] & 0xf; + retbuf[j++] = hexdigits[c]; + } + memcpy(&retbuf[j], echars, strlen(echars)); + + retval = PyUString_FromStringAndSize(retbuf, slen); + PyMem_Free(retbuf); + + return retval; +} + +static PyObject * +voidtype_repr(PyObject *self) +{ + PyVoidScalarObject *s = (PyVoidScalarObject*) self; + if (PyDataType_HASFIELDS(s->descr)) { + static PyObject *reprfunc = NULL; + + npy_cache_import("numpy.core.arrayprint", + "_void_scalar_repr", &reprfunc); + if (reprfunc == NULL) { + return NULL; + } + + return PyObject_CallFunction(reprfunc, "O", self); + } + return _void_to_hex(s->obval, s->descr->elsize, "void(b'", "\\x", "')"); +} + +static PyObject * +voidtype_str(PyObject *self) +{ + PyVoidScalarObject *s = (PyVoidScalarObject*) self; + if (PyDataType_HASFIELDS(s->descr)) { + static PyObject *reprfunc = NULL; + + npy_cache_import("numpy.core.arrayprint", + "_void_scalar_repr", &reprfunc); + if (reprfunc == NULL) { + return NULL; + } + + return PyObject_CallFunction(reprfunc, "O", self); + } + return _void_to_hex(s->obval, s->descr->elsize, "b'", "\\x", "'"); +} + static PyObject * 
datetimetype_repr(PyObject *self) { @@ -800,7 +774,23 @@ return ret; } -/* The REPR values are finfo.precision + 2 */ +/* + * float type str and repr + * + * These functions will return NULL if PyString creation fails. + */ + + +/* + * *** BEGIN LEGACY PRINTING MODE CODE *** + * + * This code is legacy code needed to reproduce the printing behavior of + * scalars in numpy 1.13. One day we hope to remove it. + */ + +/* determines if legacy mode is enabled, global set in multiarraymodule.c */ +extern int npy_legacy_print_mode; + #define HALFPREC_REPR 5 #define HALFPREC_STR 5 #define FLOATPREC_REPR 8 @@ -815,144 +805,300 @@ #define LONGDOUBLEPREC_STR 12 #endif -/* - * float type str and repr - * - * These functions will return NULL if PyString creation fails. - */ - /**begin repeat - * #name = half, float, double, longdouble# - * #Name = Half, Float, Double, LongDouble# - * #NAME = HALF, FLOAT, DOUBLE, LONGDOUBLE# - * #hascomplex = 0, 1, 1, 1# - */ -/**begin repeat1 * #kind = str, repr# * #KIND = STR, REPR# */ -#define PREC @NAME@PREC_@KIND@ +/**begin repeat1 + * #name = cfloat, cdouble, clongdouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# + * #type = npy_cfloat, npy_cdouble, npy_clongdouble# + * #suff = f, d, l# + */ -static PyObject * -@name@type_@kind@(PyObject *self) +#define _FMT1 "%%.%i" NPY_@NAME@_FMT +#define _FMT2 "%%+.%i" NPY_@NAME@_FMT + +static PyObject* +legacy_@name@_format@kind@(@type@ val) { - char buf[100]; - npy_@name@ val = ((Py@Name@ScalarObject *)self)->obval; + /* XXX: Find a correct size here for format string */ + char format[64], buf[100], *res; + + /* + * Ideally, we should handle this nan/inf stuff in NumpyOS_ascii_format* + */ + if (val.real == 0.0 && npy_signbit(val.real) == 0) { + PyOS_snprintf(format, sizeof(format), _FMT1, @NAME@PREC_@KIND@); + res = NumPyOS_ascii_format@suff@(buf, sizeof(buf) - 1, format, val.imag, 0); + if (res == NULL) { + PyErr_SetString(PyExc_RuntimeError, "Error while formatting"); + return NULL; + } + if 
(!npy_isfinite(val.imag)) { + strncat(buf, "*", 1); + } + strncat(buf, "j", 1); + } + else { + char re[64], im[64]; + if (npy_isfinite(val.real)) { + PyOS_snprintf(format, sizeof(format), _FMT1, @NAME@PREC_@KIND@); + res = NumPyOS_ascii_format@suff@(re, sizeof(re), format, + val.real, 0); + if (res == NULL) { + PyErr_SetString(PyExc_RuntimeError, "Error while formatting"); + return NULL; + } + } + else { + if (npy_isnan(val.real)) { + strcpy(re, "nan"); + } + else if (val.real > 0){ + strcpy(re, "inf"); + } + else { + strcpy(re, "-inf"); + } + } + + + if (npy_isfinite(val.imag)) { + PyOS_snprintf(format, sizeof(format), _FMT2, @NAME@PREC_@KIND@); + res = NumPyOS_ascii_format@suff@(im, sizeof(im), format, + val.imag, 0); + if (res == NULL) { + PyErr_SetString(PyExc_RuntimeError, "Error while formatting"); + return NULL; + } + } + else { + if (npy_isnan(val.imag)) { + strcpy(im, "+nan"); + } + else if (val.imag > 0){ + strcpy(im, "+inf"); + } + else { + strcpy(im, "-inf"); + } + if (!npy_isfinite(val.imag)) { + strncat(im, "*", 1); + } + } + PyOS_snprintf(buf, sizeof(buf), "(%s%sj)", re, im); + } - format_@name@(buf, sizeof(buf), val, PREC); return PyUString_FromString(buf); } -#if @hascomplex@ +#undef _FMT1 +#undef _FMT2 + +/**end repeat1**/ + +/**begin repeat1 + * #name = float, double, longdouble# + * #Name = Float, Double, LongDouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# + * #suff = f, d, l# + */ + +#define _FMT1 "%%.%i" NPY_@NAME@_FMT + static PyObject * -c@name@type_@kind@(PyObject *self) -{ - char buf[202]; - npy_c@name@ val = ((PyC@Name@ScalarObject *)self)->obval; +legacy_@name@_format@kind@(npy_@name@ val){ + /* XXX: Find a correct size here for format string */ + char format[64], buf[100], *res; + size_t i, cnt; + + PyOS_snprintf(format, sizeof(format), _FMT1, @NAME@PREC_@KIND@); + res = NumPyOS_ascii_format@suff@(buf, sizeof(buf), format, val, 0); + if (res == NULL) { + PyErr_SetString(PyExc_RuntimeError, "Error while formatting"); + return NULL; + } + 
+ /* If nothing but digits after sign, append ".0" */ + cnt = strlen(buf); + for (i = (buf[0] == '-') ? 1 : 0; i < cnt; ++i) { + if (!isdigit(Py_CHARMASK(buf[i]))) { + break; + } + } + if (i == cnt && sizeof(buf) >= cnt + 3) { + strcpy(&buf[cnt],".0"); + } - format_c@name@(buf, sizeof(buf), val, PREC); return PyUString_FromString(buf); } -#endif -#undef PREC +#undef _FMT1 /**end repeat1**/ + /**end repeat**/ + /* - * float type print (control print a, where a is a float type instance) + * *** END LEGACY PRINTING MODE CODE *** */ + + /**begin repeat - * #name = half, float, double, longdouble# - * #Name = Half, Float, Double, LongDouble# - * #NAME = HALF, FLOAT, DOUBLE, LONGDOUBLE# - * #hascomplex = 0, 1, 1, 1# + * #kind = str, repr# */ -static int -@name@type_print(PyObject *v, FILE *fp, int flags) +/**begin repeat1 + * #name = float, double, longdouble# + * #Name = Float, Double, LongDouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# + */ + +/* helper function choose scientific of fractional output, based on a cutoff */ +static PyObject * +@name@type_@kind@_either(npy_@name@ val, TrimMode trim_pos, TrimMode trim_sci, + npy_bool sign) { - char buf[100]; - npy_@name@ val = ((Py@Name@ScalarObject *)v)->obval; + npy_@name@ absval; - format_@name@(buf, sizeof(buf), val, - (flags & Py_PRINT_RAW) ? @NAME@PREC_STR : @NAME@PREC_REPR); - Py_BEGIN_ALLOW_THREADS - fputs(buf, fp); - Py_END_ALLOW_THREADS - return 0; + if (npy_legacy_print_mode == 113) { + return legacy_@name@_format@kind@(val); + } + + absval = val < 0 ? 
-val : val; + + if (absval == 0 || (absval < 1.e16L && absval >= 1.e-4L) ) { + return format_@name@(val, 0, -1, sign, trim_pos, -1, -1, -1); + } + return format_@name@(val, 1, -1, sign, trim_sci, -1, -1, -1); } -#if @hascomplex@ -static int -c@name@type_print(PyObject *v, FILE *fp, int flags) +static PyObject * +@name@type_@kind@(PyObject *self) { - /* Size of buf: twice sizeof(real) + 2 (for the parenthesis) */ - char buf[202]; - npy_c@name@ val = ((PyC@Name@ScalarObject *)v)->obval; - - format_c@name@(buf, sizeof(buf), val, - (flags & Py_PRINT_RAW) ? @NAME@PREC_STR : @NAME@PREC_REPR); - Py_BEGIN_ALLOW_THREADS - fputs(buf, fp); - Py_END_ALLOW_THREADS - return 0; + return @name@type_@kind@_either(((Py@Name@ScalarObject *)self)->obval, + TrimMode_LeaveOneZero, TrimMode_DptZeros, 0); } -#endif -/**end repeat**/ +static PyObject * +c@name@type_@kind@(PyObject *self) +{ + PyObject *rstr, *istr, *ret; + npy_c@name@ val = ((PyC@Name@ScalarObject *)self)->obval; + TrimMode trim = TrimMode_DptZeros; + if (npy_legacy_print_mode == 113) { + return legacy_c@name@_format@kind@(val); + } + + if (val.real == 0.0 && npy_signbit(val.real) == 0) { + istr = @name@type_@kind@_either(val.imag, trim, trim, 0); + if (istr == NULL) { + return NULL; + } + + PyUString_ConcatAndDel(&istr, PyUString_FromString("j")); + return istr; + } + + if (npy_isfinite(val.real)) { + rstr = @name@type_@kind@_either(val.real, trim, trim, 0); + if (rstr == NULL) { + return NULL; + } + } + else if (npy_isnan(val.real)) { + rstr = PyUString_FromString("nan"); + } + else if (val.real > 0){ + rstr = PyUString_FromString("inf"); + } + else { + rstr = PyUString_FromString("-inf"); + } + + if (npy_isfinite(val.imag)) { + istr = @name@type_@kind@_either(val.imag, trim, trim, 1); + if (istr == NULL) { + return NULL; + } + } + else if (npy_isnan(val.imag)) { + istr = PyUString_FromString("+nan"); + } + else if (val.imag > 0){ + istr = PyUString_FromString("+inf"); + } + else { + istr = PyUString_FromString("-inf"); 
+ } + + ret = PyUString_FromString("("); + PyUString_ConcatAndDel(&ret, rstr); + PyUString_ConcatAndDel(&ret, istr); + PyUString_ConcatAndDel(&ret, PyUString_FromString("j)")); + return ret; +} + +#undef PREC + +/**end repeat1**/ -/* - * Could improve this with a PyLong_FromLongDouble(longdouble ldval) - * but this would need some more work... - */ -/**begin repeat - * - * #name = (int, float)*2# - * #KIND = (Long, Float)*2# - * #char = ,,c*2# - * #CHAR = ,,C*2# - * #POST = ,,.real*2# - */ static PyObject * -@char@longdoubletype_@name@(PyObject *self) +halftype_@kind@(PyObject *self) { - double dval; - PyObject *obj, *ret; + npy_half val = ((PyHalfScalarObject *)self)->obval; + float floatval = npy_half_to_float(val); + float absval; - dval = (double)(((Py@CHAR@LongDoubleScalarObject *)self)->obval)@POST@; - obj = Py@KIND@_FromDouble(dval); - if (obj == NULL) { - return NULL; + if (npy_legacy_print_mode == 113) { + return legacy_float_format@kind@(floatval); } - ret = Py_TYPE(obj)->tp_as_number->nb_@name@(obj); - Py_DECREF(obj); - return ret; + + absval = floatval < 0 ? 
-floatval : floatval; + + if (absval == 0 || (absval < 1.e16 && absval >= 1.e-4) ) { + return format_half(val, 0, -1, 0, TrimMode_LeaveOneZero, -1, -1, -1); + } + return format_half(val, 1, -1, 0, TrimMode_DptZeros, -1, -1, -1); } + + /**end repeat**/ +/**begin repeat + * #char = ,c# + * #CHAR = ,C# + * #POST = ,.real# + */ +static PyObject * +@char@longdoubletype_float(PyObject *self) +{ + npy_longdouble val = PyArrayScalar_VAL(self, @CHAR@LongDouble)@POST@; + return PyFloat_FromDouble((double) val); +} + +static PyObject * +@char@longdoubletype_long(PyObject *self) +{ + npy_longdouble val = PyArrayScalar_VAL(self, @CHAR@LongDouble)@POST@; + return npy_longdouble_to_PyLong(val); +} + #if !defined(NPY_PY3K) -/**begin repeat - * - * #name = (long, hex, oct)*2# - * #KIND = (Long*3)*2# - * #char = ,,,c*3# - * #CHAR = ,,,C*3# - * #POST = ,,,.real*3# +/**begin repeat1 + * #name = int, hex, oct# */ static PyObject * @char@longdoubletype_@name@(PyObject *self) { - double dval; - PyObject *obj, *ret; - - dval = (double)(((Py@CHAR@LongDoubleScalarObject *)self)->obval)@POST@; - obj = Py@KIND@_FromDouble(dval); + PyObject *ret; + PyObject *obj = @char@longdoubletype_long(self); if (obj == NULL) { return NULL; } @@ -960,10 +1106,12 @@ Py_DECREF(obj); return ret; } -/**end repeat**/ +/**end repeat1**/ #endif /* !defined(NPY_PY3K) */ +/**end repeat**/ + static PyNumberMethods gentype_as_number = { (binaryfunc)gentype_add, /*nb_add*/ (binaryfunc)gentype_subtract, /*nb_subtract*/ @@ -1195,7 +1343,8 @@ inter->two = 2; inter->nd = 0; inter->flags = PyArray_FLAGS(arr); - inter->flags &= ~(NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_OWNDATA); + inter->flags &= ~(NPY_ARRAY_UPDATEIFCOPY | NPY_ARRAY_WRITEBACKIFCOPY | + NPY_ARRAY_OWNDATA); inter->flags |= NPY_ARRAY_NOTSWAPPED; inter->typekind = PyArray_DESCR(arr)->kind; inter->itemsize = PyArray_DESCR(arr)->elsize; @@ -1343,10 +1492,9 @@ int elsize; typecode = PyArray_DescrFromScalar(self); elsize = typecode->elsize; - temp = 
PyDataMem_NEW(elsize); - memset(temp, '\0', elsize); + temp = npy_alloc_cache_zero(elsize); ret = PyArray_Scalar(temp, typecode, NULL); - PyDataMem_FREE(temp); + npy_free_cache(temp, elsize); } Py_XDECREF(typecode); @@ -1516,9 +1664,9 @@ */ /**begin repeat * - * #name = tolist, item, tostring, tobytes, astype, copy, __deepcopy__, - * searchsorted, view, swapaxes, conj, conjugate, nonzero, flatten, - * ravel, fill, transpose, newbyteorder# + * #name = tolist, item, __deepcopy__, __copy__, + * swapaxes, conj, conjugate, nonzero, + * fill, transpose, newbyteorder# */ static PyObject * gentype_@name@(PyObject *self, PyObject *args) @@ -1548,11 +1696,13 @@ gentype_getreadbuf(PyObject *, Py_ssize_t, void **); static PyObject * -gentype_byteswap(PyObject *self, PyObject *args) +gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds) { npy_bool inplace = NPY_FALSE; + static char *kwlist[] = {"inplace", NULL}; - if (!PyArg_ParseTuple(args, "|O&:byteswap", PyArray_BoolConverter, &inplace)) { + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:byteswap", kwlist, + PyArray_BoolConverter, &inplace)) { return NULL; } if (inplace) { @@ -1593,8 +1743,9 @@ * * #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, * std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, - * round, argmax, argmin, max, min, ptp, any, all, resize, reshape, - * choose# + * round, argmax, argmin, max, min, ptp, any, all, astype, resize, + * reshape, choose, tostring, tobytes, copy, searchsorted, view, + * flatten, ravel# */ static PyObject * gentype_@name@(PyObject *self, PyObject *args, PyObject *kwds) @@ -1628,7 +1779,7 @@ * However, as a special case, void-scalar assignment broadcasts * differently from ndarrays when assigning to an object field: Assignment * to an ndarray object field broadcasts, but assignment to a void-scalar - * object-field should not, in order to allow nested ndarrays. + * object-field should not, in order to allow nested ndarrays. 
* These lines should then behave identically: * * b = np.zeros(1, dtype=[('x', 'O')]) @@ -1858,19 +2009,19 @@ METH_VARARGS, NULL}, {"tobytes", (PyCFunction)gentype_tobytes, - METH_VARARGS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, {"tofile", (PyCFunction)gentype_tofile, METH_VARARGS | METH_KEYWORDS, NULL}, {"tostring", (PyCFunction)gentype_tostring, - METH_VARARGS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, {"byteswap", (PyCFunction)gentype_byteswap, - METH_VARARGS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, {"astype", (PyCFunction)gentype_astype, - METH_VARARGS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, {"getfield", (PyCFunction)gentype_getfield, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -1879,7 +2030,7 @@ METH_VARARGS | METH_KEYWORDS, NULL}, {"copy", (PyCFunction)gentype_copy, - METH_VARARGS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, {"resize", (PyCFunction)gentype_resize, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -1897,7 +2048,7 @@ /* for the copy module */ {"__copy__", - (PyCFunction)gentype_copy, + (PyCFunction)gentype___copy__, METH_VARARGS, NULL}, {"__deepcopy__", (PyCFunction)gentype___deepcopy__, @@ -1945,7 +2096,7 @@ METH_VARARGS | METH_KEYWORDS, NULL}, {"searchsorted", (PyCFunction)gentype_searchsorted, - METH_VARARGS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, {"argmax", (PyCFunction)gentype_argmax, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -1960,7 +2111,7 @@ METH_VARARGS, NULL}, {"view", (PyCFunction)gentype_view, - METH_VARARGS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, {"swapaxes", (PyCFunction)gentype_swapaxes, METH_VARARGS, NULL}, @@ -2023,10 +2174,10 @@ METH_VARARGS | METH_KEYWORDS, NULL}, {"flatten", (PyCFunction)gentype_flatten, - METH_VARARGS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, {"ravel", (PyCFunction)gentype_ravel, - METH_VARARGS, NULL}, + METH_VARARGS | METH_KEYWORDS, NULL}, {"round", (PyCFunction)gentype_round, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -2151,35 +2302,31 @@ voidtype_subscript(PyVoidScalarObject 
*self, PyObject *ind) { npy_intp n; - PyObject *ret, *args; + PyObject *ret, *res; - if (!(PyDataType_HASFIELDS(self->descr))) { - PyErr_SetString(PyExc_IndexError, - "can't index void scalar without fields"); - return NULL; + /* structured voids will accept an integer index */ + if (PyDataType_HASFIELDS(self->descr)) { + n = PyArray_PyIntAsIntp(ind); + if (!error_converting(n)) { + return voidtype_item(self, (Py_ssize_t)n); + } + PyErr_Clear(); } -#if defined(NPY_PY3K) - if (PyUString_Check(ind)) { -#else - if (PyBytes_Check(ind) || PyUnicode_Check(ind)) { -#endif - args = Py_BuildValue("(O)", ind); - ret = gentype_generic_method((PyObject *)self, args, NULL, "__getitem__"); - Py_DECREF(args); - return ret; - } + res = PyArray_FromScalar((PyObject*)self, NULL); - /* try to convert it to a number */ - n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) { - goto fail; + /* ellipsis should return 0d array */ + if(ind == Py_Ellipsis){ + return res; } - return voidtype_item(self, (Py_ssize_t)n); -fail: - PyErr_SetString(PyExc_IndexError, "invalid index"); - return NULL; + /* + * other cases (field names, empty tuple) will return either + * scalar or non-0d array. Compute this using ndarray subscript. 
+ */ + ret = array_subscript((PyArrayObject *)res, ind); + Py_DECREF(res); + return PyArray_Return((PyArrayObject*)ret); } static int @@ -2229,11 +2376,7 @@ return -1; } -#if defined(NPY_PY3K) - if (PyUString_Check(ind)) { -#else - if (PyBytes_Check(ind) || PyUnicode_Check(ind)) { -#endif + if (PyBaseString_Check(ind)) { /* * Much like in voidtype_setfield, we cannot simply use ndarray's * __setitem__ since assignment to void scalars should not broadcast @@ -2473,7 +2616,7 @@ void_dealloc(PyVoidScalarObject *v) { if (v->flags & NPY_ARRAY_OWNDATA) { - PyDataMem_FREE(v->obval); + npy_free_cache(v->obval, Py_SIZE(v)); } Py_XDECREF(v->descr); Py_XDECREF(v->base); @@ -2819,7 +2962,14 @@ static PyObject * bool_index(PyObject *a) { - return PyInt_FromLong(PyArrayScalar_VAL(a, Bool)); + if (DEPRECATE( + "In future, it will be an error for 'np.bool_' scalars to be " + "interpreted as an index") < 0) { + return NULL; + } + else { + return PyInt_FromLong(PyArrayScalar_VAL(a, Bool)); + } } /* Arithmetic methods -- only so we can override &, |, ^. 
*/ @@ -2889,9 +3039,7 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds)) { PyObject *obj, *arr; - npy_ulonglong memu = 1; PyObject *new = NULL; - char *destptr; if (!PyArg_ParseTuple(args, "O:void", &obj)) { return NULL; @@ -2913,7 +3061,8 @@ } if (new && PyLong_Check(new)) { PyObject *ret; - memu = PyLong_AsUnsignedLongLong(new); + char *destptr; + npy_ulonglong memu = PyLong_AsUnsignedLongLong(new); Py_DECREF(new); if (PyErr_Occurred() || (memu > NPY_MAX_INT)) { PyErr_Clear(); @@ -2922,13 +3071,13 @@ (int) NPY_MAX_INT); return NULL; } - destptr = PyDataMem_NEW((int) memu); + destptr = npy_alloc_cache_zero(memu); if (destptr == NULL) { return PyErr_NoMemory(); } ret = type->tp_alloc(type, 0); if (ret == NULL) { - PyDataMem_FREE(destptr); + npy_free_cache(destptr, memu); return PyErr_NoMemory(); } ((PyVoidScalarObject *)ret)->obval = destptr; @@ -2939,7 +3088,6 @@ ((PyVoidScalarObject *)ret)->flags = NPY_ARRAY_BEHAVED | NPY_ARRAY_OWNDATA; ((PyVoidScalarObject *)ret)->base = NULL; - memset(destptr, '\0', (size_t) memu); return ret; } @@ -4031,6 +4179,37 @@ } } +#ifndef NPY_PY3K +/* + * In python2, the `float` and `complex` types still implement the obsolete + * "tp_print" method, which uses CPython's float-printing routines to print the + * float. Numpy's float_/cfloat inherit from Python float/complex, but + * override its tp_repr and tp_str methods. In order to avoid an inconsistency + * with the inherited tp_print, we need to override it too. + * + * In python3 the tp_print method is reserved/unused. 
+ */ +static int +float_print(PyObject *o, FILE *fp, int flags) +{ + int ret; + PyObject *to_print; + if (flags & Py_PRINT_RAW) { + to_print = PyObject_Str(o); + } + else { + to_print = PyObject_Repr(o); + } + + if (to_print == NULL) { + return -1; + } + + ret = PyObject_Print(to_print, fp, Py_PRINT_RAW); + Py_DECREF(to_print); + return ret; +} +#endif static PyNumberMethods longdoubletype_as_number; static PyNumberMethods clongdoubletype_as_number; @@ -4051,8 +4230,6 @@ PyGenericArrType_Type.tp_new = NULL; PyGenericArrType_Type.tp_alloc = gentype_alloc; PyGenericArrType_Type.tp_free = (freefunc)gentype_free; - PyGenericArrType_Type.tp_repr = gentype_repr; - PyGenericArrType_Type.tp_str = gentype_str; PyGenericArrType_Type.tp_richcompare = gentype_richcompare; PyBoolArrType_Type.tp_as_number = &bool_arrtype_as_number; @@ -4085,6 +4262,12 @@ /**end repeat**/ +#ifndef NPY_PY3K + PyDoubleArrType_Type.tp_print = &float_print; + PyCDoubleArrType_Type.tp_print = &float_print; +#endif + + PyBoolArrType_Type.tp_as_number->nb_index = (unaryfunc)bool_index; PyStringArrType_Type.tp_alloc = NULL; @@ -4100,6 +4283,8 @@ PyVoidArrType_Type.tp_getset = voidtype_getsets; PyVoidArrType_Type.tp_as_mapping = &voidtype_as_mapping; PyVoidArrType_Type.tp_as_sequence = &voidtype_as_sequence; + PyVoidArrType_Type.tp_repr = voidtype_repr; + PyVoidArrType_Type.tp_str = voidtype_str; PyIntegerArrType_Type.tp_getset = inttype_getsets; @@ -4183,45 +4368,44 @@ /**end repeat**/ - PyHalfArrType_Type.tp_print = halftype_print; - PyFloatArrType_Type.tp_print = floattype_print; - PyDoubleArrType_Type.tp_print = doubletype_print; - PyLongDoubleArrType_Type.tp_print = longdoubletype_print; - - PyCFloatArrType_Type.tp_print = cfloattype_print; - PyCDoubleArrType_Type.tp_print = cdoubletype_print; - PyCLongDoubleArrType_Type.tp_print = clongdoubletype_print; - - /* - * These need to be coded specially because getitem does not - * return a normal Python type - */ - PyLongDoubleArrType_Type.tp_as_number = 
&longdoubletype_as_number; - PyCLongDoubleArrType_Type.tp_as_number = &clongdoubletype_as_number; /**begin repeat - * #name = int, float, repr, str# - * #kind = tp_as_number->nb*2, tp*2# + * #Type = Bool, Byte, UByte, Short, UShort, Int, UInt, Long, + * ULong, LongLong, ULongLong# */ - PyLongDoubleArrType_Type.@kind@_@name@ = longdoubletype_@name@; - PyCLongDoubleArrType_Type.@kind@_@name@ = clongdoubletype_@name@; + /* both str/repr use genint_type_str to avoid trailing "L" of longs */ + Py@Type@ArrType_Type.tp_str = genint_type_str; + Py@Type@ArrType_Type.tp_repr = genint_type_str; /**end repeat**/ -#if !defined(NPY_PY3K) + /**begin repeat - * #name = long, hex, oct# - * #kind = tp_as_number->nb*3# + * #char = ,c# + * #CHAR = ,C# */ - PyLongDoubleArrType_Type.@kind@_@name@ = longdoubletype_@name@; - PyCLongDoubleArrType_Type.@kind@_@name@ = clongdoubletype_@name@; + /* + * These need to be coded specially because longdouble/clongdouble getitem + * does not return a normal Python type + */ + @char@longdoubletype_as_number.nb_float = @char@longdoubletype_float; +#if defined(NPY_PY3K) + @char@longdoubletype_as_number.nb_int = @char@longdoubletype_long; +#else + @char@longdoubletype_as_number.nb_int = @char@longdoubletype_int; + @char@longdoubletype_as_number.nb_long = @char@longdoubletype_long; + @char@longdoubletype_as_number.nb_hex = @char@longdoubletype_hex; + @char@longdoubletype_as_number.nb_oct = @char@longdoubletype_oct; +#endif - /**end repeat**/ + Py@CHAR@LongDoubleArrType_Type.tp_as_number = &@char@longdoubletype_as_number; + Py@CHAR@LongDoubleArrType_Type.tp_repr = @char@longdoubletype_repr; + Py@CHAR@LongDoubleArrType_Type.tp_str = @char@longdoubletype_str; -#endif + /**end repeat**/ PyStringArrType_Type.tp_itemsize = sizeof(char); PyVoidArrType_Type.tp_dealloc = (destructor) void_dealloc; diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/scalartypes.h python-numpy-1.14.5/numpy/core/src/multiarray/scalartypes.h --- 
python-numpy-1.13.3/numpy/core/src/multiarray/scalartypes.h 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/scalartypes.h 2018-06-12 17:31:56.000000000 +0000 @@ -19,9 +19,6 @@ NPY_NO_EXPORT void initialize_numeric_types(void); -NPY_NO_EXPORT void -format_longdouble(char *buf, size_t buflen, npy_longdouble val, unsigned int prec); - #if PY_VERSION_HEX >= 0x03000000 NPY_NO_EXPORT void gentype_struct_free(PyObject *ptr); diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/shape.c python-numpy-1.14.5/numpy/core/src/multiarray/shape.c --- python-numpy-1.13.3/numpy/core/src/multiarray/shape.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/shape.c 2018-06-12 18:28:52.000000000 +0000 @@ -19,6 +19,7 @@ #include "templ_common.h" /* for npy_mul_with_overflow_intp */ #include "common.h" /* for convert_shape_to_string */ +#include "alloc.h" static int _fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr); @@ -40,15 +41,14 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, NPY_ORDER order) { + npy_intp oldnbytes, newnbytes; npy_intp oldsize, newsize; int new_nd=newshape->len, k, n, elsize; int refcnt; npy_intp* new_dimensions=newshape->ptr; npy_intp new_strides[NPY_MAXDIMS]; - size_t sd; npy_intp *dimptr; char *new_data; - npy_intp largest; if (!PyArray_ISONESEGMENT(self)) { PyErr_SetString(PyExc_ValueError, @@ -56,15 +56,12 @@ return NULL; } - if (PyArray_DESCR(self)->elsize == 0) { - PyErr_SetString(PyExc_ValueError, - "Bad data-type size."); - return NULL; - } + /* Compute total size of old and new arrays. 
The new size might overflow */ + oldsize = PyArray_SIZE(self); newsize = 1; - largest = NPY_MAX_INTP / PyArray_DESCR(self)->elsize; for(k = 0; k < new_nd; k++) { if (new_dimensions[k] == 0) { + newsize = 0; break; } if (new_dimensions[k] < 0) { @@ -72,14 +69,19 @@ "negative dimensions not allowed"); return NULL; } - newsize *= new_dimensions[k]; - if (newsize <= 0 || newsize > largest) { + if (npy_mul_with_overflow_intp(&newsize, newsize, new_dimensions[k])) { return PyErr_NoMemory(); } } - oldsize = PyArray_SIZE(self); - if (oldsize != newsize) { + /* Convert to number of bytes. The new count might overflow */ + elsize = PyArray_DESCR(self)->elsize; + oldnbytes = oldsize * elsize; + if (npy_mul_with_overflow_intp(&newnbytes, newsize, elsize)) { + return PyErr_NoMemory(); + } + + if (oldnbytes != newnbytes) { if (!(PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA)) { PyErr_SetString(PyExc_ValueError, "cannot resize this array: it does not own its data"); @@ -91,7 +93,6 @@ PyErr_SetString(PyExc_ValueError, "cannot resize an array with refcheck=True on PyPy.\n" "Use the resize function or refcheck=False"); - return NULL; #else refcnt = PyArray_REFCOUNT(self); @@ -110,14 +111,9 @@ return NULL; } - if (newsize == 0) { - sd = PyArray_DESCR(self)->elsize; - } - else { - sd = newsize*PyArray_DESCR(self)->elsize; - } - /* Reallocate space if needed */ - new_data = PyDataMem_RENEW(PyArray_DATA(self), sd); + /* Reallocate space if needed - allocating 0 is forbidden */ + new_data = PyDataMem_RENEW( + PyArray_DATA(self), newnbytes == 0 ? 
elsize : newnbytes); if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); @@ -126,13 +122,12 @@ ((PyArrayObject_fields *)self)->data = new_data; } - if ((newsize > oldsize) && PyArray_ISWRITEABLE(self)) { + if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { /* Fill new memory with zeros */ - elsize = PyArray_DESCR(self)->elsize; if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_ITEM_REFCOUNT)) { PyObject *zero = PyInt_FromLong(0); char *optr; - optr = PyArray_BYTES(self) + oldsize*elsize; + optr = PyArray_BYTES(self) + oldnbytes; n = newsize - oldsize; for (k = 0; k < n; k++) { _putzero((char *)optr, zero, PyArray_DESCR(self)); @@ -141,7 +136,7 @@ Py_DECREF(zero); } else{ - memset(PyArray_BYTES(self)+oldsize*elsize, 0, (newsize-oldsize)*elsize); + memset(PyArray_BYTES(self) + oldnbytes, 0, newnbytes - oldnbytes); } } @@ -316,7 +311,7 @@ return NULL; } ret = PyArray_Newshape(self, &newdims, NPY_CORDER); - PyDimMem_FREE(newdims.ptr); + npy_free_cache_dim_obj(newdims); return ret; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/strfuncs.c python-numpy-1.14.5/numpy/core/src/multiarray/strfuncs.c --- python-numpy-1.13.3/numpy/core/src/multiarray/strfuncs.c 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/strfuncs.c 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,259 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include +#include + +#include "npy_pycompat.h" + +#include "strfuncs.h" + +static PyObject *PyArray_StrFunction = NULL; +static PyObject *PyArray_ReprFunction = NULL; + +/*NUMPY_API + * Set the array print function to be a Python function. 
+ */ +NPY_NO_EXPORT void +PyArray_SetStringFunction(PyObject *op, int repr) +{ + if (repr) { + /* Dispose of previous callback */ + Py_XDECREF(PyArray_ReprFunction); + /* Add a reference to new callback */ + Py_XINCREF(op); + /* Remember new callback */ + PyArray_ReprFunction = op; + } + else { + /* Dispose of previous callback */ + Py_XDECREF(PyArray_StrFunction); + /* Add a reference to new callback */ + Py_XINCREF(op); + /* Remember new callback */ + PyArray_StrFunction = op; + } +} + + +/* + * Extend string. On failure, returns NULL and leaves *strp alone. + * XXX we do this in multiple places; time for a string library? + */ +static char * +extend(char **strp, Py_ssize_t n, Py_ssize_t *maxp) +{ + char *str = *strp; + Py_ssize_t new_cap; + + if (n >= *maxp - 16) { + new_cap = *maxp * 2; + + if (new_cap <= *maxp) { /* overflow */ + return NULL; + } + str = PyArray_realloc(*strp, new_cap); + if (str != NULL) { + *strp = str; + *maxp = new_cap; + } + } + return str; +} + + +static int +dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd, + npy_intp *dimensions, npy_intp *strides, PyArrayObject* self) +{ + PyObject *op = NULL, *sp = NULL; + char *ostring; + npy_intp i, N, ret = 0; + +#define CHECK_MEMORY do { \ + if (extend(string, *n, max_n) == NULL) { \ + ret = -1; \ + goto end; \ + } \ + } while (0) + + if (nd == 0) { + if ((op = PyArray_GETITEM(self, data)) == NULL) { + return -1; + } + sp = PyObject_Repr(op); + if (sp == NULL) { + ret = -1; + goto end; + } + ostring = PyString_AsString(sp); + N = PyString_Size(sp)*sizeof(char); + *n += N; + CHECK_MEMORY; + memmove(*string + (*n - N), ostring, N); + } + else { + CHECK_MEMORY; + (*string)[*n] = '['; + *n += 1; + for (i = 0; i < dimensions[0]; i++) { + if (dump_data(string, n, max_n, + data + (*strides)*i, + nd - 1, dimensions + 1, + strides + 1, self) < 0) { + return -1; + } + CHECK_MEMORY; + if (i < dimensions[0] - 1) { + (*string)[*n] = ','; + (*string)[*n+1] = ' '; + *n += 2; + } + } 
+ CHECK_MEMORY; + (*string)[*n] = ']'; + *n += 1; + } + +#undef CHECK_MEMORY + +end: + Py_XDECREF(op); + Py_XDECREF(sp); + return ret; +} + + +static PyObject * +array_repr_builtin(PyArrayObject *self, int repr) +{ + PyObject *ret; + char *string; + /* max_n initial value is arbitrary, dump_data will extend it */ + Py_ssize_t n = 0, max_n = PyArray_NBYTES(self) * 4 + 7; + + if ((string = PyArray_malloc(max_n)) == NULL) { + return PyErr_NoMemory(); + } + + if (dump_data(&string, &n, &max_n, PyArray_DATA(self), + PyArray_NDIM(self), PyArray_DIMS(self), + PyArray_STRIDES(self), self) < 0) { + PyArray_free(string); + return NULL; + } + + if (repr) { + if (PyArray_ISEXTENDED(self)) { + ret = PyUString_FromFormat("array(%s, '%c%d')", + string, + PyArray_DESCR(self)->type, + PyArray_DESCR(self)->elsize); + } + else { + ret = PyUString_FromFormat("array(%s, '%c')", + string, + PyArray_DESCR(self)->type); + } + } + else { + ret = PyUString_FromStringAndSize(string, n); + } + + PyArray_free(string); + return ret; +} + + +NPY_NO_EXPORT PyObject * +array_repr(PyArrayObject *self) +{ + PyObject *s, *arglist; + + if (PyArray_ReprFunction == NULL) { + s = array_repr_builtin(self, 1); + } + else { + arglist = Py_BuildValue("(O)", self); + s = PyEval_CallObject(PyArray_ReprFunction, arglist); + Py_DECREF(arglist); + } + return s; +} + + +NPY_NO_EXPORT PyObject * +array_str(PyArrayObject *self) +{ + PyObject *s, *arglist; + + if (PyArray_StrFunction == NULL) { + s = array_repr_builtin(self, 0); + } + else { + arglist = Py_BuildValue("(O)", self); + s = PyEval_CallObject(PyArray_StrFunction, arglist); + Py_DECREF(arglist); + } + return s; +} + +NPY_NO_EXPORT PyObject * +array_format(PyArrayObject *self, PyObject *args) +{ + PyObject *format; + if (!PyArg_ParseTuple(args, "O:__format__", &format)) + return NULL; + + /* 0d arrays - forward to the scalar type */ + if (PyArray_NDIM(self) == 0) { + PyObject *item = PyArray_ToScalar(PyArray_DATA(self), self); + PyObject *res; + + if (item 
== NULL) { + return NULL; + } + res = PyObject_Format(item, format); + Py_DECREF(item); + return res; + } + /* Everything else - use the builtin */ + else { + return PyObject_CallMethod( + (PyObject *)&PyBaseObject_Type, "__format__", "OO", + (PyObject *)self, format + ); + } +} + +#ifndef NPY_PY3K + +NPY_NO_EXPORT PyObject * +array_unicode(PyArrayObject *self) +{ + PyObject *uni; + + if (PyArray_NDIM(self) == 0) { + PyObject *item = PyArray_ToScalar(PyArray_DATA(self), self); + if (item == NULL){ + return NULL; + } + + /* defer to invoking `unicode` on the scalar */ + uni = PyObject_CallFunctionObjArgs( + (PyObject *)&PyUnicode_Type, item, NULL); + Py_DECREF(item); + } + else { + /* Do what unicode(self) would normally do */ + PyObject *str = PyObject_Str((PyObject *)self); + if (str == NULL){ + return NULL; + } + uni = PyUnicode_FromObject(str); + Py_DECREF(str); + } + return uni; +} + +#endif diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/strfuncs.h python-numpy-1.14.5/numpy/core/src/multiarray/strfuncs.h --- python-numpy-1.13.3/numpy/core/src/multiarray/strfuncs.h 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/strfuncs.h 2018-06-12 17:31:56.000000000 +0000 @@ -0,0 +1,21 @@ +#ifndef _NPY_ARRAY_STRFUNCS_H_ +#define _NPY_ARRAY_STRFUNCS_H_ + +NPY_NO_EXPORT void +PyArray_SetStringFunction(PyObject *op, int repr); + +NPY_NO_EXPORT PyObject * +array_repr(PyArrayObject *self); + +NPY_NO_EXPORT PyObject * +array_str(PyArrayObject *self); + +NPY_NO_EXPORT PyObject * +array_format(PyArrayObject *self, PyObject *args); + +#ifndef NPY_PY3K + NPY_NO_EXPORT PyObject * + array_unicode(PyArrayObject *self); +#endif + +#endif diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/temp_elide.c python-numpy-1.14.5/numpy/core/src/multiarray/temp_elide.c --- python-numpy-1.13.3/numpy/core/src/multiarray/temp_elide.c 2017-09-24 22:47:22.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/temp_elide.c 2018-06-12 
18:28:52.000000000 +0000 @@ -287,6 +287,7 @@ !PyArray_CHKFLAGS(alhs, NPY_ARRAY_OWNDATA) || !PyArray_ISWRITEABLE(alhs) || PyArray_CHKFLAGS(alhs, NPY_ARRAY_UPDATEIFCOPY) || + PyArray_CHKFLAGS(alhs, NPY_ARRAY_WRITEBACKIFCOPY) || PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) { return 0; } diff -Nru python-numpy-1.13.3/numpy/core/src/multiarray/usertypes.c python-numpy-1.14.5/numpy/core/src/multiarray/usertypes.c --- python-numpy-1.13.3/numpy/core/src/multiarray/usertypes.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/multiarray/usertypes.c 2018-06-12 17:31:56.000000000 +0000 @@ -146,7 +146,7 @@ } typenum = NPY_USERDEF + NPY_NUMUSERTYPES; descr->type_num = typenum; - if (descr->elsize == 0) { + if (PyDataType_ISUNSIZED(descr)) { PyErr_SetString(PyExc_ValueError, "cannot register a" \ "flexible data-type"); return -1; diff -Nru python-numpy-1.13.3/numpy/core/src/npymath/ieee754.c.src python-numpy-1.14.5/numpy/core/src/npymath/ieee754.c.src --- python-numpy-1.13.3/numpy/core/src/npymath/ieee754.c.src 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/npymath/ieee754.c.src 2018-06-12 18:28:52.000000000 +0000 @@ -6,6 +6,7 @@ */ #include "npy_math_common.h" #include "npy_math_private.h" +#include "numpy/utils.h" #ifndef HAVE_COPYSIGN double npy_copysign(double x, double y) @@ -557,6 +558,15 @@ } #endif +int npy_clear_floatstatus() { + char x=0; + return npy_clear_floatstatus_barrier(&x); +} +int npy_get_floatstatus() { + char x=0; + return npy_get_floatstatus_barrier(&x); +} + /* * Functions to set the floating point status word. * keep in sync with NO_FLOATING_POINT_SUPPORT in ufuncobject.h @@ -574,18 +584,24 @@ defined(__NetBSD__) #include -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char * param) { int fpstatus = fpgetsticky(); + /* + * By using a volatile, the compiler cannot reorder this call + */ + if (param != NULL) { + volatile char NPY_UNUSED(c) = *(char*)param; + } return ((FP_X_DZ & fpstatus) ? 
NPY_FPE_DIVIDEBYZERO : 0) | ((FP_X_OFL & fpstatus) ? NPY_FPE_OVERFLOW : 0) | ((FP_X_UFL & fpstatus) ? NPY_FPE_UNDERFLOW : 0) | ((FP_X_INV & fpstatus) ? NPY_FPE_INVALID : 0); } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char * param) { - int fpstatus = npy_get_floatstatus(); + int fpstatus = npy_get_floatstatus_barrier(param); fpsetsticky(0); return fpstatus; @@ -617,10 +633,16 @@ (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) # include -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char* param) { int fpstatus = fetestexcept(FE_DIVBYZERO | FE_OVERFLOW | FE_UNDERFLOW | FE_INVALID); + /* + * By using a volatile, the compiler cannot reorder this call + */ + if (param != NULL) { + volatile char NPY_UNUSED(c) = *(char*)param; + } return ((FE_DIVBYZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) | ((FE_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) | @@ -628,10 +650,10 @@ ((FE_INVALID & fpstatus) ? NPY_FPE_INVALID : 0); } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char * param) { /* testing float status is 50-100 times faster than clearing on x86 */ - int fpstatus = npy_get_floatstatus(); + int fpstatus = npy_get_floatstatus_barrier(param); if (fpstatus != 0) { feclearexcept(FE_DIVBYZERO | FE_OVERFLOW | FE_UNDERFLOW | FE_INVALID); @@ -665,18 +687,24 @@ #include #include -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char *param) { int fpstatus = fp_read_flag(); + /* + * By using a volatile, the compiler cannot reorder this call + */ + if (param != NULL) { + volatile char NPY_UNUSED(c) = *(char*)param; + } return ((FP_DIV_BY_ZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) | ((FP_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) | ((FP_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) | ((FP_INVALID & fpstatus) ? 
NPY_FPE_INVALID : 0); } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char * param) { - int fpstatus = npy_get_floatstatus(); + int fpstatus = npy_get_floatstatus_barrier(param); fp_swap_flag(0); return fpstatus; @@ -710,8 +738,11 @@ #include -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char *param) { + /* + * By using a volatile, the compiler cannot reorder this call + */ #if defined(_WIN64) int fpstatus = _statusfp(); #else @@ -720,15 +751,18 @@ _statusfp2(&fpstatus, &fpstatus2); fpstatus |= fpstatus2; #endif + if (param != NULL) { + volatile char NPY_UNUSED(c) = *(char*)param; + } return ((SW_ZERODIVIDE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) | ((SW_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) | ((SW_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) | ((SW_INVALID & fpstatus) ? NPY_FPE_INVALID : 0); } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char *param) { - int fpstatus = npy_get_floatstatus(); + int fpstatus = npy_get_floatstatus_barrier(param); _clearfp(); return fpstatus; @@ -739,18 +773,24 @@ #include -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char *param) { unsigned long fpstatus = ieee_get_fp_control(); + /* + * By using a volatile, the compiler cannot reorder this call + */ + if (param != NULL) { + volatile char NPY_UNUSED(c) = *(char*)param; + } return ((IEEE_STATUS_DZE & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) | ((IEEE_STATUS_OVF & fpstatus) ? NPY_FPE_OVERFLOW : 0) | ((IEEE_STATUS_UNF & fpstatus) ? NPY_FPE_UNDERFLOW : 0) | ((IEEE_STATUS_INV & fpstatus) ? 
NPY_FPE_INVALID : 0); } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char *param) { - long fpstatus = npy_get_floatstatus(); + int fpstatus = npy_get_floatstatus_barrier(param); /* clear status bits as well as disable exception mode if on */ ieee_set_fp_control(0); @@ -759,13 +799,14 @@ #else -int npy_get_floatstatus(void) +int npy_get_floatstatus_barrier(char *NPY_UNUSED(param)) { return 0; } -int npy_clear_floatstatus(void) +int npy_clear_floatstatus_barrier(char *param) { + int fpstatus = npy_get_floatstatus_barrier(param); return 0; } diff -Nru python-numpy-1.13.3/numpy/core/src/npymath/npy_math_internal.h.src python-numpy-1.14.5/numpy/core/src/npymath/npy_math_internal.h.src --- python-numpy-1.13.3/numpy/core/src/npymath/npy_math_internal.h.src 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/npymath/npy_math_internal.h.src 2018-06-12 18:28:52.000000000 +0000 @@ -659,7 +659,7 @@ /* snap quotient to nearest integral value */ if (div) { - floordiv = npy_floor(div); + floordiv = npy_floor@c@(div); if (div - floordiv > 0.5@c@) floordiv += 1.0@c@; } diff -Nru python-numpy-1.13.3/numpy/core/src/npysort/mergesort.c.src python-numpy-1.14.5/numpy/core/src/npysort/mergesort.c.src --- python-numpy-1.13.3/numpy/core/src/npysort/mergesort.c.src 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/npysort/mergesort.c.src 2018-06-12 17:31:56.000000000 +0000 @@ -254,6 +254,11 @@ @type@ *pl, *pr, *pw, *vp; int err = 0; + /* Items that have zero size don't make sense to sort */ + if (elsize == 0) { + return 0; + } + pl = start; pr = pl + num*len; pw = malloc((num/2) * elsize); @@ -329,6 +334,11 @@ size_t len = elsize / sizeof(@type@); npy_intp *pl, *pr, *pw; + /* Items that have zero size don't make sense to sort */ + if (elsize == 0) { + return 0; + } + pl = tosort; pr = pl + num; pw = malloc((num/2) * sizeof(npy_intp)); @@ -405,10 +415,18 @@ PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare; char 
*pl = start; char *pr = pl + num*elsize; - char *pw = malloc((num >> 1) *elsize); - char *vp = malloc(elsize); + char *pw; + char *vp; int err = -NPY_ENOMEM; + /* Items that have zero size don't make sense to sort */ + if (elsize == 0) { + return 0; + } + + pw = malloc((num >> 1) *elsize); + vp = malloc(elsize); + if (pw != NULL && vp != NULL) { npy_mergesort0(pl, pr, pw, vp, elsize, cmp, arr); err = 0; @@ -475,6 +493,11 @@ PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare; npy_intp *pl, *pr, *pw; + /* Items that have zero size don't make sense to sort */ + if (elsize == 0) { + return 0; + } + pl = tosort; pr = pl + num; pw = malloc((num >> 1) * sizeof(npy_intp)); diff -Nru python-numpy-1.13.3/numpy/core/src/npysort/quicksort.c.src python-numpy-1.14.5/numpy/core/src/npysort/quicksort.c.src --- python-numpy-1.13.3/numpy/core/src/npysort/quicksort.c.src 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/npysort/quicksort.c.src 2018-06-12 18:28:52.000000000 +0000 @@ -258,7 +258,7 @@ { PyArrayObject *arr = varr; const size_t len = PyArray_ITEMSIZE(arr)/sizeof(@type@); - @type@ *vp = malloc(PyArray_ITEMSIZE(arr)); + @type@ *vp; @type@ *pl = start; @type@ *pr = pl + (num - 1)*len; @type@ *stack[PYA_QS_STACK], **sptr = stack, *pm, *pi, *pj, *pk; @@ -266,6 +266,12 @@ int * psdepth = depth; int cdepth = npy_get_msb(num) * 2; + /* Items that have zero size don't make sense to sort */ + if (len == 0) { + return 0; + } + + vp = malloc(PyArray_ITEMSIZE(arr)); if (vp == NULL) { return -NPY_ENOMEM; } @@ -351,6 +357,11 @@ int * psdepth = depth; int cdepth = npy_get_msb(num) * 2; + /* Items that have zero size don't make sense to sort */ + if (len == 0) { + return 0; + } + for (;;) { if (NPY_UNLIKELY(cdepth < 0)) { aheapsort_@suff@(vv, pl, pr - pl + 1, varr); @@ -429,7 +440,7 @@ PyArrayObject *arr = varr; npy_intp elsize = PyArray_ITEMSIZE(arr); PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare; - char *vp = malloc(elsize); + char *vp; char 
*pl = start; char *pr = pl + (num - 1)*elsize; char *stack[PYA_QS_STACK]; @@ -439,6 +450,12 @@ int * psdepth = depth; int cdepth = npy_get_msb(num) * 2; + /* Items that have zero size don't make sense to sort */ + if (elsize == 0) { + return 0; + } + + vp = malloc(elsize); if (vp == NULL) { return -NPY_ENOMEM; } @@ -539,6 +556,11 @@ int * psdepth = depth; int cdepth = npy_get_msb(num) * 2; + /* Items that have zero size don't make sense to sort */ + if (elsize == 0) { + return 0; + } + for (;;) { if (NPY_UNLIKELY(cdepth < 0)) { npy_aheapsort(vv, pl, pr - pl + 1, varr); diff -Nru python-numpy-1.13.3/numpy/core/src/private/mem_overlap.c python-numpy-1.14.5/numpy/core/src/private/mem_overlap.c --- python-numpy-1.13.3/numpy/core/src/private/mem_overlap.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/private/mem_overlap.c 2018-06-12 18:28:52.000000000 +0000 @@ -181,9 +181,6 @@ All rights reserved. Licensed under 3-clause BSD license, see LICENSE.txt. */ -#include -#include -#include #include #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -191,6 +188,10 @@ #include "mem_overlap.h" #include "npy_extint128.h" +#include +#include +#include + #define MAX(a, b) (((a) >= (b)) ? (a) : (b)) #define MIN(a, b) (((a) <= (b)) ? 
(a) : (b)) diff -Nru python-numpy-1.13.3/numpy/core/src/private/npy_import.h python-numpy-1.14.5/numpy/core/src/private/npy_import.h --- python-numpy-1.13.3/numpy/core/src/private/npy_import.h 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/private/npy_import.h 2018-06-12 18:28:52.000000000 +0000 @@ -29,4 +29,18 @@ } } +NPY_INLINE static PyObject * +npy_import(const char *module, const char *attr) +{ + PyObject *mod = PyImport_ImportModule(module); + PyObject *ret = NULL; + + if (mod != NULL) { + ret = PyObject_GetAttrString(mod, attr); + } + Py_XDECREF(mod); + + return ret; +} + #endif diff -Nru python-numpy-1.13.3/numpy/core/src/private/npy_longdouble.c python-numpy-1.14.5/numpy/core/src/private/npy_longdouble.c --- python-numpy-1.13.3/numpy/core/src/private/npy_longdouble.c 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/private/npy_longdouble.c 2018-06-12 17:31:56.000000000 +0000 @@ -0,0 +1,102 @@ +#include + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#include "numpy/ndarraytypes.h" +#include "numpy/npy_math.h" + +/* This is a backport of Py_SETREF */ +#define NPY_SETREF(op, op2) \ + do { \ + PyObject *_py_tmp = (PyObject *)(op); \ + (op) = (op2); \ + Py_DECREF(_py_tmp); \ + } while (0) + + +/* + * Heavily derived from PyLong_FromDouble + * Notably, we can't set the digits directly, so have to shift and or instead. + */ +NPY_VISIBILITY_HIDDEN PyObject * +npy_longdouble_to_PyLong(npy_longdouble ldval) +{ + PyObject *v; + PyObject *l_chunk_size; + /* + * number of bits to extract at a time. 
CPython uses 30, but that's because + * it's tied to the internal long representation + */ + const int chunk_size = NPY_BITSOF_LONGLONG; + npy_longdouble frac; + int i, ndig, expo, neg; + neg = 0; + + if (npy_isinf(ldval)) { + PyErr_SetString(PyExc_OverflowError, + "cannot convert longdouble infinity to integer"); + return NULL; + } + if (npy_isnan(ldval)) { + PyErr_SetString(PyExc_ValueError, + "cannot convert longdouble NaN to integer"); + return NULL; + } + if (ldval < 0.0) { + neg = 1; + ldval = -ldval; + } + frac = npy_frexpl(ldval, &expo); /* ldval = frac*2**expo; 0.0 <= frac < 1.0 */ + v = PyLong_FromLong(0L); + if (v == NULL) + return NULL; + if (expo <= 0) + return v; + + ndig = (expo-1) / chunk_size + 1; + + l_chunk_size = PyLong_FromLong(chunk_size); + if (l_chunk_size == NULL) { + Py_DECREF(v); + return NULL; + } + + /* Get the MSBs of the integral part of the float */ + frac = npy_ldexpl(frac, (expo-1) % chunk_size + 1); + for (i = ndig; --i >= 0; ) { + npy_ulonglong chunk = (npy_ulonglong)frac; + PyObject *l_chunk; + /* v = v << chunk_size */ + NPY_SETREF(v, PyNumber_Lshift(v, l_chunk_size)); + if (v == NULL) { + goto done; + } + l_chunk = PyLong_FromUnsignedLongLong(chunk); + if (l_chunk == NULL) { + Py_DECREF(v); + v = NULL; + goto done; + } + /* v = v | chunk */ + NPY_SETREF(v, PyNumber_Or(v, l_chunk)); + Py_DECREF(l_chunk); + if (v == NULL) { + goto done; + } + + /* Remove the msbs, and repeat */ + frac = frac - (npy_longdouble) chunk; + frac = npy_ldexpl(frac, chunk_size); + } + + /* v = -v */ + if (neg) { + NPY_SETREF(v, PyNumber_Negative(v)); + if (v == NULL) { + goto done; + } + } + +done: + Py_DECREF(l_chunk_size); + return v; +} diff -Nru python-numpy-1.13.3/numpy/core/src/private/npy_longdouble.h python-numpy-1.14.5/numpy/core/src/private/npy_longdouble.h --- python-numpy-1.13.3/numpy/core/src/private/npy_longdouble.h 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/private/npy_longdouble.h 2018-06-12 
17:31:56.000000000 +0000 @@ -0,0 +1,17 @@ +#ifndef __NPY_LONGDOUBLE_H +#define __NPY_LONGDOUBLE_H + +#include "npy_config.h" +#include "numpy/ndarraytypes.h" + +/* Convert a npy_longdouble to a python `long` integer. + * + * Results are rounded towards zero. + * + * This performs the same task as PyLong_FromDouble, but for long doubles + * which have a greater range. + */ +NPY_VISIBILITY_HIDDEN PyObject * +npy_longdouble_to_PyLong(npy_longdouble ldval); + +#endif diff -Nru python-numpy-1.13.3/numpy/core/src/umath/extobj.c python-numpy-1.14.5/numpy/core/src/umath/extobj.c --- python-numpy-1.13.3/numpy/core/src/umath/extobj.c 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/extobj.c 2018-06-12 17:31:56.000000000 +0000 @@ -0,0 +1,323 @@ +#define _UMATHMODULE +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#include + +#include "npy_config.h" + +#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API +#define NO_IMPORT_ARRAY + +#include "npy_pycompat.h" + +#include "extobj.h" +#include "numpy/ufuncobject.h" + +#include "ufunc_object.h" /* for npy_um_str_pyvals_name */ +#include "common.h" + +#if USE_USE_DEFAULTS==1 +static int PyUFunc_NUM_NODEFAULTS = 0; + +/* + * This is a strategy to buy a little speed up and avoid the dictionary + * look-up in the default case. It should work in the presence of + * threads. If it is deemed too complicated or it doesn't actually work + * it could be taken out. 
+ */ +NPY_NO_EXPORT int +ufunc_update_use_defaults(void) +{ + PyObject *errobj = NULL; + int errmask, bufsize; + int res; + + PyUFunc_NUM_NODEFAULTS += 1; + res = PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj); + PyUFunc_NUM_NODEFAULTS -= 1; + if (res < 0) { + Py_XDECREF(errobj); + return -1; + } + if ((errmask != UFUNC_ERR_DEFAULT) || (bufsize != NPY_BUFSIZE) + || (PyTuple_GET_ITEM(errobj, 1) != Py_None)) { + PyUFunc_NUM_NODEFAULTS += 1; + } + else if (PyUFunc_NUM_NODEFAULTS > 0) { + PyUFunc_NUM_NODEFAULTS -= 1; + } + Py_XDECREF(errobj); + return 0; +} +#endif + +/* + * fpstatus is the ufunc_formatted hardware status + * errmask is the handling mask specified by the user. + * errobj is a Python object with (string, callable object or None) + * or NULL + */ + +/* + * 2. for each of the flags + * determine whether to ignore, warn, raise error, or call Python function. + * If ignore, do nothing + * If warn, print a warning and continue + * If raise return an error + * If call, call a user-defined function with string + */ + +NPY_NO_EXPORT int +_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first) +{ + PyObject *pyfunc, *ret, *args; + char *name = PyBytes_AS_STRING(PyTuple_GET_ITEM(errobj,0)); + char msg[100]; + + NPY_ALLOW_C_API_DEF + + /* don't need C API for a simple ignore */ + if (method == UFUNC_ERR_IGNORE) { + return 0; + } + + /* don't need C API for a simple print */ + if (method == UFUNC_ERR_PRINT) { + if (*first) { + fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name); + *first = 0; + } + return 0; + } + + NPY_ALLOW_C_API; + switch(method) { + case UFUNC_ERR_WARN: + PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name); + if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) { + goto fail; + } + break; + case UFUNC_ERR_RAISE: + PyErr_Format(PyExc_FloatingPointError, "%s encountered in %s", + errtype, name); + goto fail; + case UFUNC_ERR_CALL: + pyfunc = PyTuple_GET_ITEM(errobj, 1); + if 
(pyfunc == Py_None) { + PyErr_Format(PyExc_NameError, + "python callback specified for %s (in " \ + " %s) but no function found.", + errtype, name); + goto fail; + } + args = Py_BuildValue("NN", PyUString_FromString(errtype), + PyInt_FromLong((long) retstatus)); + if (args == NULL) { + goto fail; + } + ret = PyObject_CallObject(pyfunc, args); + Py_DECREF(args); + if (ret == NULL) { + goto fail; + } + Py_DECREF(ret); + break; + case UFUNC_ERR_LOG: + if (first) { + *first = 0; + pyfunc = PyTuple_GET_ITEM(errobj, 1); + if (pyfunc == Py_None) { + PyErr_Format(PyExc_NameError, + "log specified for %s (in %s) but no " \ + "object with write method found.", + errtype, name); + goto fail; + } + PyOS_snprintf(msg, sizeof(msg), + "Warning: %s encountered in %s\n", errtype, name); + ret = PyObject_CallMethod(pyfunc, "write", "s", msg); + if (ret == NULL) { + goto fail; + } + Py_DECREF(ret); + } + break; + } + NPY_DISABLE_C_API; + return 0; + +fail: + NPY_DISABLE_C_API; + return -1; +} + + + +NPY_NO_EXPORT PyObject * +get_global_ext_obj(void) +{ + PyObject *thedict; + PyObject *ref = NULL; + +#if USE_USE_DEFAULTS==1 + if (PyUFunc_NUM_NODEFAULTS != 0) { +#endif + thedict = PyThreadState_GetDict(); + if (thedict == NULL) { + thedict = PyEval_GetBuiltins(); + } + ref = PyDict_GetItem(thedict, npy_um_str_pyvals_name); +#if USE_USE_DEFAULTS==1 + } +#endif + + return ref; +} + + +/* + * Extracts some values from the global pyvals tuple. 
+ * all destinations may be NULL, in which case they are not retrieved + * ref - should hold the global tuple + * name - is the name of the ufunc (ufuncobj->name) + * + * bufsize - receives the buffer size to use + * errmask - receives the bitmask for error handling + * errobj - receives the python object to call with the error, + * if an error handling method is 'call' + */ +NPY_NO_EXPORT int +_extract_pyvals(PyObject *ref, const char *name, int *bufsize, + int *errmask, PyObject **errobj) +{ + PyObject *retval; + + /* default errobj case, skips dictionary lookup */ + if (ref == NULL) { + if (errmask) { + *errmask = UFUNC_ERR_DEFAULT; + } + if (errobj) { + *errobj = Py_BuildValue("NO", PyBytes_FromString(name), Py_None); + } + if (bufsize) { + *bufsize = NPY_BUFSIZE; + } + return 0; + } + + if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) { + PyErr_Format(PyExc_TypeError, + "%s must be a length 3 list.", UFUNC_PYVALS_NAME); + return -1; + } + + if (bufsize != NULL) { + *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0)); + if (error_converting(*bufsize)) { + return -1; + } + if ((*bufsize < NPY_MIN_BUFSIZE) || + (*bufsize > NPY_MAX_BUFSIZE) || + (*bufsize % 16 != 0)) { + PyErr_Format(PyExc_ValueError, + "buffer size (%d) is not in range " + "(%"NPY_INTP_FMT" - %"NPY_INTP_FMT") or not a multiple of 16", + *bufsize, (npy_intp) NPY_MIN_BUFSIZE, + (npy_intp) NPY_MAX_BUFSIZE); + return -1; + } + } + + if (errmask != NULL) { + *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1)); + if (*errmask < 0) { + if (PyErr_Occurred()) { + return -1; + } + PyErr_Format(PyExc_ValueError, + "invalid error mask (%d)", + *errmask); + return -1; + } + } + + if (errobj != NULL) { + *errobj = NULL; + retval = PyList_GET_ITEM(ref, 2); + if (retval != Py_None && !PyCallable_Check(retval)) { + PyObject *temp; + temp = PyObject_GetAttrString(retval, "write"); + if (temp == NULL || !PyCallable_Check(temp)) { + PyErr_SetString(PyExc_TypeError, + "python object must be callable or have " \ + "a 
callable write method"); + Py_XDECREF(temp); + return -1; + } + Py_DECREF(temp); + } + + *errobj = Py_BuildValue("NO", PyBytes_FromString(name), retval); + if (*errobj == NULL) { + return -1; + } + } + return 0; +} + +/* + * check the floating point status + * - errmask: mask of status to check + * - extobj: ufunc pyvals object + * may be null, in which case the thread global one is fetched + * - ufunc_name: name of ufunc + */ +NPY_NO_EXPORT int +_check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) { + int fperr; + PyObject *errobj = NULL; + int ret; + int first = 1; + + if (!errmask) { + return 0; + } + fperr = npy_get_floatstatus_barrier((char*)extobj); + if (!fperr) { + return 0; + } + + /* Get error object globals */ + if (extobj == NULL) { + extobj = get_global_ext_obj(); + } + if (_extract_pyvals(extobj, ufunc_name, + NULL, NULL, &errobj) < 0) { + Py_XDECREF(errobj); + return -1; + } + + ret = PyUFunc_handlefperr(errmask, errobj, fperr, &first); + Py_XDECREF(errobj); + + return ret; +} + + +NPY_NO_EXPORT int +_get_bufsize_errmask(PyObject * extobj, const char *ufunc_name, + int *buffersize, int *errormask) +{ + /* Get the buffersize and errormask */ + if (extobj == NULL) { + extobj = get_global_ext_obj(); + } + if (_extract_pyvals(extobj, ufunc_name, + buffersize, errormask, NULL) < 0) { + return -1; + } + + return 0; +} diff -Nru python-numpy-1.13.3/numpy/core/src/umath/extobj.h python-numpy-1.14.5/numpy/core/src/umath/extobj.h --- python-numpy-1.13.3/numpy/core/src/umath/extobj.h 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/extobj.h 2018-06-12 17:31:56.000000000 +0000 @@ -0,0 +1,32 @@ +#ifndef _NPY_PRIVATE__EXTOBJ_H_ +#define _NPY_PRIVATE__EXTOBJ_H_ + +#include /* for NPY_NO_EXPORT */ + +NPY_NO_EXPORT int +_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first); + +NPY_NO_EXPORT PyObject * +get_global_ext_obj(void); + +NPY_NO_EXPORT int +_extract_pyvals(PyObject *ref, 
const char *name, int *bufsize, + int *errmask, PyObject **errobj); + +NPY_NO_EXPORT int +_check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name); + +NPY_NO_EXPORT int +_get_bufsize_errmask(PyObject * extobj, const char *ufunc_name, + int *buffersize, int *errormask); + +/********************/ +#define USE_USE_DEFAULTS 1 +/********************/ + +#if USE_USE_DEFAULTS==1 +NPY_NO_EXPORT int +ufunc_update_use_defaults(void); +#endif + +#endif diff -Nru python-numpy-1.13.3/numpy/core/src/umath/loops.c.src python-numpy-1.14.5/numpy/core/src/umath/loops.c.src --- python-numpy-1.13.3/numpy/core/src/umath/loops.c.src 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/loops.c.src 2018-06-12 18:28:52.000000000 +0000 @@ -1287,6 +1287,7 @@ NPY_NO_EXPORT void @TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { + npy_bool give_future_warning = 0; BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; @@ -1294,17 +1295,19 @@ *((npy_bool *)op1) = res; if ((in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) && res) { - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - /* 2016-01-18, 1.11 */ - if (DEPRECATE_FUTUREWARNING( - "In the future, 'NAT @OP@ x' and 'x @OP@ NAT' " - "will always be False.") < 0) { - NPY_DISABLE_C_API; - return; - } - NPY_DISABLE_C_API; + give_future_warning = 1; + } + } + if (give_future_warning) { + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API; + /* 2016-01-18, 1.11 */ + if (DEPRECATE_FUTUREWARNING( + "In the future, 'NAT @OP@ x' and 'x @OP@ NAT' " + "will always be False.") < 0) { + /* nothing to do, we return anyway */ } + NPY_DISABLE_C_API; } } /**end repeat1**/ @@ -1312,24 +1315,27 @@ NPY_NO_EXPORT void @TYPE@_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { + npy_bool give_future_warning = 0; BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; *((npy_bool *)op1) = in1 != in2; if 
(in1 == NPY_DATETIME_NAT && in2 == NPY_DATETIME_NAT) { - NPY_ALLOW_C_API_DEF - NPY_ALLOW_C_API; - /* 2016-01-18, 1.11 */ - if (DEPRECATE_FUTUREWARNING( - "In the future, NAT != NAT will be True " - "rather than False.") < 0) { - NPY_DISABLE_C_API; - return; - } - NPY_DISABLE_C_API; + give_future_warning = 1; } } + if (give_future_warning) { + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API; + /* 2016-01-18, 1.11 */ + if (DEPRECATE_FUTUREWARNING( + "In the future, NAT != NAT will be True " + "rather than False.") < 0) { + /* nothing to do, we return anyway */ + } + NPY_DISABLE_C_API; + } } @@ -1617,13 +1623,13 @@ * when updating also update similar complex floats summation */ static @type@ -pairwise_sum_@TYPE@(@dtype@ *a, npy_uintp n, npy_intp stride) +pairwise_sum_@TYPE@(char *a, npy_uintp n, npy_intp stride) { if (n < 8) { npy_intp i; @type@ res = 0.; for (i = 0; i < n; i++) { - res += @trf@(a[i * stride]); + res += @trf@(*((@dtype@*)(a + i * stride))); } return res; } @@ -1636,26 +1642,26 @@ * 8 times unroll reduces blocksize to 16 and allows vectorization with * avx without changing summation ordering */ - r[0] = @trf@(a[0 * stride]); - r[1] = @trf@(a[1 * stride]); - r[2] = @trf@(a[2 * stride]); - r[3] = @trf@(a[3 * stride]); - r[4] = @trf@(a[4 * stride]); - r[5] = @trf@(a[5 * stride]); - r[6] = @trf@(a[6 * stride]); - r[7] = @trf@(a[7 * stride]); + r[0] = @trf@(*((@dtype@ *)(a + 0 * stride))); + r[1] = @trf@(*((@dtype@ *)(a + 1 * stride))); + r[2] = @trf@(*((@dtype@ *)(a + 2 * stride))); + r[3] = @trf@(*((@dtype@ *)(a + 3 * stride))); + r[4] = @trf@(*((@dtype@ *)(a + 4 * stride))); + r[5] = @trf@(*((@dtype@ *)(a + 5 * stride))); + r[6] = @trf@(*((@dtype@ *)(a + 6 * stride))); + r[7] = @trf@(*((@dtype@ *)(a + 7 * stride))); for (i = 8; i < n - (n % 8); i += 8) { /* small blocksizes seems to mess with hardware prefetch */ - NPY_PREFETCH(&a[(i + 512 / sizeof(a[0])) * stride], 0, 3); - r[0] += @trf@(a[(i + 0) * stride]); - r[1] += @trf@(a[(i + 1) * stride]); - r[2] += 
@trf@(a[(i + 2) * stride]); - r[3] += @trf@(a[(i + 3) * stride]); - r[4] += @trf@(a[(i + 4) * stride]); - r[5] += @trf@(a[(i + 5) * stride]); - r[6] += @trf@(a[(i + 6) * stride]); - r[7] += @trf@(a[(i + 7) * stride]); + NPY_PREFETCH(a + (i + 512 / sizeof(@dtype@)) * stride, 0, 3); + r[0] += @trf@(*((@dtype@ *)(a + (i + 0) * stride))); + r[1] += @trf@(*((@dtype@ *)(a + (i + 1) * stride))); + r[2] += @trf@(*((@dtype@ *)(a + (i + 2) * stride))); + r[3] += @trf@(*((@dtype@ *)(a + (i + 3) * stride))); + r[4] += @trf@(*((@dtype@ *)(a + (i + 4) * stride))); + r[5] += @trf@(*((@dtype@ *)(a + (i + 5) * stride))); + r[6] += @trf@(*((@dtype@ *)(a + (i + 6) * stride))); + r[7] += @trf@(*((@dtype@ *)(a + (i + 7) * stride))); } /* accumulate now to avoid stack spills for single peel loop */ @@ -1664,7 +1670,7 @@ /* do non multiple of 8 rest */ for (; i < n; i++) { - res += @trf@(a[i * stride]); + res += @trf@(*((@dtype@ *)(a + i * stride))); } return res; } @@ -1701,8 +1707,7 @@ @type@ * iop1 = (@type@ *)args[0]; npy_intp n = dimensions[0]; - *iop1 @OP@= pairwise_sum_@TYPE@((@type@ *)args[1], n, - steps[1] / (npy_intp)sizeof(@type@)); + *iop1 @OP@= pairwise_sum_@TYPE@(args[1], n, steps[1]); #else BINARY_REDUCE_LOOP(@type@) { io1 @OP@= *(@type@ *)ip2; @@ -1770,7 +1775,7 @@ *((npy_bool *)op1) = @func@(in1) != 0; } } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat1**/ @@ -1817,6 +1822,9 @@ const @type@ in2 = *(@type@ *)ip2; io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2; } + if (npy_isnan(io1)) { + npy_set_floatstatus_invalid(); + } *((@type@ *)iop1) = io1; } } @@ -1852,6 +1860,7 @@ *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? 
in1 : in2; } } + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat1**/ @@ -1941,7 +1950,7 @@ *((@type@ *)op1) = tmp + 0; } } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } NPY_NO_EXPORT void @@ -2058,8 +2067,7 @@ #if @PW@ npy_intp n = dimensions[0]; - io1 @OP@= pairwise_sum_HALF((npy_half *)args[1], n, - steps[1] / (npy_intp)sizeof(npy_half)); + io1 @OP@= pairwise_sum_HALF(args[1], n, steps[1]); #else BINARY_REDUCE_LOOP_INNER { io1 @OP@= npy_half_to_float(*(npy_half *)ip2); @@ -2128,7 +2136,7 @@ const npy_half in1 = *(npy_half *)ip1; *((npy_bool *)op1) = @func@(in1) != 0; } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat**/ @@ -2190,6 +2198,7 @@ const npy_half in2 = *(npy_half *)ip2; *((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? in1 : in2; } + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat**/ @@ -2389,7 +2398,7 @@ /* similar to pairwise sum of real floats */ static void -pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, @ftype@ * a, npy_uintp n, +pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, char * a, npy_uintp n, npy_intp stride) { assert(n % 2 == 0); @@ -2398,8 +2407,8 @@ *rr = 0.; *ri = 0.; for (i = 0; i < n; i += 2) { - *rr += a[i * stride + 0]; - *ri += a[i * stride + 1]; + *rr += *((@ftype@ *)(a + i * stride + 0)); + *ri += *((@ftype@ *)(a + i * stride + sizeof(@ftype@))); } return; } @@ -2412,26 +2421,26 @@ * 8 times unroll reduces blocksize to 16 and allows vectorization with * avx without changing summation ordering */ - r[0] = a[0 * stride]; - r[1] = a[0 * stride + 1]; - r[2] = a[2 * stride]; - r[3] = a[2 * stride + 1]; - r[4] = a[4 * stride]; - r[5] = a[4 * stride + 1]; - r[6] = a[6 * stride]; - r[7] = a[6 * stride + 1]; + r[0] = *((@ftype@ *)(a + 0 * stride)); + r[1] = *((@ftype@ *)(a + 0 * stride + sizeof(@ftype@))); + r[2] = *((@ftype@ *)(a + 2 * stride)); + r[3] = *((@ftype@ *)(a + 2 * stride + sizeof(@ftype@))); + r[4] = 
*((@ftype@ *)(a + 4 * stride)); + r[5] = *((@ftype@ *)(a + 4 * stride + sizeof(@ftype@))); + r[6] = *((@ftype@ *)(a + 6 * stride)); + r[7] = *((@ftype@ *)(a + 6 * stride + sizeof(@ftype@))); for (i = 8; i < n - (n % 8); i += 8) { /* small blocksizes seems to mess with hardware prefetch */ - NPY_PREFETCH(&a[(i + 512 / sizeof(a[0])) * stride], 0, 3); - r[0] += a[(i + 0) * stride]; - r[1] += a[(i + 0) * stride + 1]; - r[2] += a[(i + 2) * stride]; - r[3] += a[(i + 2) * stride + 1]; - r[4] += a[(i + 4) * stride]; - r[5] += a[(i + 4) * stride + 1]; - r[6] += a[(i + 6) * stride]; - r[7] += a[(i + 6) * stride + 1]; + NPY_PREFETCH(a + (i + 512 / sizeof(@ftype@)) * stride, 0, 3); + r[0] += *((@ftype@ *)(a + (i + 0) * stride)); + r[1] += *((@ftype@ *)(a + (i + 0) * stride + sizeof(@ftype@))); + r[2] += *((@ftype@ *)(a + (i + 2) * stride)); + r[3] += *((@ftype@ *)(a + (i + 2) * stride + sizeof(@ftype@))); + r[4] += *((@ftype@ *)(a + (i + 4) * stride)); + r[5] += *((@ftype@ *)(a + (i + 4) * stride + sizeof(@ftype@))); + r[6] += *((@ftype@ *)(a + (i + 6) * stride)); + r[7] += *((@ftype@ *)(a + (i + 6) * stride + sizeof(@ftype@))); } /* accumulate now to avoid stack spills for single peel loop */ @@ -2440,8 +2449,8 @@ /* do non multiple of 8 rest */ for (; i < n; i+=2) { - *rr += a[i * stride + 0]; - *ri += a[i * stride + 1]; + *rr += *((@ftype@ *)(a + i * stride + 0)); + *ri += *((@ftype@ *)(a + i * stride + sizeof(@ftype@))); } return; } @@ -2473,8 +2482,7 @@ @ftype@ * oi = ((@ftype@ *)args[0]) + 1; @ftype@ rr, ri; - pairwise_sum_@TYPE@(&rr, &ri, (@ftype@ *)args[1], n * 2, - steps[1] / (npy_intp)sizeof(@ftype@) / 2); + pairwise_sum_@TYPE@(&rr, &ri, args[1], n * 2, steps[1] / 2); *or @OP@= rr; *oi @OP@= ri; return; @@ -2630,7 +2638,7 @@ const @ftype@ in1i = ((@ftype@ *)ip1)[1]; *((npy_bool *)op1) = @func@(in1r) @OP@ @func@(in1i); } - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat1**/ @@ -2739,6 +2747,7 @@ ((@ftype@ *)op1)[1] = in2i; 
} } + npy_clear_floatstatus_barrier((char*)dimensions); } /**end repeat1**/ diff -Nru python-numpy-1.13.3/numpy/core/src/umath/override.c python-numpy-1.14.5/numpy/core/src/umath/override.c --- python-numpy-1.13.3/numpy/core/src/umath/override.c 2017-09-24 22:47:22.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/override.c 2018-06-12 18:28:52.000000000 +0000 @@ -29,7 +29,10 @@ "cannot specify both 'sig' and 'signature'"); return -1; } - Py_INCREF(obj); + /* + * No INCREF or DECREF needed: got a borrowed reference above, + * and, unlike e.g. PyList_SetItem, PyDict_SetItem INCREF's it. + */ PyDict_SetItemString(normal_kwds, "signature", obj); PyDict_DelItemString(normal_kwds, "sig"); } @@ -282,7 +285,6 @@ if (*normal_args == NULL) { return -1; } - /* ufuncs accept 'sig' or 'signature' normalize to 'signature' */ return normalize_signature_keyword(*normal_kwds); } diff -Nru python-numpy-1.13.3/numpy/core/src/umath/reduction.c python-numpy-1.14.5/numpy/core/src/umath/reduction.c --- python-numpy-1.13.3/numpy/core/src/umath/reduction.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/reduction.c 2018-06-12 18:28:52.000000000 +0000 @@ -21,8 +21,10 @@ #include "npy_config.h" #include "npy_pycompat.h" +#include "numpy/ufuncobject.h" #include "lowlevel_strided_loops.h" #include "reduction.h" +#include "extobj.h" /* for _check_ufunc_fperr */ /* * Allocates a result array for a reduction operation, with @@ -187,7 +189,7 @@ } Py_INCREF(ret); - if (PyArray_SetUpdateIfCopyBase(ret_copy, (PyArrayObject *)ret) < 0) { + if (PyArray_SetWritebackIfCopyBase(ret_copy, (PyArrayObject *)ret) < 0) { Py_DECREF(ret); Py_DECREF(ret_copy); return NULL; @@ -437,6 +439,7 @@ * data : Data which is passed to assign_identity and the inner loop. * buffersize : Buffer size for the iterator. For the default, pass in 0. * funcname : The name of the reduction function, for error messages. 
+ * errormask : forwarded from _get_bufsize_errmask * * TODO FIXME: if you squint, this is essentially an second independent * implementation of generalized ufuncs with signature (i)->(), plus a few @@ -458,7 +461,8 @@ int subok, PyArray_AssignReduceIdentityFunc *assign_identity, PyArray_ReduceLoopFunc *loop, - void *data, npy_intp buffersize, const char *funcname) + void *data, npy_intp buffersize, const char *funcname, + int errormask) { PyArrayObject *result = NULL, *op_view = NULL; npy_intp skip_first_count = 0; @@ -481,7 +485,7 @@ * This either conforms 'out' to the ndim of 'operand', or allocates * a new array appropriate for this reduction. * - * A new array with UPDATEIFCOPY is allocated if operand and out have memory + * A new array with WRITEBACKIFCOPY is allocated if operand and out have memory * overlap. */ Py_INCREF(result_dtype); @@ -555,6 +559,9 @@ goto fail; } + /* Start with the floating-point exception flags cleared */ + npy_clear_floatstatus_barrier((char*)&iter); + if (NpyIter_GetIterSize(iter) != 0) { NpyIter_IterNextFunc *iternext; char **dataptr; @@ -586,6 +593,12 @@ goto fail; } } + + /* Check whether any errors occurred during the loop */ + if (PyErr_Occurred() || + _check_ufunc_fperr(errormask, NULL, "reduce") < 0) { + goto fail; + } NpyIter_Deallocate(iter); Py_DECREF(op_view); @@ -598,6 +611,7 @@ } } else { + PyArray_ResolveWritebackIfCopy(result); /* prevent spurious warnings */ Py_DECREF(result); result = out; Py_INCREF(result); @@ -606,6 +620,7 @@ return result; fail: + PyArray_ResolveWritebackIfCopy(result); /* prevent spurious warnings */ Py_XDECREF(result); Py_XDECREF(op_view); if (iter != NULL) { diff -Nru python-numpy-1.13.3/numpy/core/src/umath/reduction.h python-numpy-1.14.5/numpy/core/src/umath/reduction.h --- python-numpy-1.13.3/numpy/core/src/umath/reduction.h 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/reduction.h 2018-06-12 18:28:52.000000000 +0000 @@ -137,6 +137,7 @@ * data : Data 
which is passed to assign_identity and the inner loop. * buffersize : Buffer size for the iterator. For the default, pass in 0. * funcname : The name of the reduction function, for error messages. + * errormask : forwarded from _get_bufsize_errmask */ NPY_NO_EXPORT PyArrayObject * PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out, @@ -149,6 +150,7 @@ int subok, PyArray_AssignReduceIdentityFunc *assign_identity, PyArray_ReduceLoopFunc *loop, - void *data, npy_intp buffersize, const char *funcname); + void *data, npy_intp buffersize, const char *funcname, + int errormask); #endif diff -Nru python-numpy-1.13.3/numpy/core/src/umath/scalarmath.c.src python-numpy-1.14.5/numpy/core/src/umath/scalarmath.c.src --- python-numpy-1.13.3/numpy/core/src/umath/scalarmath.c.src 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/scalarmath.c.src 2018-06-12 18:28:52.000000000 +0000 @@ -24,6 +24,7 @@ #include "templ_common.h" #include "binop_override.h" +#include "npy_longdouble.h" /* Basic operations: * @@ -550,13 +551,13 @@ /**begin repeat * #name = cfloat, cdouble, clongdouble# * #type = npy_cfloat, npy_cdouble, npy_clongdouble# - * #rname = float, double, longdouble# * #rtype = npy_float, npy_double, npy_longdouble# + * #c = f,,l# */ static void @name@_ctype_absolute(@type@ a, @rtype@ *out) { - *out = _basic_@rname@_sqrt(a.real*a.real + a.imag*a.imag); + *out = npy_cabs@c@(a); } /**end repeat**/ @@ -853,7 +854,7 @@ } #if @fperr@ - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&out); #endif /* @@ -868,7 +869,7 @@ #if @fperr@ /* Check status flag. 
If it is set, then look up what to do */ - retstatus = PyUFunc_getfperr(); + retstatus = npy_get_floatstatus_barrier((char*)&out); if (retstatus) { int bufsize, errmask; PyObject *errobj; @@ -998,7 +999,7 @@ return Py_NotImplemented; } - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&out); /* * here we do the actual calculation with arg1 and arg2 @@ -1013,7 +1014,7 @@ } /* Check status flag. If it is set, then look up what to do */ - retstatus = PyUFunc_getfperr(); + retstatus = npy_get_floatstatus_barrier((char*)&out); if (retstatus) { int bufsize, errmask; PyObject *errobj; @@ -1077,7 +1078,7 @@ return Py_NotImplemented; } - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&out); /* * here we do the actual calculation with arg1 and arg2 @@ -1141,7 +1142,7 @@ return Py_NotImplemented; } - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&out); /* * here we do the actual calculation with arg1 and arg2 @@ -1155,7 +1156,7 @@ } /* Check status flag. If it is set, then look up what to do */ - retstatus = PyUFunc_getfperr(); + retstatus = npy_get_floatstatus_barrier((char*)&out); if (retstatus) { int bufsize, errmask; PyObject *errobj; @@ -1391,45 +1392,46 @@ * * #cmplx = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1# * #sign = (signed, unsigned)*5, , , , , , , # - * #unsigntyp = 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0*7# - * #ctype = long*8, PY_LONG_LONG*2, double*7# + * #ctype = long*8, PY_LONG_LONG*2, + * double*3, npy_longdouble, double*2, npy_longdouble# * #to_ctype = , , , , , , , , , , npy_half_to_double, , , , , , # - * #realtyp = 0*10, 1*7# * #func = (PyLong_FromLong, PyLong_FromUnsignedLong)*4, * PyLong_FromLongLong, PyLong_FromUnsignedLongLong, - * PyLong_FromDouble*7# + * PyLong_FromDouble*3, npy_longdouble_to_PyLong, + * PyLong_FromDouble*2, npy_longdouble_to_PyLong# */ static PyObject * @name@_int(PyObject *obj) { + PyObject *long_result; + #if @cmplx@ - @sign@ @ctype@ x= @to_ctype@(PyArrayScalar_VAL(obj, @Name@).real); - 
int ret; + @sign@ @ctype@ x = @to_ctype@(PyArrayScalar_VAL(obj, @Name@).real); #else - @sign@ @ctype@ x= @to_ctype@(PyArrayScalar_VAL(obj, @Name@)); -#endif - -#if @realtyp@ - double ix; - modf(x, &ix); - x = ix; + @sign@ @ctype@ x = @to_ctype@(PyArrayScalar_VAL(obj, @Name@)); #endif #if @cmplx@ - ret = emit_complexwarning(); - if (ret < 0) { + if (emit_complexwarning() < 0) { return NULL; } #endif -#if @unsigntyp@ - if(x < LONG_MAX) - return PyInt_FromLong(x); -#else - if(LONG_MIN < x && x < LONG_MAX) - return PyInt_FromLong(x); + long_result = @func@(x); + if (long_result == NULL){ + return NULL; + } + +#ifndef NPY_PY3K + /* Invoke long.__int__ to try to downcast */ + { + PyObject *before_downcast = long_result; + long_result = Py_TYPE(long_result)->tp_as_number->nb_int(long_result); + Py_DECREF(before_downcast); + } #endif - return @func@(x); + + return long_result; } /**end repeat**/ @@ -1447,18 +1449,18 @@ * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )*2# * #which = long*17, float*17# * #func = (PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5, - * PyLong_FromDouble*7, PyFloat_FromDouble*17# + * PyLong_FromDouble*3, npy_longdouble_to_PyLong, + * PyLong_FromDouble*2, npy_longdouble_to_PyLong, + * PyFloat_FromDouble*17# */ static NPY_INLINE PyObject * @name@_@which@(PyObject *obj) { #if @cmplx@ - int ret; - ret = emit_complexwarning(); - if (ret < 0) { + if (emit_complexwarning() < 0) { return NULL; } - return @func@(@to_ctype@((PyArrayScalar_VAL(obj, @Name@)).real)); + return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@).real)); #else return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@))); #endif diff -Nru python-numpy-1.13.3/numpy/core/src/umath/simd.inc.src python-numpy-1.14.5/numpy/core/src/umath/simd.inc.src --- python-numpy-1.13.3/numpy/core/src/umath/simd.inc.src 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/simd.inc.src 2018-06-12 18:28:52.000000000 +0000 @@ -840,7 +840,7 @@ i += 2 * stride; 
/* minps/minpd will set invalid flag if nan is encountered */ - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)&c1); LOOP_BLOCKED(@type@, 32) { @vtype@ v1 = @vpre@_load_@vsuf@((@type@*)&ip[i]); @vtype@ v2 = @vpre@_load_@vsuf@((@type@*)&ip[i + stride]); @@ -849,7 +849,7 @@ } c1 = @vpre@_@VOP@_@vsuf@(c1, c2); - if (npy_get_floatstatus() & NPY_FPE_INVALID) { + if (npy_get_floatstatus_barrier((char*)&c1) & NPY_FPE_INVALID) { *op = @nan@; } else { @@ -860,6 +860,9 @@ LOOP_BLOCKED_END { *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i]; } + if (npy_isnan(*op)) { + npy_set_floatstatus_invalid(); + } } /**end repeat1**/ diff -Nru python-numpy-1.13.3/numpy/core/src/umath/test_rational.c.src python-numpy-1.14.5/numpy/core/src/umath/test_rational.c.src --- python-numpy-1.13.3/numpy/core/src/umath/test_rational.c.src 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/test_rational.c.src 2018-06-12 18:28:52.000000000 +0000 @@ -9,6 +9,9 @@ #include #include +#include "common.h" /* for error_converting */ + + /* Relevant arithmetic exceptions */ /* Uncomment the following line to work around a bug in numpy */ @@ -425,7 +428,7 @@ PyObject* y; int eq; n[i] = PyInt_AsLong(x[i]); - if (n[i]==-1 && PyErr_Occurred()) { + if (error_converting(n[i])) { if (PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Format(PyExc_TypeError, "expected integer %s, got %s", @@ -473,7 +476,7 @@ PyObject* y_; \ int eq_; \ long n_ = PyInt_AsLong(object); \ - if (n_==-1 && PyErr_Occurred()) { \ + if (error_converting(n_)) { \ if (PyErr_ExceptionMatches(PyExc_TypeError)) { \ PyErr_Clear(); \ Py_INCREF(Py_NotImplemented); \ @@ -750,7 +753,7 @@ long n = PyInt_AsLong(item); PyObject* y; int eq; - if (n==-1 && PyErr_Occurred()) { + if (error_converting(n)) { return -1; } y = PyInt_FromLong(n); diff -Nru python-numpy-1.13.3/numpy/core/src/umath/ufunc_object.c python-numpy-1.14.5/numpy/core/src/umath/ufunc_object.c --- 
python-numpy-1.13.3/numpy/core/src/umath/ufunc_object.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/ufunc_object.c 2018-06-12 18:28:52.000000000 +0000 @@ -46,6 +46,8 @@ #include "ufunc_object.h" #include "override.h" #include "npy_import.h" +#include "extobj.h" +#include "common.h" /********** PRINTF DEBUG TRACING **************/ #define NPY_UF_DBG_TRACING 0 @@ -63,21 +65,12 @@ #endif /**********************************************/ - -/********************/ -#define USE_USE_DEFAULTS 1 -/********************/ - /* ---------------------------------------------------------------- */ static int _does_loop_use_arrays(void *data); static int -_extract_pyvals(PyObject *ref, const char *name, int *bufsize, - int *errmask, PyObject **errobj); - -static int assign_reduce_identity_zero(PyArrayObject *result, void *data); static int @@ -87,103 +80,6 @@ assign_reduce_identity_one(PyArrayObject *result, void *data); -/* - * fpstatus is the ufunc_formatted hardware status - * errmask is the handling mask specified by the user. - * errobj is a Python object with (string, callable object or None) - * or NULL - */ - -/* - * 2. for each of the flags - * determine whether to ignore, warn, raise error, or call Python function. 
- * If ignore, do nothing - * If warn, print a warning and continue - * If raise return an error - * If call, call a user-defined function with string - */ - -static int -_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first) -{ - PyObject *pyfunc, *ret, *args; - char *name = PyBytes_AS_STRING(PyTuple_GET_ITEM(errobj,0)); - char msg[100]; - - NPY_ALLOW_C_API_DEF - - /* don't need C API for a simple print */ - if (method == UFUNC_ERR_PRINT) { - if (*first) { - fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name); - *first = 0; - } - return 0; - } - - NPY_ALLOW_C_API; - switch(method) { - case UFUNC_ERR_WARN: - PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name); - if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) { - goto fail; - } - break; - case UFUNC_ERR_RAISE: - PyErr_Format(PyExc_FloatingPointError, "%s encountered in %s", - errtype, name); - goto fail; - case UFUNC_ERR_CALL: - pyfunc = PyTuple_GET_ITEM(errobj, 1); - if (pyfunc == Py_None) { - PyErr_Format(PyExc_NameError, - "python callback specified for %s (in " \ - " %s) but no function found.", - errtype, name); - goto fail; - } - args = Py_BuildValue("NN", PyUString_FromString(errtype), - PyInt_FromLong((long) retstatus)); - if (args == NULL) { - goto fail; - } - ret = PyObject_CallObject(pyfunc, args); - Py_DECREF(args); - if (ret == NULL) { - goto fail; - } - Py_DECREF(ret); - break; - case UFUNC_ERR_LOG: - if (first) { - *first = 0; - pyfunc = PyTuple_GET_ITEM(errobj, 1); - if (pyfunc == Py_None) { - PyErr_Format(PyExc_NameError, - "log specified for %s (in %s) but no " \ - "object with write method found.", - errtype, name); - goto fail; - } - PyOS_snprintf(msg, sizeof(msg), - "Warning: %s encountered in %s\n", errtype, name); - ret = PyObject_CallMethod(pyfunc, "write", "s", msg); - if (ret == NULL) { - goto fail; - } - Py_DECREF(ret); - } - break; - } - NPY_DISABLE_C_API; - return 0; - -fail: - NPY_DISABLE_C_API; - return -1; -} - - 
/*UFUNC_API*/ NPY_NO_EXPORT int PyUFunc_getfperr(void) @@ -192,7 +88,8 @@ * non-clearing get was only added in 1.9 so this function always cleared * keep it so just in case third party code relied on the clearing */ - return npy_clear_floatstatus(); + char param = 0; + return npy_clear_floatstatus_barrier(¶m); } #define HANDLEIT(NAME, str) {if (retstatus & NPY_FPE_##NAME) { \ @@ -225,7 +122,8 @@ PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) { /* clearing is done for backward compatibility */ - int retstatus = npy_clear_floatstatus(); + int retstatus; + retstatus = npy_clear_floatstatus_barrier((char*)&retstatus); return PyUFunc_handlefperr(errmask, errobj, retstatus, first); } @@ -236,50 +134,8 @@ NPY_NO_EXPORT void PyUFunc_clearfperr() { - npy_clear_floatstatus(); -} - - -#if USE_USE_DEFAULTS==1 -static int PyUFunc_NUM_NODEFAULTS = 0; -#endif - -static PyObject * -get_global_ext_obj(void) -{ - PyObject *thedict; - PyObject *ref = NULL; - -#if USE_USE_DEFAULTS==1 - if (PyUFunc_NUM_NODEFAULTS != 0) { -#endif - thedict = PyThreadState_GetDict(); - if (thedict == NULL) { - thedict = PyEval_GetBuiltins(); - } - ref = PyDict_GetItem(thedict, npy_um_str_pyvals_name); -#if USE_USE_DEFAULTS==1 - } -#endif - - return ref; -} - - -static int -_get_bufsize_errmask(PyObject * extobj, const char *ufunc_name, - int *buffersize, int *errormask) -{ - /* Get the buffersize and errormask */ - if (extobj == NULL) { - extobj = get_global_ext_obj(); - } - if (_extract_pyvals(extobj, ufunc_name, - buffersize, errormask, NULL) < 0) { - return -1; - } - - return 0; + char param = 0; + npy_clear_floatstatus_barrier(¶m); } /* @@ -426,97 +282,6 @@ return; } -/* - * Extracts some values from the global pyvals tuple. 
- * all destinations may be NULL, in which case they are not retrieved - * ref - should hold the global tuple - * name - is the name of the ufunc (ufuncobj->name) - * - * bufsize - receives the buffer size to use - * errmask - receives the bitmask for error handling - * errobj - receives the python object to call with the error, - * if an error handling method is 'call' - */ -static int -_extract_pyvals(PyObject *ref, const char *name, int *bufsize, - int *errmask, PyObject **errobj) -{ - PyObject *retval; - - /* default errobj case, skips dictionary lookup */ - if (ref == NULL) { - if (errmask) { - *errmask = UFUNC_ERR_DEFAULT; - } - if (errobj) { - *errobj = Py_BuildValue("NO", PyBytes_FromString(name), Py_None); - } - if (bufsize) { - *bufsize = NPY_BUFSIZE; - } - return 0; - } - - if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) { - PyErr_Format(PyExc_TypeError, - "%s must be a length 3 list.", UFUNC_PYVALS_NAME); - return -1; - } - - if (bufsize != NULL) { - *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0)); - if ((*bufsize == -1) && PyErr_Occurred()) { - return -1; - } - if ((*bufsize < NPY_MIN_BUFSIZE) || - (*bufsize > NPY_MAX_BUFSIZE) || - (*bufsize % 16 != 0)) { - PyErr_Format(PyExc_ValueError, - "buffer size (%d) is not in range " - "(%"NPY_INTP_FMT" - %"NPY_INTP_FMT") or not a multiple of 16", - *bufsize, (npy_intp) NPY_MIN_BUFSIZE, - (npy_intp) NPY_MAX_BUFSIZE); - return -1; - } - } - - if (errmask != NULL) { - *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1)); - if (*errmask < 0) { - if (PyErr_Occurred()) { - return -1; - } - PyErr_Format(PyExc_ValueError, - "invalid error mask (%d)", - *errmask); - return -1; - } - } - - if (errobj != NULL) { - *errobj = NULL; - retval = PyList_GET_ITEM(ref, 2); - if (retval != Py_None && !PyCallable_Check(retval)) { - PyObject *temp; - temp = PyObject_GetAttrString(retval, "write"); - if (temp == NULL || !PyCallable_Check(temp)) { - PyErr_SetString(PyExc_TypeError, - "python object must be callable or have " \ - "a 
callable write method"); - Py_XDECREF(temp); - return -1; - } - Py_DECREF(temp); - } - - *errobj = Py_BuildValue("NO", PyBytes_FromString(name), retval); - if (*errobj == NULL) { - return -1; - } - } - return 0; -} - /*UFUNC_API * @@ -761,8 +526,8 @@ * Produce a name for the ufunc, if one is not already set * This is used in the PyUFunc_handlefperr machinery, and in error messages */ -static const char* -_get_ufunc_name(PyUFuncObject *ufunc) { +NPY_NO_EXPORT const char* +ufunc_get_name_cstr(PyUFuncObject *ufunc) { return ufunc->name ? ufunc->name : ""; } @@ -789,7 +554,7 @@ int nout = ufunc->nout; PyObject *obj, *context; PyObject *str_key_obj = NULL; - const char *ufunc_name = _get_ufunc_name(ufunc); + const char *ufunc_name = ufunc_get_name_cstr(ufunc); int type_num; int any_flexible = 0, any_object = 0, any_flexible_userloops = 0; @@ -1415,7 +1180,7 @@ PyUFuncGenericFunction innerloop, void *innerloopdata) { - npy_intp i, nin = ufunc->nin, nout = ufunc->nout; + npy_intp i, iop, nin = ufunc->nin, nout = ufunc->nout; npy_intp nop = nin + nout; npy_uint32 op_flags[NPY_MAXARGS]; NpyIter *iter; @@ -1501,6 +1266,12 @@ /* Call the __array_prepare__ functions for the new array */ if (prepare_ufunc_output(ufunc, &op[nin+i], arr_prep[i], arr_prep_args, i) < 0) { + for(iop = 0; iop < nin+i; ++iop) { + if (op_it[iop] != op[iop]) { + /* ignore errrors */ + PyArray_ResolveWritebackIfCopy(op_it[iop]); + } + } NpyIter_Deallocate(iter); return -1; } @@ -1553,7 +1324,11 @@ NPY_END_THREADS; } - + for(iop = 0; iop < nop; ++iop) { + if (op_it[iop] != op[iop]) { + PyArray_ResolveWritebackIfCopy(op_it[iop]); + } + } NpyIter_Deallocate(iter); return 0; } @@ -1740,7 +1515,7 @@ PyObject **arr_prep, PyObject *arr_prep_args) { - int i, nin = ufunc->nin, nout = ufunc->nout; + int retval, i, nin = ufunc->nin, nout = ufunc->nout; int nop = nin + nout; npy_uint32 op_flags[NPY_MAXARGS]; NpyIter *iter; @@ -1926,8 +1701,16 @@ NPY_AUXDATA_FREE(innerloopdata); } + retval = 0; + nop = 
NpyIter_GetNOp(iter); + for(i=0; i< nop; ++i) { + if (PyArray_ResolveWritebackIfCopy(NpyIter_GetOperandArray(iter)[i]) < 0) { + retval = -1; + } + } + NpyIter_Deallocate(iter); - return 0; + return retval; } static PyObject * @@ -1969,47 +1752,9 @@ } /* - * check the floating point status - * - errmask: mask of status to check - * - extobj: ufunc pyvals object - * may be null, in which case the thread global one is fetched - * - ufunc_name: name of ufunc - */ -static int -_check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) { - int fperr; - PyObject *errobj = NULL; - int ret; - int first = 1; - - if (!errmask) { - return 0; - } - fperr = PyUFunc_getfperr(); - if (!fperr) { - return 0; - } - - /* Get error object globals */ - if (extobj == NULL) { - extobj = get_global_ext_obj(); - } - if (_extract_pyvals(extobj, ufunc_name, - NULL, NULL, &errobj) < 0) { - Py_XDECREF(errobj); - return -1; - } - - ret = PyUFunc_handlefperr(errmask, errobj, fperr, &first); - Py_XDECREF(errobj); - - return ret; -} - -/* * Validate the core dimensions of all the operands, and collect all of * the labelled core dimensions into 'core_dim_sizes'. - * + * * Returns 0 on success, and -1 on failure * * The behavior has been changed in NumPy 1.10.0, and the following @@ -2050,7 +1795,7 @@ "%s: %s operand %d does not have enough " "dimensions (has %d, gufunc core with " "signature %s requires %d)", - _get_ufunc_name(ufunc), i < nin ? "Input" : "Output", + ufunc_get_name_cstr(ufunc), i < nin ? "Input" : "Output", i < nin ? i : i - nin, PyArray_NDIM(op[i]), ufunc->core_signature, num_dims); return -1; @@ -2074,7 +1819,7 @@ "core dimension %d, with gufunc " "signature %s (size %zd is different " "from %zd)", - _get_ufunc_name(ufunc), i < nin ? "Input" : "Output", + ufunc_get_name_cstr(ufunc), i < nin ? "Input" : "Output", i < nin ? 
i : i - nin, idim, ufunc->core_signature, op_dim_size, core_dim_sizes[core_dim_index]); @@ -2117,13 +1862,12 @@ PyErr_Format(PyExc_ValueError, "%s: Output operand %d has core dimension %d " "unspecified, with gufunc signature %s", - _get_ufunc_name(ufunc), out_op, i, ufunc->core_signature); + ufunc_get_name_cstr(ufunc), out_op, i, ufunc->core_signature); return -1; } return 0; } - static int PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds, @@ -2186,7 +1930,7 @@ nout = ufunc->nout; nop = nin + nout; - ufunc_name = _get_ufunc_name(ufunc); + ufunc_name = ufunc_get_name_cstr(ufunc); NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name); @@ -2488,7 +2232,7 @@ #endif /* Start with the floating-point exception flags cleared */ - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&iter); NPY_UF_DBG_PRINT("Executing inner loop\n"); @@ -2563,6 +2307,11 @@ goto fail; } + /* Write back any temporary data from PyArray_SetWritebackIfCopyBase */ + for(i=nin; i< nop; ++i) + if (PyArray_ResolveWritebackIfCopy(NpyIter_GetOperandArray(iter)[i]) < 0) + goto fail; + PyArray_free(inner_strides); NpyIter_Deallocate(iter); /* The caller takes ownership of all the references in op */ @@ -2648,7 +2397,7 @@ nout = ufunc->nout; nop = nin + nout; - ufunc_name = _get_ufunc_name(ufunc); + ufunc_name = ufunc_get_name_cstr(ufunc); NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name); @@ -2733,7 +2482,7 @@ } /* Start with the floating-point exception flags cleared */ - PyUFunc_clearfperr(); + npy_clear_floatstatus_barrier((char*)&ufunc); /* Do the ufunc loop */ if (need_fancy) { @@ -2888,7 +2637,7 @@ int i, retcode; PyArrayObject *op[3] = {arr, arr, NULL}; PyArray_Descr *dtypes[3] = {NULL, NULL, NULL}; - const char *ufunc_name = _get_ufunc_name(ufunc); + const char *ufunc_name = ufunc_get_name_cstr(ufunc); PyObject *type_tup = NULL; *out_dtype = NULL; @@ -3077,7 +2826,7 @@ PyArray_Descr *dtype; PyArrayObject *result; PyArray_AssignReduceIdentityFunc 
*assign_identity = NULL; - const char *ufunc_name = _get_ufunc_name(ufunc); + const char *ufunc_name = ufunc_get_name_cstr(ufunc); /* These parameters come from a TLS global */ int buffersize = 0, errormask = 0; @@ -3160,7 +2909,7 @@ keepdims, 0, assign_identity, reduce_loop, - ufunc, buffersize, ufunc_name); + ufunc, buffersize, ufunc_name, errormask); Py_DECREF(dtype); return result; @@ -3185,7 +2934,7 @@ PyUFuncGenericFunction innerloop = NULL; void *innerloopdata = NULL; - const char *ufunc_name = _get_ufunc_name(ufunc); + const char *ufunc_name = ufunc_get_name_cstr(ufunc); /* These parameters come from extobj= or from a TLS global */ int buffersize = 0, errormask = 0; @@ -3495,6 +3244,9 @@ } finish: + /* Write back any temporary data from PyArray_SetWritebackIfCopyBase */ + if (PyArray_ResolveWritebackIfCopy(op[0]) < 0) + goto fail; Py_XDECREF(op_dtypes[0]); NpyIter_Deallocate(iter); NpyIter_Deallocate(iter_inner); @@ -3552,7 +3304,7 @@ PyUFuncGenericFunction innerloop = NULL; void *innerloopdata = NULL; - const char *ufunc_name = _get_ufunc_name(ufunc); + const char *ufunc_name = ufunc_get_name_cstr(ufunc); char *opname = "reduceat"; /* These parameters come from extobj= or from a TLS global */ @@ -3877,6 +3629,9 @@ } finish: + if (op[0] && PyArray_ResolveWritebackIfCopy(op[0]) < 0) { + goto fail; + } Py_XDECREF(op_dtypes[0]); NpyIter_Deallocate(iter); @@ -3904,18 +3659,17 @@ int i, naxes=0, ndim; int axes[NPY_MAXDIMS]; PyObject *axes_in = NULL; - PyArrayObject *mp, *ret = NULL; + PyArrayObject *mp = NULL, *ret = NULL; PyObject *op, *res = NULL; PyObject *obj_ind, *context; PyArrayObject *indices = NULL; PyArray_Descr *otype = NULL; - PyObject *out_obj = NULL; PyArrayObject *out = NULL; int keepdims = 0; static char *reduce_kwlist[] = { "array", "axis", "dtype", "out", "keepdims", NULL}; static char *accumulate_kwlist[] = { - "array", "axis", "dtype", "out", "keepdims", NULL}; + "array", "axis", "dtype", "out", NULL}; static char *reduceat_kwlist[] = { 
"array", "indices", "axis", "dtype", "out", NULL}; @@ -3956,46 +3710,32 @@ PyDict_SetItem(kwds, npy_um_str_out, out_obj); } } - + if (operation == UFUNC_REDUCEAT) { PyArray_Descr *indtype; indtype = PyArray_DescrFromType(NPY_INTP); if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|OO&O&:reduceat", reduceat_kwlist, - &op, - &obj_ind, - &axes_in, - PyArray_DescrConverter2, &otype, - PyArray_OutputConverter, &out)) { - Py_XDECREF(otype); - return NULL; + &op, + &obj_ind, + &axes_in, + PyArray_DescrConverter2, &otype, + PyArray_OutputConverter, &out)) { + goto fail; } indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype, 1, 1, NPY_ARRAY_CARRAY, NULL); if (indices == NULL) { - Py_XDECREF(otype); - return NULL; + goto fail; } } else if (operation == UFUNC_ACCUMULATE) { - PyObject *bad_keepdimarg = NULL; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&O:accumulate", + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&:accumulate", accumulate_kwlist, &op, &axes_in, PyArray_DescrConverter2, &otype, - PyArray_OutputConverter, &out, - &bad_keepdimarg)) { - Py_XDECREF(otype); - return NULL; - } - /* Until removed outright by https://github.com/numpy/numpy/pull/8187 */ - if (bad_keepdimarg != NULL) { - if (DEPRECATE_FUTUREWARNING( - "keepdims argument has no effect on accumulate, and will be " - "removed in future") < 0) { - Py_XDECREF(otype); - return NULL; - } + PyArray_OutputConverter, &out)) { + goto fail; } } else { @@ -4006,8 +3746,7 @@ PyArray_DescrConverter2, &otype, PyArray_OutputConverter, &out, &keepdims)) { - Py_XDECREF(otype); - return NULL; + goto fail; } } /* Ensure input is an array */ @@ -4020,7 +3759,7 @@ mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, context); Py_XDECREF(context); if (mp == NULL) { - return NULL; + goto fail; } ndim = PyArray_NDIM(mp); @@ -4031,9 +3770,7 @@ PyErr_Format(PyExc_TypeError, "cannot perform %s with flexible type", _reduce_type[operation]); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; 
} /* Convert the 'axis' parameter into a list of axes */ @@ -4053,22 +3790,16 @@ if (naxes < 0 || naxes > NPY_MAXDIMS) { PyErr_SetString(PyExc_ValueError, "too many values for 'axis'"); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } for (i = 0; i < naxes; ++i) { PyObject *tmp = PyTuple_GET_ITEM(axes_in, i); int axis = PyArray_PyIntAsInt(tmp); - if (axis == -1 && PyErr_Occurred()) { - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + if (error_converting(axis)) { + goto fail; } if (check_and_adjust_axis(&axis, ndim) < 0) { - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } axes[i] = (int)axis; } @@ -4077,17 +3808,15 @@ else { int axis = PyArray_PyIntAsInt(axes_in); /* TODO: PyNumber_Index would be good to use here */ - if (axis == -1 && PyErr_Occurred()) { - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + if (error_converting(axis)) { + goto fail; } /* Special case letting axis={0 or -1} slip through for scalars */ if (ndim == 0 && (axis == 0 || axis == -1)) { axis = 0; } else if (check_and_adjust_axis(&axis, ndim) < 0) { - return NULL; + goto fail; } axes[0] = (int)axis; naxes = 1; @@ -4107,9 +3836,7 @@ (naxes == 0 || (naxes == 1 && axes[0] == 0)))) { PyErr_Format(PyExc_TypeError, "cannot %s on a scalar", _reduce_type[operation]); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } } @@ -4155,9 +3882,7 @@ if (naxes != 1) { PyErr_SetString(PyExc_ValueError, "accumulate does not allow multiple axes"); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } ret = (PyArrayObject *)PyUFunc_Accumulate(ufunc, mp, out, axes[0], otype->type_num); @@ -4166,9 +3891,7 @@ if (naxes != 1) { PyErr_SetString(PyExc_ValueError, "reduceat does not allow multiple axes"); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; + goto fail; } ret = (PyArrayObject *)PyUFunc_Reduceat(ufunc, mp, indices, out, axes[0], otype->type_num); @@ -4201,6 +3924,11 @@ } } return PyArray_Return(ret); + +fail: + Py_XDECREF(otype); 
+ Py_XDECREF(mp); + return NULL; } /* @@ -4403,7 +4131,8 @@ } else if (override) { for (i = 0; i < ufunc->nargs; i++) { - PyArray_XDECREF_ERR(mps[i]); + PyArray_DiscardWritebackIfCopy(mps[i]); + Py_XDECREF(mps[i]); } return override; } @@ -4411,7 +4140,8 @@ errval = PyUFunc_GenericFunction(ufunc, args, kwds, mps); if (errval < 0) { for (i = 0; i < ufunc->nargs; i++) { - PyArray_XDECREF_ERR(mps[i]); + PyArray_DiscardWritebackIfCopy(mps[i]); + Py_XDECREF(mps[i]); } if (errval == -1) { return NULL; @@ -4536,39 +4266,6 @@ return res; } -#if USE_USE_DEFAULTS==1 -/* - * This is a strategy to buy a little speed up and avoid the dictionary - * look-up in the default case. It should work in the presence of - * threads. If it is deemed too complicated or it doesn't actually work - * it could be taken out. - */ -static int -ufunc_update_use_defaults(void) -{ - PyObject *errobj = NULL; - int errmask, bufsize; - int res; - - PyUFunc_NUM_NODEFAULTS += 1; - res = PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj); - PyUFunc_NUM_NODEFAULTS -= 1; - if (res < 0) { - Py_XDECREF(errobj); - return -1; - } - if ((errmask != UFUNC_ERR_DEFAULT) || (bufsize != NPY_BUFSIZE) - || (PyTuple_GET_ITEM(errobj, 1) != Py_None)) { - PyUFunc_NUM_NODEFAULTS += 1; - } - else if (PyUFunc_NUM_NODEFAULTS > 0) { - PyUFunc_NUM_NODEFAULTS -= 1; - } - Py_XDECREF(errobj); - return 0; -} -#endif - NPY_NO_EXPORT PyObject * ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args) { @@ -5537,6 +5234,9 @@ NpyIter_Deallocate(iter_buffer); + if (op1_array != (PyArrayObject*)op1) { + PyArray_ResolveWritebackIfCopy(op1_array); + } Py_XDECREF(op2_array); Py_XDECREF(iter); Py_XDECREF(iter2); @@ -5553,6 +5253,9 @@ fail: + if (op1_array != (PyArrayObject*)op1) { + PyArray_ResolveWritebackIfCopy(op1_array); + } Py_XDECREF(op2_array); Py_XDECREF(iter); Py_XDECREF(iter2); diff -Nru python-numpy-1.13.3/numpy/core/src/umath/ufunc_object.h python-numpy-1.14.5/numpy/core/src/umath/ufunc_object.h --- 
python-numpy-1.13.3/numpy/core/src/umath/ufunc_object.h 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/ufunc_object.h 2018-06-12 17:31:56.000000000 +0000 @@ -7,6 +7,9 @@ NPY_NO_EXPORT PyObject * ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args); +NPY_NO_EXPORT const char* +ufunc_get_name_cstr(PyUFuncObject *ufunc); + /* interned strings (on umath import) */ NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_out; NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_subok; diff -Nru python-numpy-1.13.3/numpy/core/src/umath/ufunc_type_resolution.c python-numpy-1.14.5/numpy/core/src/umath/ufunc_type_resolution.c --- python-numpy-1.13.3/numpy/core/src/umath/ufunc_type_resolution.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/src/umath/ufunc_type_resolution.c 2018-06-12 17:31:56.000000000 +0000 @@ -21,6 +21,7 @@ #include "numpy/ufuncobject.h" #include "ufunc_type_resolution.h" +#include "ufunc_object.h" #include "common.h" static const char * @@ -56,9 +57,7 @@ PyArray_Descr **dtypes) { int i, nin = ufunc->nin, nop = nin + ufunc->nout; - const char *ufunc_name; - - ufunc_name = ufunc->name ? ufunc->name : ""; + const char *ufunc_name = ufunc_get_name_cstr(ufunc); for (i = 0; i < nop; ++i) { if (i < nin) { @@ -184,9 +183,7 @@ PyArray_Descr **out_dtypes) { int i, type_num1, type_num2; - const char *ufunc_name; - - ufunc_name = ufunc->name ? ufunc->name : ""; + const char *ufunc_name = ufunc_get_name_cstr(ufunc); if (ufunc->nin != 2 || ufunc->nout != 1) { PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured " @@ -290,9 +287,7 @@ PyArray_Descr **out_dtypes) { int i, type_num1; - const char *ufunc_name; - - ufunc_name = ufunc->name ? 
ufunc->name : ""; + const char *ufunc_name = ufunc_get_name_cstr(ufunc); if (ufunc->nin != 1 || ufunc->nout != 1) { PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured " @@ -430,9 +425,7 @@ PyArray_Descr **out_dtypes) { int i, type_num1, type_num2; - const char *ufunc_name; - - ufunc_name = ufunc->name ? ufunc->name : ""; + const char *ufunc_name = ufunc_get_name_cstr(ufunc); if (ufunc->nin != 2 || ufunc->nout != 1) { PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured " @@ -551,7 +544,7 @@ PyArray_Descr **out_dtypes) { if (!PyTypeNum_ISDATETIME(PyArray_DESCR(operands[0])->type_num)) { - PyErr_SetString(PyExc_ValueError, + PyErr_SetString(PyExc_TypeError, "ufunc 'isnat' is only defined for datetime and timedelta."); return -1; } @@ -614,9 +607,7 @@ { int type_num1, type_num2; int i; - const char *ufunc_name; - - ufunc_name = ufunc->name ? ufunc->name : ""; + const char *ufunc_name = ufunc_get_name_cstr(ufunc); type_num1 = PyArray_DESCR(operands[0])->type_num; type_num2 = PyArray_DESCR(operands[1])->type_num; @@ -804,9 +795,7 @@ { int type_num1, type_num2; int i; - const char *ufunc_name; - - ufunc_name = ufunc->name ? ufunc->name : ""; + const char *ufunc_name = ufunc_get_name_cstr(ufunc); type_num1 = PyArray_DESCR(operands[0])->type_num; type_num2 = PyArray_DESCR(operands[1])->type_num; @@ -822,12 +811,11 @@ /* The type resolver would have upcast already */ if (out_dtypes[0]->type_num == NPY_BOOL) { - /* 2013-12-05, 1.9 */ - if (DEPRECATE("numpy boolean subtract, the `-` operator, is " - "deprecated, use the bitwise_xor, the `^` operator, " - "or the logical_xor function instead.") < 0) { - return -1; - } + PyErr_Format(PyExc_TypeError, + "numpy boolean subtract, the `-` operator, is deprecated, " + "use the bitwise_xor, the `^` operator, or the logical_xor " + "function instead."); + return -1; } return ret; } @@ -987,9 +975,7 @@ { int type_num1, type_num2; int i; - const char *ufunc_name; - - ufunc_name = ufunc->name ? 
ufunc->name : ""; + const char *ufunc_name = ufunc_get_name_cstr(ufunc); type_num1 = PyArray_DESCR(operands[0])->type_num; type_num2 = PyArray_DESCR(operands[1])->type_num; @@ -1131,9 +1117,7 @@ { int type_num1, type_num2; int i; - const char *ufunc_name; - - ufunc_name = ufunc->name ? ufunc->name : ""; + const char *ufunc_name = ufunc_get_name_cstr(ufunc); type_num1 = PyArray_DESCR(operands[0])->type_num; type_num2 = PyArray_DESCR(operands[1])->type_num; @@ -1385,7 +1369,7 @@ PyObject *errmsg; int i, j; - ufunc_name = ufunc->name ? ufunc->name : "(unknown)"; + ufunc_name = ufunc_get_name_cstr(ufunc); /* * If there are user-loops search them first. @@ -1875,7 +1859,7 @@ "matching the type-tuple, " "but the inputs and/or outputs could not be " "cast according to the casting rule", - self->name ? self->name : "(unknown)"); + ufunc_get_name_cstr(self)); return -1; /* Error */ case -1: @@ -1979,7 +1963,7 @@ /* For making a better error message on coercion error */ char err_dst_typecode = '-', err_src_typecode = '-'; - ufunc_name = self->name ? self->name : "(unknown)"; + ufunc_name = ufunc_get_name_cstr(self); use_min_scalar = should_use_min_scalar(op, nin); @@ -2088,7 +2072,7 @@ /* For making a better error message on coercion error */ char err_dst_typecode = '-', err_src_typecode = '-'; - ufunc_name = self->name ? self->name : "(unknown)"; + ufunc_name = ufunc_get_name_cstr(self); use_min_scalar = should_use_min_scalar(op, nin); @@ -2100,7 +2084,7 @@ PyErr_Format(PyExc_ValueError, "a type-tuple must be specified " "of length 1 or %d for ufunc '%s'", (int)nop, - self->name ? self->name : "(unknown)"); + ufunc_get_name_cstr(self)); return -1; } @@ -2153,7 +2137,7 @@ "requires 1 typecode, or " "%d typecode(s) before " \ "and %d after the -> sign", - self->name ? 
self->name : "(unknown)", + ufunc_get_name_cstr(self), self->nin, self->nout); Py_XDECREF(str_obj); return -1; diff -Nru python-numpy-1.13.3/numpy/core/tests/test_abc.py python-numpy-1.14.5/numpy/core/tests/test_abc.py --- python-numpy-1.13.3/numpy/core/tests/test_abc.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_abc.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,43 +1,56 @@ from __future__ import division, absolute_import, print_function -from numpy.testing import TestCase, assert_, run_module_suite +from numpy.testing import assert_, run_module_suite import numbers + +import numpy as np from numpy.core.numerictypes import sctypes -class ABC(TestCase): +class TestABC(object): + def test_abstract(self): + assert_(issubclass(np.number, numbers.Number)) + + assert_(issubclass(np.inexact, numbers.Complex)) + assert_(issubclass(np.complexfloating, numbers.Complex)) + assert_(issubclass(np.floating, numbers.Real)) + + assert_(issubclass(np.integer, numbers.Integral)) + assert_(issubclass(np.signedinteger, numbers.Integral)) + assert_(issubclass(np.unsignedinteger, numbers.Integral)) + def test_floats(self): for t in sctypes['float']: - assert_(isinstance(t(), numbers.Real), + assert_(isinstance(t(), numbers.Real), "{0} is not instance of Real".format(t.__name__)) assert_(issubclass(t, numbers.Real), "{0} is not subclass of Real".format(t.__name__)) - assert_(not isinstance(t(), numbers.Rational), + assert_(not isinstance(t(), numbers.Rational), "{0} is instance of Rational".format(t.__name__)) assert_(not issubclass(t, numbers.Rational), "{0} is subclass of Rational".format(t.__name__)) def test_complex(self): for t in sctypes['complex']: - assert_(isinstance(t(), numbers.Complex), + assert_(isinstance(t(), numbers.Complex), "{0} is not instance of Complex".format(t.__name__)) assert_(issubclass(t, numbers.Complex), "{0} is not subclass of Complex".format(t.__name__)) - assert_(not isinstance(t(), numbers.Real), + assert_(not 
isinstance(t(), numbers.Real), "{0} is instance of Real".format(t.__name__)) assert_(not issubclass(t, numbers.Real), "{0} is subclass of Real".format(t.__name__)) def test_int(self): for t in sctypes['int']: - assert_(isinstance(t(), numbers.Integral), + assert_(isinstance(t(), numbers.Integral), "{0} is not instance of Integral".format(t.__name__)) assert_(issubclass(t, numbers.Integral), "{0} is not subclass of Integral".format(t.__name__)) def test_uint(self): for t in sctypes['uint']: - assert_(isinstance(t(), numbers.Integral), + assert_(isinstance(t(), numbers.Integral), "{0} is not instance of Integral".format(t.__name__)) assert_(issubclass(t, numbers.Integral), "{0} is not subclass of Integral".format(t.__name__)) diff -Nru python-numpy-1.13.3/numpy/core/tests/test_arrayprint.py python-numpy-1.14.5/numpy/core/tests/test_arrayprint.py --- python-numpy-1.13.3/numpy/core/tests/test_arrayprint.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_arrayprint.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,17 +1,18 @@ # -*- coding: utf-8 -*- from __future__ import division, absolute_import, print_function -import sys +import sys, gc import numpy as np from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal + run_module_suite, assert_, assert_equal, assert_raises, assert_warns, dec ) +import textwrap class TestArrayRepr(object): def test_nan_inf(self): x = np.array([np.nan, np.inf]) - assert_equal(repr(x), 'array([ nan, inf])') + assert_equal(repr(x), 'array([nan, inf])') def test_subclass(self): class sub(np.ndarray): pass @@ -27,13 +28,94 @@ ' [3, 4]])') # two dimensional with flexible dtype - xstruct = np.ones((2,2), dtype=[('a', 'i4')]).view(sub) + xstruct = np.ones((2,2), dtype=[('a', ' 1) + y = sub(None) + x[()] = y + y[()] = x + assert_equal(repr(x), + 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)') + assert_equal(str(x), '...') + + # nested 0d-subclass-object + x = sub(None) + x[()] = 
sub(None) + assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)') + assert_equal(str(x), 'None') + + # gh-10663 + class DuckCounter(np.ndarray): + def __getitem__(self, item): + result = super(DuckCounter, self).__getitem__(item) + if not isinstance(result, DuckCounter): + result = result[...].view(DuckCounter) + return result + + def to_string(self): + return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many') + + def __str__(self): + if self.shape == (): + return self.to_string() + else: + fmt = {'all': lambda x: x.to_string()} + return np.array2string(self, formatter=fmt) + + dc = np.arange(5).view(DuckCounter) + assert_equal(str(dc), "[zero one two many many]") + assert_equal(str(dc[0]), "zero") + def test_self_containing(self): arr0d = np.array(None) arr0d[()] = arr0d @@ -60,66 +142,71 @@ assert_equal(repr(arr1d), 'array([list([1, 2]), list([3])], dtype=object)') + def test_void_scalar_recursion(self): + # gh-9345 + repr(np.void(b'test')) # RecursionError ? + + def test_fieldless_structured(self): + # gh-10366 + no_fields = np.dtype([]) + arr_no_fields = np.empty(4, dtype=no_fields) + assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])') + -class TestComplexArray(TestCase): +class TestComplexArray(object): def test_str(self): rvals = [0, 1, -1, np.inf, -np.inf, np.nan] cvals = [complex(rp, ip) for rp in rvals for ip in rvals] dtypes = [np.complex64, np.cdouble, np.clongdouble] actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] wanted = [ - '[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]', - '[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]', - '[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]', - '[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]', - '[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]', - '[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]', - '[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]', - '[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]', - '[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]', - '[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]', - '[ 1.-infj]', '[ 1.-infj]', '[ 
1.0-infj]', - '[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]', - '[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]', - '[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]', - '[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]', - '[-1.+infj]', '[-1.+infj]', '[-1.0+infj]', - '[-1.-infj]', '[-1.-infj]', '[-1.0-infj]', - '[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]', - '[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]', - '[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]', - '[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]', - '[ inf+infj]', '[ inf+infj]', '[ inf+infj]', - '[ inf-infj]', '[ inf-infj]', '[ inf-infj]', - '[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]', - '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]', - '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]', - '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]', - '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', - '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', - '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', - '[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]', - '[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]', - '[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]', - '[ nan+infj]', '[ nan+infj]', '[ nan+infj]', - '[ nan-infj]', '[ nan-infj]', '[ nan-infj]', - '[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]'] + '[0.+0.j]', '[0.+0.j]', '[0.+0.j]', + '[0.+1.j]', '[0.+1.j]', '[0.+1.j]', + '[0.-1.j]', '[0.-1.j]', '[0.-1.j]', + '[0.+infj]', '[0.+infj]', '[0.+infj]', + '[0.-infj]', '[0.-infj]', '[0.-infj]', + '[0.+nanj]', '[0.+nanj]', '[0.+nanj]', + '[1.+0.j]', '[1.+0.j]', '[1.+0.j]', + '[1.+1.j]', '[1.+1.j]', '[1.+1.j]', + '[1.-1.j]', '[1.-1.j]', '[1.-1.j]', + '[1.+infj]', '[1.+infj]', '[1.+infj]', + '[1.-infj]', '[1.-infj]', '[1.-infj]', + '[1.+nanj]', '[1.+nanj]', '[1.+nanj]', + '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]', + '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]', + '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]', + '[-1.+infj]', '[-1.+infj]', '[-1.+infj]', + '[-1.-infj]', '[-1.-infj]', '[-1.-infj]', + '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]', + '[inf+0.j]', '[inf+0.j]', '[inf+0.j]', + '[inf+1.j]', '[inf+1.j]', '[inf+1.j]', + '[inf-1.j]', '[inf-1.j]', 
'[inf-1.j]', + '[inf+infj]', '[inf+infj]', '[inf+infj]', + '[inf-infj]', '[inf-infj]', '[inf-infj]', + '[inf+nanj]', '[inf+nanj]', '[inf+nanj]', + '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]', + '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]', + '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]', + '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', + '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', + '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', + '[nan+0.j]', '[nan+0.j]', '[nan+0.j]', + '[nan+1.j]', '[nan+1.j]', '[nan+1.j]', + '[nan-1.j]', '[nan-1.j]', '[nan-1.j]', + '[nan+infj]', '[nan+infj]', '[nan+infj]', + '[nan-infj]', '[nan-infj]', '[nan-infj]', + '[nan+nanj]', '[nan+nanj]', '[nan+nanj]'] for res, val in zip(actual, wanted): - assert_(res == val) + assert_equal(res, val) -class TestArray2String(TestCase): +class TestArray2String(object): def test_basic(self): """Basic test of array2string.""" a = np.arange(3) assert_(np.array2string(a) == '[0 1 2]') - assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]') - - def test_style_keyword(self): - """This should only apply to 0-D arrays. See #1218.""" - stylestr = np.array2string(np.array(1.5), - style=lambda x: "Value in 0-D array: " + str(x)) - assert_(stylestr == 'Value in 0-D array: 1.5') + assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') + assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') def test_format_function(self): """Test custom format function for each element in array.""" @@ -159,20 +246,53 @@ assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == '[abcabc defdef]') + # check for backcompat that using FloatFormat works and emits warning + with assert_warns(DeprecationWarning): + fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False) + assert_equal(np.array2string(x, formatter={'float_kind': fmt}), + '[0. 1. 
2.]') + def test_structure_format(self): dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) assert_equal(np.array2string(x), - "[('Sarah', [ 8., 7.]) ('John', [ 6., 7.])]") + "[('Sarah', [8., 7.]) ('John', [6., 7.])]") - # for issue #5692 - A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) + np.set_printoptions(legacy='1.13') + try: + # for issue #5692 + A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) + ('NaT',) ('NaT',) ('NaT',)]""") + ) + finally: + np.set_printoptions(legacy=False) + + # same again, but with non-legacy behavior + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',)]""") + ) + + # and again, with timedeltas + A = np.full(10, 123456, dtype=[("A", "m8[s]")]) A[5:].fill(np.datetime64('NaT')) - assert_equal(np.array2string(A), - "[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) " + - "('1970-01-01T00:00:00',)\n ('1970-01-01T00:00:00',) " + - "('1970-01-01T00:00:00',) ('NaT',) ('NaT',)\n " + - "('NaT',) ('NaT',) ('NaT',)]") + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) ( 'NaT',)]""") + ) # See #8160 struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)]) @@ -186,31 +306,123 @@ # See #8172 array_scalar = np.array( (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) - assert_equal(np.array2string(array_scalar), "( 1., 2.12345679, 3.)") + assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") + def 
test_unstructured_void_repr(self): + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, + 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8') + assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") + assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") + assert_equal(repr(a), + r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n" + r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") + + assert_equal(eval(repr(a), vars(np)), a) + assert_equal(eval(repr(a[0]), vars(np)), a[0]) + + def test_edgeitems_kwarg(self): + # previously the global print options would be taken over the kwarg + arr = np.zeros(3, int) + assert_equal( + np.array2string(arr, edgeitems=1, threshold=0), + "[0 ... 0]" + ) + + def test_summarize_1d(self): + A = np.arange(1001) + strA = '[ 0 1 2 ... 998 999 1000]' + assert_equal(str(A), strA) + + reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' + assert_equal(repr(A), reprA) + + def test_summarize_2d(self): + A = np.arange(1002).reshape(2, 501) + strA = '[[ 0 1 2 ... 498 499 500]\n' \ + ' [ 501 502 503 ... 
999 1000 1001]]' + assert_equal(str(A), strA) + + reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ + ' [ 501, 502, 503, ..., 999, 1000, 1001]])' + assert_equal(repr(A), reprA) + + def test_linewidth(self): + a = np.full(6, 1) + + def make_str(a, width, **kw): + return np.array2string(a, separator="", max_line_width=width, **kw) + + assert_equal(make_str(a, 8, legacy='1.13'), '[111111]') + assert_equal(make_str(a, 7, legacy='1.13'), '[111111]') + assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n' + ' 11]') + + assert_equal(make_str(a, 8), '[111111]') + assert_equal(make_str(a, 7), '[11111\n' + ' 1]') + assert_equal(make_str(a, 5), '[111\n' + ' 111]') + + b = a[None,None,:] + + assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]') + assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]') + assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n' + ' 1]]]') + + assert_equal(make_str(b, 12), '[[[111111]]]') + assert_equal(make_str(b, 9), '[[[111\n' + ' 111]]]') + assert_equal(make_str(b, 8), '[[[11\n' + ' 11\n' + ' 11]]]') + + def test_wide_element(self): + a = np.array(['xxxxx']) + assert_equal( + np.array2string(a, max_line_width=5), + "['xxxxx']" + ) + assert_equal( + np.array2string(a, max_line_width=5, legacy='1.13'), + "[ 'xxxxx']" + ) + + def test_refcount(self): + # make sure we do not hold references to the array due to a recursive + # closure (gh-10620) + gc.disable() + a = np.arange(2) + r1 = sys.getrefcount(a) + np.array2string(a) + np.array2string(a) + r2 = sys.getrefcount(a) + gc.collect() + gc.enable() + assert_(r1 == r2) -class TestPrintOptions: +class TestPrintOptions(object): """Test getting and setting global print options.""" - def setUp(self): + def setup(self): self.oldopts = np.get_printoptions() - def tearDown(self): + def teardown(self): np.set_printoptions(**self.oldopts) def test_basic(self): x = np.array([1.5, 0, 1.234567890]) - assert_equal(repr(x), "array([ 1.5 , 0. 
, 1.23456789])") + assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])") np.set_printoptions(precision=4) - assert_equal(repr(x), "array([ 1.5 , 0. , 1.2346])") + assert_equal(repr(x), "array([1.5 , 0. , 1.2346])") def test_precision_zero(self): np.set_printoptions(precision=0) for values, string in ( - ([0.], " 0."), ([.3], " 0."), ([-.3], "-0."), ([.7], " 1."), - ([1.5], " 2."), ([-1.5], "-2."), ([-15.34], "-15."), - ([100.], " 100."), ([.2, -1, 122.51], " 0., -1., 123."), - ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], " 0.-1.j")): + ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."), + ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."), + ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."), + ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")): x = np.array(values) assert_equal(repr(x), "array([%s])" % string) @@ -240,7 +452,383 @@ np.set_printoptions(formatter={'float':lambda x: str(x-1)}) assert_equal(repr(x), "array([-1.0, 0.0, 1.0])") np.set_printoptions(formatter={'float_kind':None}) - assert_equal(repr(x), "array([ 0., 1., 2.])") + assert_equal(repr(x), "array([0., 1., 2.])") + + def test_0d_arrays(self): + unicode = type(u'') + + assert_equal(unicode(np.array(u'café', '= 3: + assert_equal(repr(np.array('café', '= 3 else '|S4' + assert_equal(repr(np.ones(3, dtype=styp)), + "array(['1', '1', '1'], dtype='{}')".format(styp)) + assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\ + array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'], + dtype='{}')""".format(styp))) + + def test_linewidth_repr(self): + a = np.full(7, fill_value=2) + np.set_printoptions(linewidth=17) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2])""") + ) + np.set_printoptions(linewidth=17, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, 2])""") + ) + + a = np.full(8, fill_value=2) + + np.set_printoptions(linewidth=18, legacy=False) + assert_equal( + repr(a), + 
textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2, 2])""") + ) + + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, 2, + 2, 2, 2, 2])""") + ) + + def test_linewidth_str(self): + a = np.full(18, fill_value=2) + np.set_printoptions(linewidth=18) + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 + 2 2]""") + ) + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 2]""") + ) + + def test_edgeitems(self): + np.set_printoptions(edgeitems=1, threshold=1) + a = np.arange(27).reshape((3, 3, 3)) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + ..., + + [[18, ..., 20], + ..., + [24, ..., 26]]])""") + ) + + b = np.zeros((3, 3, 1, 1)) + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[0.]], + + ..., + + [[0.]]], + + + ..., + + + [[[0.]], + + ..., + + [[0.]]]])""") + ) + + # 1.13 had extra trailing spaces, and was missing newlines + np.set_printoptions(legacy='1.13') + + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + ..., + [[18, ..., 20], + ..., + [24, ..., 26]]])""") + ) + + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[ 0.]], + + ..., + [[ 0.]]], + + + ..., + [[[ 0.]], + + ..., + [[ 0.]]]])""") + ) + def test_unicode_object_array(): import sys diff -Nru python-numpy-1.13.3/numpy/core/tests/test_datetime.py python-numpy-1.14.5/numpy/core/tests/test_datetime.py --- python-numpy-1.13.3/numpy/core/tests/test_datetime.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_datetime.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,7 +6,7 @@ import numpy as np import datetime from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_raises, + run_module_suite, assert_, assert_equal, assert_raises, assert_warns, 
dec, suppress_warnings ) @@ -18,7 +18,7 @@ _has_pytz = False -class TestDateTime(TestCase): +class TestDateTime(object): def test_datetime_dtype_creation(self): for unit in ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', @@ -518,31 +518,38 @@ def test_datetime_string_conversion(self): a = ['2011-03-16', '1920-01-01', '2013-05-19'] str_a = np.array(a, dtype='S') + uni_a = np.array(a, dtype='U') dt_a = np.array(a, dtype='M') - str_b = np.empty_like(str_a) - dt_b = np.empty_like(dt_a) # String to datetime assert_equal(dt_a, str_a.astype('M')) assert_equal(dt_a.dtype, str_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) dt_b[...] = str_a assert_equal(dt_a, dt_b) + # Datetime to string assert_equal(str_a, dt_a.astype('S0')) + str_b = np.empty_like(str_a) str_b[...] = dt_a assert_equal(str_a, str_b) - # Convert the 'S' to 'U' - str_a = str_a.astype('U') - str_b = str_b.astype('U') - # Unicode to datetime - assert_equal(dt_a, str_a.astype('M')) - assert_equal(dt_a.dtype, str_a.astype('M').dtype) - dt_b[...] = str_a + assert_equal(dt_a, uni_a.astype('M')) + assert_equal(dt_a.dtype, uni_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = uni_a assert_equal(dt_a, dt_b) + # Datetime to unicode - assert_equal(str_a, dt_a.astype('U')) + assert_equal(uni_a, dt_a.astype('U')) + uni_b = np.empty_like(uni_a) + uni_b[...] = dt_a + assert_equal(uni_a, uni_b) + + # Datetime to long string - gh-9712 + assert_equal(str_a, dt_a.astype((np.string_, 128))) + str_b = np.empty(str_a.shape, dtype=(np.string_, 128)) str_b[...] 
= dt_a assert_equal(str_a, str_b) @@ -558,7 +565,7 @@ # Check that one NaT doesn't corrupt subsequent entries a = np.array(['2010', 'NaT', '2030']).astype('M') - assert_equal(str(a), "['2010' 'NaT' '2030']") + assert_equal(str(a), "['2010' 'NaT' '2030']") def test_timedelta_array_str(self): a = np.array([-1, 0, 100], dtype='m') @@ -1131,7 +1138,19 @@ assert_(np.not_equal(dt_other, dt_nat)) assert_(np.not_equal(td_nat, td_other)) assert_(np.not_equal(td_other, td_nat)) - self.assertEqual(len(sup.log), 0) + assert_equal(len(sup.log), 0) + + def test_datetime_futurewarning_once_nat(self): + # Test that the futurewarning is only given once per inner loop + arr1 = np.array(['NaT', 'NaT', '2000-01-01'] * 2, dtype='M8[s]') + arr2 = np.array(['NaT', '2000-01-01', 'NaT'] * 2, dtype='M8[s]') + # All except less, because for less it can't be wrong (NaT is min) + for op in [np.equal, np.less, np.less_equal, + np.greater, np.greater_equal]: + with suppress_warnings() as sup: + rec = sup.record(FutureWarning, ".*NAT") + op(arr1, arr2) + assert_(len(rec) == 1, "failed for {}".format(op)) def test_datetime_minmax(self): # The metadata of the result should become the GCD @@ -1227,10 +1246,10 @@ def test_divisor_conversion_fs(self): assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]')) - self.assertRaises(ValueError, lambda: np.dtype('M8[3fs/10000]')) + assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]')) def test_divisor_conversion_as(self): - self.assertRaises(ValueError, lambda: np.dtype('M8[as/10]')) + assert_raises(ValueError, lambda: np.dtype('M8[as/10]')) def test_string_parser_variants(self): # Allow space instead of 'T' between date and time @@ -1944,10 +1963,10 @@ for t in np.typecodes["All"]: if t in np.typecodes["Datetime"]: continue - assert_raises(ValueError, np.isnat, np.zeros(10, t)) + assert_raises(TypeError, np.isnat, np.zeros(10, t)) -class TestDateTimeData(TestCase): +class TestDateTimeData(object): def test_basic(self): a = np.array(['1980-03-23'], 
dtype=np.datetime64) diff -Nru python-numpy-1.13.3/numpy/core/tests/test_defchararray.py python-numpy-1.14.5/numpy/core/tests/test_defchararray.py --- python-numpy-1.13.3/numpy/core/tests/test_defchararray.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_defchararray.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,14 +5,14 @@ import numpy as np from numpy.core.multiarray import _vec_string from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_array_equal, + run_module_suite, assert_, assert_equal, assert_array_equal, assert_raises, suppress_warnings, ) kw_unicode_true = {'unicode': True} # make 2to3 work properly kw_unicode_false = {'unicode': False} -class TestBasic(TestCase): +class TestBasic(object): def test_from_object_array(self): A = np.array([['abc', 2], ['long ', '0123456789']], dtype='O') @@ -24,7 +24,7 @@ def test_from_object_array_unicode(self): A = np.array([['abc', u'Sigma \u03a3'], ['long ', '0123456789']], dtype='O') - self.assertRaises(ValueError, np.char.array, (A,)) + assert_raises(ValueError, np.char.array, (A,)) B = np.char.array(A, **kw_unicode_true) assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize) assert_array_equal(B, [['abc', u'Sigma \u03a3'], @@ -63,7 +63,7 @@ def fail(): np.char.array(A, **kw_unicode_false) - self.assertRaises(UnicodeEncodeError, fail) + assert_raises(UnicodeEncodeError, fail) def test_unicode_upconvert(self): A = np.char.array(['abc']) @@ -83,59 +83,59 @@ assert_equal(A.itemsize, 4) assert_(issubclass(A.dtype.type, np.unicode_)) -class TestVecString(TestCase): +class TestVecString(object): def test_non_existent_method(self): def fail(): _vec_string('a', np.string_, 'bogus') - self.assertRaises(AttributeError, fail) + assert_raises(AttributeError, fail) def test_non_string_array(self): def fail(): _vec_string(1, np.string_, 'strip') - self.assertRaises(TypeError, fail) + assert_raises(TypeError, fail) def 
test_invalid_args_tuple(self): def fail(): _vec_string(['a'], np.string_, 'strip', 1) - self.assertRaises(TypeError, fail) + assert_raises(TypeError, fail) def test_invalid_type_descr(self): def fail(): _vec_string(['a'], 'BOGUS', 'strip') - self.assertRaises(TypeError, fail) + assert_raises(TypeError, fail) def test_invalid_function_args(self): def fail(): _vec_string(['a'], np.string_, 'strip', (1,)) - self.assertRaises(TypeError, fail) + assert_raises(TypeError, fail) def test_invalid_result_type(self): def fail(): _vec_string(['a'], np.integer, 'strip') - self.assertRaises(TypeError, fail) + assert_raises(TypeError, fail) def test_broadcast_error(self): def fail(): _vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],)) - self.assertRaises(ValueError, fail) + assert_raises(ValueError, fail) -class TestWhitespace(TestCase): - def setUp(self): +class TestWhitespace(object): + def setup(self): self.A = np.array([['abc ', '123 '], ['789 ', 'xyz ']]).view(np.chararray) self.B = np.array([['abc', '123'], @@ -149,16 +149,16 @@ assert_(not np.any(self.A < self.B)) assert_(not np.any(self.A != self.B)) -class TestChar(TestCase): - def setUp(self): +class TestChar(object): + def setup(self): self.A = np.array('abc1', dtype='c').view(np.chararray) def test_it(self): assert_equal(self.A.shape, (4,)) assert_equal(self.A.upper()[:2].tobytes(), b'AB') -class TestComparisons(TestCase): - def setUp(self): +class TestComparisons(object): + def setup(self): self.A = np.array([['abc', '123'], ['789', 'xyz']]).view(np.chararray) self.B = np.array([['efg', '123 '], @@ -185,21 +185,21 @@ class TestComparisonsMixed1(TestComparisons): """Ticket #1276""" - def setUp(self): - TestComparisons.setUp(self) + def setup(self): + TestComparisons.setup(self) self.B = np.array([['efg', '123 '], ['051', 'tuv']], np.unicode_).view(np.chararray) class TestComparisonsMixed2(TestComparisons): """Ticket #1276""" - def setUp(self): - TestComparisons.setUp(self) + def setup(self): + 
TestComparisons.setup(self) self.A = np.array([['abc', '123'], ['789', 'xyz']], np.unicode_).view(np.chararray) -class TestInformation(TestCase): - def setUp(self): +class TestInformation(object): + def setup(self): self.A = np.array([[' abc ', ''], ['12345', 'MixedCase'], ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray) @@ -231,7 +231,7 @@ def fail(): self.A.endswith('3', 'fdjk') - self.assertRaises(TypeError, fail) + assert_raises(TypeError, fail) def test_find(self): assert_(issubclass(self.A.find('a').dtype.type, np.integer)) @@ -245,7 +245,7 @@ def fail(): self.A.index('a') - self.assertRaises(ValueError, fail) + assert_raises(ValueError, fail) assert_(np.char.index('abcba', 'b') == 1) assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) @@ -289,7 +289,7 @@ def fail(): self.A.rindex('a') - self.assertRaises(ValueError, fail) + assert_raises(ValueError, fail) assert_(np.char.rindex('abcba', 'b') == 3) assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) @@ -301,11 +301,11 @@ def fail(): self.A.startswith('3', 'fdjk') - self.assertRaises(TypeError, fail) + assert_raises(TypeError, fail) -class TestMethods(TestCase): - def setUp(self): +class TestMethods(object): + def setup(self): self.A = np.array([[' abc ', ''], ['12345', 'MixedCase'], ['123 \t 345 \0 ', 'UPPER']], @@ -583,7 +583,7 @@ def fail(): self.A.isnumeric() - self.assertRaises(TypeError, fail) + assert_raises(TypeError, fail) assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_)) assert_array_equal(self.B.isnumeric(), [ [False, False], [True, False], [False, False]]) @@ -593,14 +593,14 @@ def fail(): self.A.isdecimal() - self.assertRaises(TypeError, fail) + assert_raises(TypeError, fail) assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_)) assert_array_equal(self.B.isdecimal(), [ [False, False], [True, False], [False, False]]) -class TestOperations(TestCase): - def setUp(self): +class TestOperations(object): + def setup(self): self.A = 
np.array([['abc', '123'], ['789', 'xyz']]).view(np.chararray) self.B = np.array([['efg', '456'], diff -Nru python-numpy-1.13.3/numpy/core/tests/test_deprecations.py python-numpy-1.14.5/numpy/core/tests/test_deprecations.py --- python-numpy-1.13.3/numpy/core/tests/test_deprecations.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_deprecations.py 2018-06-12 18:28:52.000000000 +0000 @@ -28,7 +28,7 @@ message = '' warning_cls = DeprecationWarning - def setUp(self): + def setup(self): self.warn_ctx = warnings.catch_warnings(record=True) self.log = self.warn_ctx.__enter__() @@ -42,7 +42,7 @@ warnings.filterwarnings("always", message=self.message, category=self.warning_cls) - def tearDown(self): + def teardown(self): self.warn_ctx.__exit__() def assert_deprecated(self, function, num=1, ignore_others=False, @@ -132,24 +132,6 @@ warning_cls = np.VisibleDeprecationWarning -class TestBooleanBinaryMinusDeprecation(_DeprecationTestCase): - """Test deprecation of binary boolean `-`. While + and * are well - defined, binary - is not and even a corrected form seems to have - no real uses. - - The deprecation process was started in NumPy 1.9. - """ - message = r"numpy boolean subtract, the `-` operator, .*" - - def test_operator_deprecation(self): - array = np.array([True]) - generic = np.bool_(True) - - # Minus operator/subtract ufunc: - self.assert_deprecated(operator.sub, args=(array, array)) - self.assert_deprecated(operator.sub, args=(generic, generic)) - - class TestRankDeprecation(_DeprecationTestCase): """Test that np.rank is deprecated. The function should simply be removed. The VisibleDeprecationWarning may become unnecessary. 
@@ -277,7 +259,7 @@ """ def test_fortran_contiguous(self): - self.assert_deprecated(np.ones((2,2)).T.view, args=(np.complex,)) + self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,)) self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,)) @@ -394,20 +376,10 @@ args=(dt,)) -class TestAccumulateKeepDims(_DeprecationTestCase): - """ - Deprecate the keepdims argument to np.ufunc.accumulate, which was never used or documented - """ - def test_keepdims(self): - with warnings.catch_warnings(): - warnings.filterwarnings('always', '', FutureWarning) - assert_warns(FutureWarning, np.add.accumulate, [1], keepdims=True) - - class TestTestDeprecated(object): def test_assert_deprecated(self): test_case_instance = _DeprecationTestCase() - test_case_instance.setUp() + test_case_instance.setup() assert_raises(AssertionError, test_case_instance.assert_deprecated, lambda: None) @@ -416,7 +388,7 @@ warnings.warn("foo", category=DeprecationWarning, stacklevel=2) test_case_instance.assert_deprecated(foo) - test_case_instance.tearDown() + test_case_instance.teardown() class TestClassicIntDivision(_DeprecationTestCase): @@ -462,5 +434,52 @@ assert_(npy_char_deprecation() == 'S1') +class Test_UPDATEIFCOPY(_DeprecationTestCase): + """ + v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use + WRITEBACKIFCOPY instead + """ + def test_npy_updateifcopy_deprecation(self): + from numpy.core.multiarray_tests import npy_updateifcopy_deprecation + arr = np.arange(9).reshape(3, 3) + v = arr.T + self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,)) + + +class TestDatetimeEvent(_DeprecationTestCase): + # 2017-08-11, 1.14.0 + def test_3_tuple(self): + for cls in (np.datetime64, np.timedelta64): + # two valid uses - (unit, num) and (unit, num, den, None) + self.assert_not_deprecated(cls, args=(1, ('ms', 2))) + self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None))) + + # trying to use the event argument, removed in 1.7.0, is deprecated + # it used to be a 
uint8 + self.assert_deprecated(cls, args=(1, ('ms', 2, 'event'))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 63))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event'))) + self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63))) + + +class TestTruthTestingEmptyArrays(_DeprecationTestCase): + # 2017-09-25, 1.14.0 + message = '.*truth value of an empty array is ambiguous.*' + + def test_1d(self): + self.assert_deprecated(bool, args=(np.array([]),)) + + def test_2d(self): + self.assert_deprecated(bool, args=(np.zeros((1, 0)),)) + self.assert_deprecated(bool, args=(np.zeros((0, 1)),)) + self.assert_deprecated(bool, args=(np.zeros((0, 0)),)) + + +class TestBincount(_DeprecationTestCase): + # 2017-06-01, 1.14.0 + def test_bincount_minlength(self): + self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) + + if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/core/tests/test_dtype.py python-numpy-1.14.5/numpy/core/tests/test_dtype.py --- python-numpy-1.13.3/numpy/core/tests/test_dtype.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_dtype.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,11 +1,13 @@ from __future__ import division, absolute_import, print_function +import pickle import sys +import operator import numpy as np from numpy.core.test_rational import rational from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_raises, + run_module_suite, assert_, assert_equal, assert_raises, dec ) @@ -19,10 +21,10 @@ assert_(hash(a) != hash(b), "two different types hash to the same value !") -class TestBuiltin(TestCase): +class TestBuiltin(object): def test_run(self): """Only test hash runs at all.""" - for t in [np.int, np.float, np.complex, np.int32, np.str, np.object, + for t in [int, float, complex, np.int32, str, object, np.unicode]: dt = np.dtype(t) hash(dt) @@ -30,15 +32,15 @@ def test_dtype(self): # Make sure equivalent byte order char hash the 
same (e.g. < and = on # little endian) - for t in [np.int, np.float]: + for t in [int, float]: dt = np.dtype(t) dt2 = dt.newbyteorder("<") dt3 = dt.newbyteorder(">") if dt == dt2: - self.assertTrue(dt.byteorder != dt2.byteorder, "bogus test") + assert_(dt.byteorder != dt2.byteorder, "bogus test") assert_dtype_equal(dt, dt2) else: - self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test") + assert_(dt.byteorder != dt3.byteorder, "bogus test") assert_dtype_equal(dt, dt3) def test_equivalent_dtype_hashing(self): @@ -50,8 +52,8 @@ else: left = uintp right = np.dtype(np.ulonglong) - self.assertTrue(left == right) - self.assertTrue(hash(left) == hash(right)) + assert_(left == right) + assert_(hash(left) == hash(right)) def test_invalid_types(self): # Make sure invalid type strings raise an error @@ -103,17 +105,26 @@ 'formats':['i1', 'f4'], 'offsets':[0, 2]}, align=True) -class TestRecord(TestCase): + def test_field_order_equality(self): + x = np.dtype({'names': ['A', 'B'], + 'formats': ['i4', 'f4'], + 'offsets': [0, 4]}) + y = np.dtype({'names': ['B', 'A'], + 'formats': ['f4', 'i4'], + 'offsets': [4, 0]}) + assert_equal(x == y, False) + +class TestRecord(object): def test_equivalent_record(self): """Test whether equivalent record dtypes hash the same.""" - a = np.dtype([('yo', np.int)]) - b = np.dtype([('yo', np.int)]) + a = np.dtype([('yo', int)]) + b = np.dtype([('yo', int)]) assert_dtype_equal(a, b) def test_different_names(self): # In theory, they may hash the same (collision) ? 
- a = np.dtype([('yo', np.int)]) - b = np.dtype([('ye', np.int)]) + a = np.dtype([('yo', int)]) + b = np.dtype([('ye', int)]) assert_dtype_not_equal(a, b) def test_different_titles(self): @@ -128,9 +139,9 @@ def test_mutate(self): # Mutating a dtype should reset the cached hash value - a = np.dtype([('yo', np.int)]) - b = np.dtype([('yo', np.int)]) - c = np.dtype([('ye', np.int)]) + a = np.dtype([('yo', int)]) + b = np.dtype([('yo', int)]) + c = np.dtype([('ye', int)]) assert_dtype_equal(a, b) assert_dtype_not_equal(a, c) a.names = ['ye'] @@ -145,10 +156,10 @@ """Test if an appropriate exception is raised when passing bad values to the dtype constructor. """ - self.assertRaises(TypeError, np.dtype, - dict(names=set(['A', 'B']), formats=['f8', 'i4'])) - self.assertRaises(TypeError, np.dtype, - dict(names=['A', 'B'], formats=set(['f8', 'i4']))) + assert_raises(TypeError, np.dtype, + dict(names=set(['A', 'B']), formats=['f8', 'i4'])) + assert_raises(TypeError, np.dtype, + dict(names=['A', 'B'], formats=set(['f8', 'i4']))) def test_aligned_size(self): # Check that structured dtypes get padded to an aligned size @@ -210,11 +221,12 @@ dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['f4', (64, 64)), (1,)), ('rtile', '>f4', (64, 36))], (3,)), @@ -581,7 +606,7 @@ # Pull request #4722 np.array(["", ""]).astype(object) -class TestDtypeAttributeDeletion(TestCase): +class TestDtypeAttributeDeletion(object): def test_dtype_non_writable_attributes_deletion(self): dt = np.dtype(np.double) @@ -599,7 +624,7 @@ assert_raises(AttributeError, delattr, dt, s) -class TestDtypeAttributes(TestCase): +class TestDtypeAttributes(object): def test_descr_has_trailing_void(self): # see gh-6359 dtype = np.dtype({ @@ -624,6 +649,59 @@ assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls') +class TestPickling(object): + + def check_pickling(self, dtype): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + pickled = pickle.loads(pickle.dumps(dtype, proto)) + assert_equal(pickled, 
dtype) + assert_equal(pickled.descr, dtype.descr) + if dtype.metadata is not None: + assert_equal(pickled.metadata, dtype.metadata) + # Check the reconstructed dtype is functional + x = np.zeros(3, dtype=dtype) + y = np.zeros(3, dtype=pickled) + assert_equal(x, y) + assert_equal(x[0], y[0]) + + def test_builtin(self): + for t in [int, float, complex, np.int32, str, object, + np.unicode, bool]: + self.check_pickling(np.dtype(t)) + + def test_structured(self): + dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '', p, q, r), 253) + # singleton dimensions broadcast (gh-10343) + p = np.ones((10,2)) + q = np.ones((1,2)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + np.einsum('ij,ij->j', p, q, optimize=False)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + [10.] * 2) + + # a blas-compatible contraction broadcasting case which was failing + # for optimize=True (ticket #10930) + x = np.array([2., 3.]) + y = np.array([4.]) + assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.) + assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.) 
+ + # all-ones array was bypassing bug (ticket #10930) + p = np.ones((1, 5)) / 2 + q = np.ones((5, 5)) / 2 + for optimize in (True, False): + assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, + optimize=optimize), + np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize)) + assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize), + np.full((1, 5), 1.25)) + def test_einsum_sums_int8(self): self.check_einsum_sums('i1') @@ -538,6 +565,13 @@ assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) + # Regression test for issue #10369 (test unicode inputs with Python 2) + assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], + optimize=u'greedy'), 20) + # The iterator had an issue with buffering this reduction a = np.ones((5, 12, 4, 2, 3), np.int64) b = np.ones((5, 12, 11), np.int64) @@ -568,48 +602,37 @@ A = np.arange(2 * 3 * 4).reshape(2, 3, 4) B = np.arange(3) - ref = np.einsum('ijk,j->ijk', A, B) - assert_equal(np.einsum('ij...,j...->ij...', A, B), ref) - assert_equal(np.einsum('ij...,...j->ij...', A, B), ref) - assert_equal(np.einsum('ij...,j->ij...', A, B), ref) # used to raise error - - assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=True), ref) - assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=True), ref) - assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=True), ref) # used to raise error + ref = np.einsum('ijk,j->ijk', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error A = 
np.arange(12).reshape((4, 3)) B = np.arange(6).reshape((3, 2)) - ref = np.einsum('ik,kj->ij', A, B) - assert_equal(np.einsum('ik...,k...->i...', A, B), ref) - assert_equal(np.einsum('ik...,...kj->i...j', A, B), ref) - assert_equal(np.einsum('...k,kj', A, B), ref) # used to raise error - assert_equal(np.einsum('ik,k...->i...', A, B), ref) # used to raise error - - assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=True), ref) - assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=True), ref) - assert_equal(np.einsum('...k,kj', A, B, optimize=True), ref) # used to raise error - assert_equal(np.einsum('ik,k...->i...', A, B, optimize=True), ref) # used to raise error + ref = np.einsum('ik,kj->ij', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref) + assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error dims = [2, 3, 4, 5] a = np.arange(np.prod(dims)).reshape(dims) v = np.arange(dims[2]) - ref = np.einsum('ijkl,k->ijl', a, v) - assert_equal(np.einsum('ijkl,k', a, v), ref) - assert_equal(np.einsum('...kl,k', a, v), ref) # used to raise error - assert_equal(np.einsum('...kl,k...', a, v), ref) - # no real diff from 1st - - assert_equal(np.einsum('ijkl,k', a, v, optimize=True), ref) - assert_equal(np.einsum('...kl,k', a, v, optimize=True), ref) # used to raise error - assert_equal(np.einsum('...kl,k...', a, v, optimize=True), ref) + ref = np.einsum('ijkl,k->ijl', a, v, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref) + assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error + assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref) J, K, M = 160, 160, 120 A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M) B = 
np.arange(J * K * M * 3).reshape(J, K, M, 3) - ref = np.einsum('...lmn,...lmno->...o', A, B) - assert_equal(np.einsum('...lmn,lmno->...o', A, B), ref) # used to raise error - assert_equal(np.einsum('...lmn,lmno->...o', A, B, - optimize=True), ref) # used to raise error + ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('...lmn,lmno->...o', A, B, + optimize=opt), ref) # used to raise error def test_einsum_fixedstridebug(self): # Issue #4485 obscure einsum bug @@ -777,14 +800,14 @@ self.optimize_compare('aef,fbc,dca->bde') -class TestEinSumPath(TestCase): - def build_operands(self, string): +class TestEinSumPath(object): + def build_operands(self, string, size_dict=global_size_dict): # Builds views based off initial operands operands = [string] terms = string.split('->')[0].split(',') for term in terms: - dims = [global_size_dict[x] for x in term] + dims = [size_dict[x] for x in term] operands.append(np.random.rand(*dims)) return operands @@ -874,6 +897,16 @@ path, path_str = np.einsum_path(*edge_test4, optimize='optimal') self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)]) + # Edge test5 + edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->', + size_dict={"a": 20, "b": 20, "c": 20, "d": 20}) + path, path_str = np.einsum_path(*edge_test4, optimize='greedy') + self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) + + path, path_str = np.einsum_path(*edge_test4, optimize='optimal') + self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)]) + + def test_path_type_input(self): # Test explicit path handeling path_test = self.build_operands('dcc,fce,ea,dbf->ab') diff -Nru python-numpy-1.13.3/numpy/core/tests/test_errstate.py python-numpy-1.14.5/numpy/core/tests/test_errstate.py --- python-numpy-1.13.3/numpy/core/tests/test_errstate.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_errstate.py 2018-06-12 
18:28:52.000000000 +0000 @@ -3,10 +3,10 @@ import platform import numpy as np -from numpy.testing import TestCase, assert_, run_module_suite, dec +from numpy.testing import assert_, run_module_suite, dec -class TestErrstate(TestCase): +class TestErrstate(object): @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") def test_invalid(self): with np.errstate(all='raise', under='ignore'): diff -Nru python-numpy-1.13.3/numpy/core/tests/test_extint128.py python-numpy-1.14.5/numpy/core/tests/test_extint128.py --- python-numpy-1.13.3/numpy/core/tests/test_extint128.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_extint128.py 2018-06-12 18:28:52.000000000 +0000 @@ -59,7 +59,7 @@ try: yield iterate() - except: + except Exception: import traceback msg = "At: %r\n%s" % (repr(value[0]), traceback.format_exc()) diff -Nru python-numpy-1.13.3/numpy/core/tests/test_function_base.py python-numpy-1.14.5/numpy/core/tests/test_function_base.py --- python-numpy-1.13.3/numpy/core/tests/test_function_base.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_function_base.py 2018-06-12 18:28:52.000000000 +0000 @@ -3,7 +3,7 @@ from numpy import (logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan, ndarray, sqrt, nextafter) from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_raises, + run_module_suite, assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose, suppress_warnings ) @@ -40,7 +40,7 @@ __array_priority__ = 10 -class TestLogspace(TestCase): +class TestLogspace(object): def test_basic(self): y = logspace(0, 6) @@ -76,7 +76,7 @@ assert_equal(ls, logspace(1.0, 7.0, 1)) -class TestGeomspace(TestCase): +class TestGeomspace(object): def test_basic(self): y = geomspace(1, 1e6) @@ -191,7 +191,7 @@ assert_raises(ValueError, geomspace, 0, 0) -class TestLinspace(TestCase): +class TestLinspace(object): def test_basic(self): y = linspace(0, 10) diff 
-Nru python-numpy-1.13.3/numpy/core/tests/test_getlimits.py python-numpy-1.14.5/numpy/core/tests/test_getlimits.py --- python-numpy-1.13.3/numpy/core/tests/test_getlimits.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_getlimits.py 2018-06-12 18:28:52.000000000 +0000 @@ -7,44 +7,44 @@ from numpy.core import finfo, iinfo from numpy import half, single, double, longdouble from numpy.testing import ( - TestCase, run_module_suite, assert_equal, assert_ + run_module_suite, assert_equal, assert_, assert_raises ) from numpy.core.getlimits import (_discovered_machar, _float16_ma, _float32_ma, _float64_ma, _float128_ma, _float80_ma) ################################################## -class TestPythonFloat(TestCase): +class TestPythonFloat(object): def test_singleton(self): ftype = finfo(float) ftype2 = finfo(float) assert_equal(id(ftype), id(ftype2)) -class TestHalf(TestCase): +class TestHalf(object): def test_singleton(self): ftype = finfo(half) ftype2 = finfo(half) assert_equal(id(ftype), id(ftype2)) -class TestSingle(TestCase): +class TestSingle(object): def test_singleton(self): ftype = finfo(single) ftype2 = finfo(single) assert_equal(id(ftype), id(ftype2)) -class TestDouble(TestCase): +class TestDouble(object): def test_singleton(self): ftype = finfo(double) ftype2 = finfo(double) assert_equal(id(ftype), id(ftype2)) -class TestLongdouble(TestCase): - def test_singleton(self,level=2): +class TestLongdouble(object): + def test_singleton(self): ftype = finfo(longdouble) ftype2 = finfo(longdouble) assert_equal(id(ftype), id(ftype2)) -class TestFinfo(TestCase): +class TestFinfo(object): def test_basic(self): dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'], [np.float16, np.float32, np.float64, np.complex64, @@ -55,9 +55,9 @@ 'nmant', 'precision', 'resolution', 'tiny'): assert_equal(getattr(finfo(dt1), attr), getattr(finfo(dt2), attr), attr) - self.assertRaises(ValueError, finfo, 'i4') + assert_raises(ValueError, finfo, 'i4') -class 
TestIinfo(TestCase): +class TestIinfo(object): def test_basic(self): dts = list(zip(['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'], @@ -67,14 +67,14 @@ for attr in ('bits', 'min', 'max'): assert_equal(getattr(iinfo(dt1), attr), getattr(iinfo(dt2), attr), attr) - self.assertRaises(ValueError, iinfo, 'f4') + assert_raises(ValueError, iinfo, 'f4') def test_unsigned_max(self): types = np.sctypes['uint'] for T in types: assert_equal(iinfo(T).max, T(-1)) -class TestRepr(TestCase): +class TestRepr(object): def test_iinfo_repr(self): expected = "iinfo(min=-32768, max=32767, dtype=int16)" assert_equal(repr(np.iinfo(np.int16)), expected) diff -Nru python-numpy-1.13.3/numpy/core/tests/test_half.py python-numpy-1.14.5/numpy/core/tests/test_half.py --- python-numpy-1.13.3/numpy/core/tests/test_half.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_half.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,8 +4,7 @@ import numpy as np from numpy import uint16, float16, float32, float64 -from numpy.testing import TestCase, run_module_suite, assert_, assert_equal, \ - dec +from numpy.testing import run_module_suite, assert_, assert_equal, dec def assert_raises_fpe(strmatch, callable, *args, **kwargs): @@ -18,8 +17,8 @@ assert_(False, "Did not raise floating point %s error" % strmatch) -class TestHalf(TestCase): - def setUp(self): +class TestHalf(object): + def setup(self): # An array of all possible float16 values self.all_f16 = np.arange(0x10000, dtype=uint16) self.all_f16.dtype = float16 @@ -66,7 +65,7 @@ # Check the range for which all integers can be represented i_int = np.arange(-2048, 2049) i_f16 = np.array(i_int, dtype=float16) - j = np.array(i_f16, dtype=np.int) + j = np.array(i_f16, dtype=int) assert_equal(i_int, j) def test_nans_infs(self): diff -Nru python-numpy-1.13.3/numpy/core/tests/test_indexerrors.py python-numpy-1.14.5/numpy/core/tests/test_indexerrors.py --- python-numpy-1.13.3/numpy/core/tests/test_indexerrors.py 2017-09-29 
17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_indexerrors.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,9 +1,9 @@ from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_raises +from numpy.testing import run_module_suite, assert_raises -class TestIndexErrors(TestCase): +class TestIndexErrors(object): '''Tests to exercise indexerrors not covered by other tests.''' def test_arraytypes_fasttake(self): diff -Nru python-numpy-1.13.3/numpy/core/tests/test_indexing.py python-numpy-1.14.5/numpy/core/tests/test_indexing.py --- python-numpy-1.13.3/numpy/core/tests/test_indexing.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_indexing.py 2018-06-12 18:28:52.000000000 +0000 @@ -9,8 +9,8 @@ from numpy.core.multiarray_tests import array_indexing from itertools import product from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_raises, - assert_array_equal, assert_warns, HAS_REFCOUNT + run_module_suite, assert_, assert_equal, assert_raises, + assert_array_equal, assert_warns, dec, HAS_REFCOUNT, suppress_warnings, ) @@ -28,7 +28,7 @@ _HAS_CTYPE = False -class TestIndexing(TestCase): +class TestIndexing(object): def test_index_no_floats(self): a = np.array([[[5]]]) @@ -106,6 +106,12 @@ a = np.array(0) assert_(isinstance(a[()], np.int_)) + def test_void_scalar_empty_tuple(self): + s = np.zeros((), dtype='V4') + assert_equal(s[()].dtype, s.dtype) + assert_equal(s[()], s) + assert_equal(type(s[...]), np.ndarray) + def test_same_kind_index_casting(self): # Indexes should be cast with same-kind and not safe, even if that # is somewhat unsafe. So test various different code paths. @@ -511,7 +517,7 @@ arr[slices] = 10 assert_array_equal(arr, 10.) 
-class TestFieldIndexing(TestCase): +class TestFieldIndexing(object): def test_scalar_return_type(self): # Field access on an array should return an array, even if it # is 0-d. @@ -520,7 +526,7 @@ assert_(isinstance(a[['a']], np.ndarray)) -class TestBroadcastedAssignments(TestCase): +class TestBroadcastedAssignments(object): def assign(self, a, ind, val): a[ind] = val return a @@ -571,7 +577,7 @@ assert_((a[::-1] == v).all()) -class TestSubclasses(TestCase): +class TestSubclasses(object): def test_basic(self): class SubClass(np.ndarray): pass @@ -616,7 +622,56 @@ assert_array_equal(new_s.finalize_status, new_s) assert_array_equal(new_s.old, s) -class TestFancyIndexingCast(TestCase): + @dec.skipif(not HAS_REFCOUNT) + def test_slice_decref_getsetslice(self): + # See gh-10066, a temporary slice object should be discarted. + # This test is only really interesting on Python 2 since + # it goes through `__set/getslice__` here and can probably be + # removed. Use 0:7 to make sure it is never None:7. 
+ class KeepIndexObject(np.ndarray): + def __getitem__(self, indx): + self.indx = indx + if indx == slice(0, 7): + raise ValueError + + def __setitem__(self, indx, val): + self.indx = indx + if indx == slice(0, 4): + raise ValueError + + k = np.array([1]).view(KeepIndexObject) + k[0:5] + assert_equal(k.indx, slice(0, 5)) + assert_equal(sys.getrefcount(k.indx), 2) + try: + k[0:7] + raise AssertionError + except ValueError: + # The exception holds a reference to the slice so clear on Py2 + if hasattr(sys, 'exc_clear'): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + sys.exc_clear() + assert_equal(k.indx, slice(0, 7)) + assert_equal(sys.getrefcount(k.indx), 2) + + k[0:3] = 6 + assert_equal(k.indx, slice(0, 3)) + assert_equal(sys.getrefcount(k.indx), 2) + try: + k[0:4] = 2 + raise AssertionError + except ValueError: + # The exception holds a reference to the slice so clear on Py2 + if hasattr(sys, 'exc_clear'): + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + sys.exc_clear() + assert_equal(k.indx, slice(0, 4)) + assert_equal(sys.getrefcount(k.indx), 2) + + +class TestFancyIndexingCast(object): def test_boolean_index_cast_assign(self): # Setup the boolean index and float arrays. shape = (8, 63) @@ -638,7 +693,7 @@ zero_array.__setitem__, bool_index, np.array([1j])) assert_equal(zero_array[0, 1], 0) -class TestFancyIndexingEquivalence(TestCase): +class TestFancyIndexingEquivalence(object): def test_object_assign(self): # Check that the field and object special case using copyto is active. # The right hand side cannot be converted to an array here. @@ -686,7 +741,7 @@ assert_array_equal(a, b[0]) -class TestMultiIndexingAutomated(TestCase): +class TestMultiIndexingAutomated(object): """ These tests use code to mimic the C-Code indexing for selection. 
@@ -708,7 +763,7 @@ """ - def setUp(self): + def setup(self): self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) self.b = np.empty((3, 0, 5, 6)) self.complex_indices = ['skip', Ellipsis, @@ -847,7 +902,7 @@ try: flat_indx = np.ravel_multi_index(np.nonzero(indx), arr.shape[ax:ax+indx.ndim], mode='raise') - except: + except Exception: error_unless_broadcast_to_empty = True # fill with 0s instead, and raise error later flat_indx = np.array([0]*indx.sum(), dtype=np.intp) @@ -946,7 +1001,7 @@ try: mi = np.ravel_multi_index(indx[1:], orig_slice, mode='raise') - except: + except Exception: # This happens with 0-sized orig_slice (sometimes?) # here it is a ValueError, but indexing gives a: raise IndexError('invalid index into 0-sized') @@ -1103,7 +1158,7 @@ for index in self.complex_indices: self._check_single_index(a, index) -class TestFloatNonIntegerArgument(TestCase): +class TestFloatNonIntegerArgument(object): """ These test that ``TypeError`` is raised when you try to use non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]`` @@ -1158,7 +1213,7 @@ assert_raises(TypeError, np.min, d, (.2, 1.2)) -class TestBooleanIndexing(TestCase): +class TestBooleanIndexing(object): # Using a boolean as integer argument/indexing is an error. def test_bool_as_int_argument_errors(self): a = np.array([[[1]]]) @@ -1168,6 +1223,7 @@ # Note that operator.index(np.array(True)) does not work, a boolean # array is thus also deprecated, but not with the same message: assert_raises(TypeError, operator.index, np.array(True)) + assert_warns(DeprecationWarning, operator.index, np.True_) assert_raises(TypeError, np.take, args=(a, [0], False)) def test_boolean_indexing_weirdness(self): @@ -1178,7 +1234,7 @@ assert_raises(IndexError, lambda: a[False, [0, 1], ...]) -class TestArrayToIndexDeprecation(TestCase): +class TestArrayToIndexDeprecation(object): """Creating an an index from array not 0-D is an error. 
""" @@ -1191,7 +1247,7 @@ assert_raises(TypeError, np.take, a, [0], a) -class TestNonIntegerArrayLike(TestCase): +class TestNonIntegerArrayLike(object): """Tests that array_likes only valid if can safely cast to integer. For instance, lists give IndexError when they cannot be safely cast to @@ -1208,7 +1264,7 @@ a.__getitem__([]) -class TestMultipleEllipsisError(TestCase): +class TestMultipleEllipsisError(object): """An index can only have a single ellipsis. """ @@ -1219,7 +1275,7 @@ assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,)) -class TestCApiAccess(TestCase): +class TestCApiAccess(object): def test_getitem(self): subscript = functools.partial(array_indexing, 0) diff -Nru python-numpy-1.13.3/numpy/core/tests/test_item_selection.py python-numpy-1.14.5/numpy/core/tests/test_item_selection.py --- python-numpy-1.13.3/numpy/core/tests/test_item_selection.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_item_selection.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,12 +4,12 @@ import numpy as np from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_raises, + run_module_suite, assert_, assert_raises, assert_array_equal, HAS_REFCOUNT ) -class TestTake(TestCase): +class TestTake(object): def test_simple(self): a = [[1, 2], [3, 4]] a_str = [[b'1', b'2'], [b'3', b'4']] @@ -24,7 +24,7 @@ # Currently all types but object, use the same function generation. # So it should not be necessary to test all. However test also a non # refcounted struct on top of object. 
- types = np.int, np.object, np.dtype([('', 'i', 2)]) + types = int, object, np.dtype([('', 'i', 2)]) for t in types: # ta works, even if the array may be odd if buffer interface is used ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t) @@ -53,13 +53,13 @@ for mode in ('raise', 'clip', 'wrap'): a = np.array(objects) b = np.array([2, 2, 4, 5, 3, 5]) - a.take(b, out=a[:6]) + a.take(b, out=a[:6], mode=mode) del a if HAS_REFCOUNT: assert_(all(sys.getrefcount(o) == 3 for o in objects)) # not contiguous, example: a = np.array(objects * 2)[::2] - a.take(b, out=a[:6]) + a.take(b, out=a[:6], mode=mode) del a if HAS_REFCOUNT: assert_(all(sys.getrefcount(o) == 3 for o in objects)) diff -Nru python-numpy-1.13.3/numpy/core/tests/test_longdouble.py python-numpy-1.14.5/numpy/core/tests/test_longdouble.py --- python-numpy-1.13.3/numpy/core/tests/test_longdouble.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_longdouble.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,9 +5,9 @@ import numpy as np from numpy.testing import ( run_module_suite, assert_, assert_equal, dec, assert_raises, - assert_array_equal, TestCase, temppath, + assert_array_equal, temppath, ) -from test_print import in_foreign_locale +from .test_print import in_foreign_locale LD_INFO = np.finfo(np.longdouble) longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) @@ -110,7 +110,7 @@ np.array([1])) -class FileBased(TestCase): +class TestFileBased(object): ldbl = 1 + LD_INFO.eps tgt = np.array([ldbl]*5) diff -Nru python-numpy-1.13.3/numpy/core/tests/test_machar.py python-numpy-1.14.5/numpy/core/tests/test_machar.py --- python-numpy-1.13.3/numpy/core/tests/test_machar.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_machar.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,11 +1,16 @@ +""" +Test machar. Given recent changes to hardcode type data, we might want to get +rid of both MachAr and this test at some point. 
+ +""" from __future__ import division, absolute_import, print_function from numpy.core.machar import MachAr import numpy.core.numerictypes as ntypes from numpy import errstate, array -from numpy.testing import TestCase, run_module_suite +from numpy.testing import run_module_suite -class TestMachAr(TestCase): +class TestMachAr(object): def _run_machar_highprec(self): # Instantiate MachAr instance with high enough precision to cause # underflow @@ -13,6 +18,7 @@ hiprec = ntypes.float96 MachAr(lambda v:array([v], hiprec)) except AttributeError: + # Fixme, this needs to raise a 'skip' exception. "Skipping test: no ntypes.float96 available on this platform." def test_underlow(self): @@ -22,7 +28,8 @@ try: self._run_machar_highprec() except FloatingPointError as e: - self.fail("Caught %s exception, should not have been raised." % e) + msg = "Caught %s exception, should not have been raised." % e + raise AssertionError(msg) if __name__ == "__main__": diff -Nru python-numpy-1.13.3/numpy/core/tests/test_memmap.py python-numpy-1.14.5/numpy/core/tests/test_memmap.py --- python-numpy-1.13.3/numpy/core/tests/test_memmap.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_memmap.py 2018-06-12 18:28:52.000000000 +0000 @@ -12,12 +12,12 @@ from numpy import arange, allclose, asarray from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_array_equal, + run_module_suite, assert_, assert_equal, assert_array_equal, dec, suppress_warnings ) -class TestMemmap(TestCase): - def setUp(self): +class TestMemmap(object): + def setup(self): self.tmpfp = NamedTemporaryFile(prefix='mmap') self.tempdir = mkdtemp() self.shape = (3, 4) @@ -25,7 +25,7 @@ self.data = arange(12, dtype=self.dtype) self.data.resize(self.shape) - def tearDown(self): + def teardown(self): self.tmpfp.close() shutil.rmtree(self.tempdir) @@ -41,7 +41,7 @@ shape=self.shape) assert_(allclose(self.data, newfp)) assert_array_equal(self.data, newfp) - 
self.assertEqual(newfp.flags.writeable, False) + assert_equal(newfp.flags.writeable, False) def test_open_with_filename(self): tmpname = mktemp('', 'mmap', dir=self.tempdir) @@ -60,8 +60,8 @@ mode = "w+" fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, shape=self.shape, offset=offset) - self.assertEqual(offset, fp.offset) - self.assertEqual(mode, fp.mode) + assert_equal(offset, fp.offset) + assert_equal(mode, fp.mode) del fp def test_filename(self): @@ -70,9 +70,9 @@ shape=self.shape) abspath = os.path.abspath(tmpname) fp[:] = self.data[:] - self.assertEqual(abspath, fp.filename) + assert_equal(abspath, fp.filename) b = fp[:1] - self.assertEqual(abspath, b.filename) + assert_equal(abspath, b.filename) del b del fp @@ -83,16 +83,16 @@ shape=self.shape) abspath = os.path.realpath(os.path.abspath(tmpname)) fp[:] = self.data[:] - self.assertEqual(abspath, str(fp.filename.resolve())) + assert_equal(abspath, str(fp.filename.resolve())) b = fp[:1] - self.assertEqual(abspath, str(b.filename.resolve())) + assert_equal(abspath, str(b.filename.resolve())) del b del fp def test_filename_fileobj(self): fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", shape=self.shape) - self.assertEqual(fp.filename, self.tmpfp.name) + assert_equal(fp.filename, self.tmpfp.name) @dec.knownfailureif(sys.platform == 'gnu0', "This test is known to fail on hurd") def test_flush(self): diff -Nru python-numpy-1.13.3/numpy/core/tests/test_multiarray.py python-numpy-1.14.5/numpy/core/tests/test_multiarray.py --- python-numpy-1.13.3/numpy/core/tests/test_multiarray.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_multiarray.py 2018-06-12 18:28:52.000000000 +0000 @@ -18,19 +18,19 @@ else: import __builtin__ as builtins from decimal import Decimal - +from unittest import TestCase import numpy as np from numpy.compat import strchar, unicode -from test_print import in_foreign_locale +from numpy.core.tests.test_print import in_foreign_locale from 
numpy.core.multiarray_tests import ( test_neighborhood_iterator, test_neighborhood_iterator_oob, test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end, test_inplace_increment, get_buffer_info, test_as_c_array, ) from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_raises, assert_warns, - assert_equal, assert_almost_equal, assert_array_equal, + run_module_suite, assert_, assert_raises, assert_warns, + assert_equal, assert_almost_equal, assert_array_equal, assert_raises_regex, assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings ) @@ -74,21 +74,22 @@ return data -class TestFlags(TestCase): - def setUp(self): +class TestFlags(object): + def setup(self): self.a = np.arange(10) def test_writeable(self): mydict = locals() self.a.flags.writeable = False - self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict) - self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) + assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict) + assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) self.a.flags.writeable = True self.a[0] = 5 self.a[0] = 0 def test_otherflags(self): assert_equal(self.a.flags.carray, True) + assert_equal(self.a.flags['C'], True) assert_equal(self.a.flags.farray, False) assert_equal(self.a.flags.behaved, True) assert_equal(self.a.flags.fnc, False) @@ -96,7 +97,15 @@ assert_equal(self.a.flags.owndata, True) assert_equal(self.a.flags.writeable, True) assert_equal(self.a.flags.aligned, True) - assert_equal(self.a.flags.updateifcopy, False) + with assert_warns(DeprecationWarning): + assert_equal(self.a.flags.updateifcopy, False) + with assert_warns(DeprecationWarning): + assert_equal(self.a.flags['U'], False) + assert_equal(self.a.flags['UPDATEIFCOPY'], False) + assert_equal(self.a.flags.writebackifcopy, False) + assert_equal(self.a.flags['X'], False) + 
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + def test_string_align(self): a = np.zeros(4, dtype=np.dtype('|S4')) @@ -110,7 +119,7 @@ assert_(a.flags.aligned) -class TestHash(TestCase): +class TestHash(object): # see #3793 def test_int(self): for st, ut, s in [(np.int8, np.uint8, 8), @@ -132,8 +141,8 @@ err_msg="%r: 2**%d - 1" % (ut, i)) -class TestAttributes(TestCase): - def setUp(self): +class TestAttributes(object): + def setup(self): self.one = np.arange(10) self.two = np.arange(20).reshape(4, 5) self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) @@ -164,7 +173,7 @@ assert_equal(self.three.dtype, np.dtype(np.float_)) assert_equal(self.one.dtype.char, 'l') assert_equal(self.three.dtype.char, 'd') - self.assertTrue(self.three.dtype.str[0] in '<>') + assert_(self.three.dtype.str[0] in '<>') assert_equal(self.one.dtype.str[1], 'i') assert_equal(self.three.dtype.str[1], 'f') @@ -194,12 +203,12 @@ strides=strides*x.itemsize) assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) - self.assertRaises(ValueError, make_array, 4, 4, -2) - self.assertRaises(ValueError, make_array, 4, 2, -1) - self.assertRaises(ValueError, make_array, 8, 3, 1) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(ValueError, make_array, 8, 3, 1) assert_equal(make_array(8, 3, 0), np.array([3]*8)) # Check behavior reported in gh-2503: - self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) + assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) make_array(0, 0, 10) def test_set_stridesattr(self): @@ -216,9 +225,9 @@ assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) - self.assertRaises(ValueError, make_array, 4, 4, -2) - self.assertRaises(ValueError, make_array, 4, 2, -1) - self.assertRaises(RuntimeError, make_array, 8, 3, 1) + assert_raises(ValueError, make_array, 4, 4, -2) + 
assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) @@ -226,12 +235,12 @@ def set_strides(arr, strides): arr.strides = strides - self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) + assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) # Test for offset calculations: x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) - self.assertRaises(ValueError, set_strides, x[::-1], -1) + assert_raises(ValueError, set_strides, x[::-1], -1) a = x[::-1] a.strides = 1 a[::2].strides = 2 @@ -265,7 +274,7 @@ assert_array_equal(x['b'], [-2, -2]) -class TestArrayConstruction(TestCase): +class TestArrayConstruction(object): def test_array(self): d = np.ones(6) r = np.array([d, d]) @@ -297,7 +306,7 @@ assert_equal(r[0], [d, d + 1]) assert_equal(r[1], d + 2) - tgt = np.ones((2, 3), dtype=np.bool) + tgt = np.ones((2, 3), dtype=bool) tgt[0, 2] = False tgt[1, 0:2] = False r = np.array([[True, True, False], [False, False, True]]) @@ -343,7 +352,7 @@ assert_(np.asfortranarray(d).flags.f_contiguous) -class TestAssignment(TestCase): +class TestAssignment(object): def test_assignment_broadcasting(self): a = np.arange(6).reshape(2, 3) @@ -423,33 +432,44 @@ # only relevant if longdouble is larger than float # we're looking for loss of precision - # gh-8902 - tinyb = np.nextafter(np.longdouble(0), 1) - tinya = np.nextafter(np.longdouble(0), -1) - tiny1d = np.array([tinya]) - assert_equal(tiny1d[0], tinya) - - # scalar = scalar - tiny1d[0] = tinyb - assert_equal(tiny1d[0], tinyb) - - # 0d = scalar - tiny1d[0, ...] 
= tinya - assert_equal(tiny1d[0], tinya) + for dtype in (np.longdouble, np.longcomplex): + # gh-8902 + tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype) + tinya = np.nextafter(np.longdouble(0), -1).astype(dtype) + + # construction + tiny1d = np.array([tinya]) + assert_equal(tiny1d[0], tinya) + + # scalar = scalar + tiny1d[0] = tinyb + assert_equal(tiny1d[0], tinyb) + + # 0d = scalar + tiny1d[0, ...] = tinya + assert_equal(tiny1d[0], tinya) + + # 0d = 0d + tiny1d[0, ...] = tinyb[...] + assert_equal(tiny1d[0], tinyb) + + # scalar = 0d + tiny1d[0] = tinyb[...] + assert_equal(tiny1d[0], tinyb) + + arr = np.array([np.array(tinya)]) + assert_equal(arr[0], tinya) + + def test_cast_to_string(self): + # cast to str should do "str(scalar)", not "str(scalar.item())" + # Example: In python2, str(float) is truncated, so we want to avoid + # str(np.float64(...).item()) as this would incorrectly truncate. + a = np.zeros(1, dtype='S20') + a[:] = np.array(['1.12345678901234567890'], dtype='f8') + assert_equal(a[0], b"1.1234567890123457") - # 0d = 0d - tiny1d[0, ...] = tinyb[...] - assert_equal(tiny1d[0], tinyb) - # scalar = 0d - tiny1d[0] = tinyb[...] 
- assert_equal(tiny1d[0], tinyb) - - arr = np.array([np.array(tinya)]) - assert_equal(arr[0], tinya) - - -class TestDtypedescr(TestCase): +class TestDtypedescr(object): def test_construction(self): d1 = np.dtype('i4') assert_equal(d1, np.dtype(np.int32)) @@ -457,48 +477,58 @@ assert_equal(d2, np.dtype(np.float64)) def test_byteorders(self): - self.assertNotEqual(np.dtype('i4')) - self.assertNotEqual(np.dtype([('a', 'i4')])) + assert_(np.dtype('i4')) + assert_(np.dtype([('a', 'i4')])) + + def test_structured_non_void(self): + fields = [('a', '= 3) def test_sequence_long(self): assert_equal(np.array([long(4), long(4)]).dtype, np.long) - assert_equal(np.array([long(4), 2**80]).dtype, np.object) - assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object) - assert_equal(np.array([2**80, long(4)]).dtype, np.object) + assert_equal(np.array([long(4), 2**80]).dtype, object) + assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object) + assert_equal(np.array([2**80, long(4)]).dtype, object) def test_non_sequence_sequence(self): """Should not segfault. 
@@ -856,7 +886,7 @@ shape=(max_bytes//itemsize + 1,), dtype=dtype) -class TestStructured(TestCase): +class TestStructured(object): def test_subarray_field_access(self): a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) a['a'] = np.arange(60).reshape(3, 5, 2, 2) @@ -876,7 +906,7 @@ # multi-dimensional field types work properly a = np.rec.fromrecords( [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], - dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))]) + dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))]) b = a.copy() assert_equal(a == b, [True, True]) assert_equal(a != b, [False, False]) @@ -952,16 +982,13 @@ # Check that equality comparison works on structured arrays if # they are 'equiv'-castable a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', 'f8'), ('a', 'f8')]) assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) assert_equal(a == b, [True, True]) - # Check that 'equiv' casting can reorder fields and change byte - # order - # New in 1.12: This behavior changes in 1.13, test for dep warning + # Check that 'equiv' casting can change byte order assert_(np.can_cast(a.dtype, b.dtype, casting='equiv')) - with assert_warns(FutureWarning): - c = a.astype(b.dtype, casting='equiv') + c = a.astype(b.dtype, casting='equiv') assert_equal(a == c, [True, True]) # Check that 'safe' casting can change byte order and up-cast @@ -1096,20 +1123,70 @@ b = a[0] assert_(b.base is a) + def test_assignment(self): + def testassign(arr, v): + c = arr.copy() + c[0] = v # assign using setitem + c[1:] = v # assign using "dtype_transfer" code paths + return c + + dt = np.dtype([('foo', 'i8'), ('bar', 'i8')]) + arr = np.ones(2, dt) + v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')]) + v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')]) + v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')]) + v4 = np.array([(2,)], dtype=[('bar', 'i8')]) + v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')]) 
+ w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]}) + + ans = np.array([(2,3),(2,3)], dtype=dt) + assert_equal(testassign(arr, v1), ans) + assert_equal(testassign(arr, v2), ans) + assert_equal(testassign(arr, v3), ans) + assert_raises(ValueError, lambda: testassign(arr, v4)) + assert_equal(testassign(arr, v5), ans) + w[:] = 4 + assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt)) + + # test field-reordering, assignment by position, and self-assignment + a = np.array([(1,2,3)], + dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')]) + a[['foo', 'bar']] = a[['bar', 'foo']] + assert_equal(a[0].item(), (2,1,3)) + + # test that this works even for 'simple_unaligned' structs + # (ie, that PyArray_EquivTypes cares about field order too) + a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')]) + a[['a', 'b']] = a[['b', 'a']] + assert_equal(a[0].item(), (2,1)) + + def test_structuredscalar_indexing(self): + # test gh-7262 + x = np.empty(shape=1, dtype="(2)3S,(2)3U") + assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]]) + assert_equal(x[0], x[0][()]) + + def test_multiindex_titles(self): + a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')]) + assert_raises(KeyError, lambda : a[['a','c']]) + assert_raises(KeyError, lambda : a[['a','a']]) + assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated + a[['b','c']] # no exception -class TestBool(TestCase): + +class TestBool(object): def test_test_interning(self): a0 = np.bool_(0) b0 = np.bool_(False) - self.assertTrue(a0 is b0) + assert_(a0 is b0) a1 = np.bool_(1) b1 = np.bool_(True) - self.assertTrue(a1 is b1) - self.assertTrue(np.array([True])[0] is a1) - self.assertTrue(np.array(True)[()] is a1) + assert_(a1 is b1) + assert_(np.array([True])[0] is a1) + assert_(np.array(True)[()] is a1) def test_sum(self): - d = np.ones(101, dtype=np.bool) + d = np.ones(101, dtype=bool) assert_equal(d.sum(), d.size) assert_equal(d[::2].sum(), d[::2].size) assert_equal(d[::-2].sum(), 
d[::-2].size) @@ -1123,16 +1200,16 @@ powers = [2 ** i for i in range(length)] for i in range(2**power): l = [(i & x) != 0 for x in powers] - a = np.array(l, dtype=np.bool) + a = np.array(l, dtype=bool) c = builtins.sum(l) - self.assertEqual(np.count_nonzero(a), c) + assert_equal(np.count_nonzero(a), c) av = a.view(np.uint8) av *= 3 - self.assertEqual(np.count_nonzero(a), c) + assert_equal(np.count_nonzero(a), c) av *= 4 - self.assertEqual(np.count_nonzero(a), c) + assert_equal(np.count_nonzero(a), c) av[av != 0] = 0xFF - self.assertEqual(np.count_nonzero(a), c) + assert_equal(np.count_nonzero(a), c) def test_count_nonzero(self): # check all 12 bit combinations in a length 17 array @@ -1148,15 +1225,114 @@ def test_count_nonzero_unaligned(self): # prevent mistakes as e.g. gh-4060 for o in range(7): - a = np.zeros((18,), dtype=np.bool)[o+1:] + a = np.zeros((18,), dtype=bool)[o+1:] a[:o] = True - self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist())) - a = np.ones((18,), dtype=np.bool)[o+1:] + assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) + a = np.ones((18,), dtype=bool)[o+1:] a[:o] = False - self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist())) + assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) + + def _test_cast_from_flexible(self, dtype): + # empty string -> false + for n in range(3): + v = np.array(b'', (dtype, n)) + assert_equal(bool(v), False) + assert_equal(bool(v[()]), False) + assert_equal(v.astype(bool), False) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.False_) + + # anything else -> true + for n in range(1, 4): + for val in [b'a', b'0', b' ']: + v = np.array(val, (dtype, n)) + assert_equal(bool(v), True) + assert_equal(bool(v[()]), True) + assert_equal(v.astype(bool), True) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.True_) + + def test_cast_from_void(self): + self._test_cast_from_flexible(np.void) + + @dec.knownfailureif(True, 
"See gh-9847") + def test_cast_from_unicode(self): + self._test_cast_from_flexible(np.unicode_) + + @dec.knownfailureif(True, "See gh-9847") + def test_cast_from_bytes(self): + self._test_cast_from_flexible(np.bytes_) + + +class TestZeroSizeFlexible(object): + @staticmethod + def _zeros(shape, dtype=str): + dtype = np.dtype(dtype) + if dtype == np.void: + return np.zeros(shape, dtype=(dtype, 0)) + + # not constructable directly + dtype = np.dtype([('x', dtype, 0)]) + return np.zeros(shape, dtype=dtype)['x'] + + def test_create(self): + zs = self._zeros(10, bytes) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, np.void) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, unicode) + assert_equal(zs.itemsize, 0) + + def _test_sort_partition(self, name, kinds, **kwargs): + # Previously, these would all hang + for dt in [bytes, np.void, unicode]: + zs = self._zeros(10, dt) + sort_method = getattr(zs, name) + sort_func = getattr(np, name) + for kind in kinds: + sort_method(kind=kind, **kwargs) + sort_func(zs, kind=kind, **kwargs) + + def test_sort(self): + self._test_sort_partition('sort', kinds='qhm') + + def test_argsort(self): + self._test_sort_partition('argsort', kinds='qhm') + + def test_partition(self): + self._test_sort_partition('partition', kinds=['introselect'], kth=2) + def test_argpartition(self): + self._test_sort_partition('argpartition', kinds=['introselect'], kth=2) -class TestMethods(TestCase): + def test_resize(self): + # previously an error + for dt in [bytes, np.void, unicode]: + zs = self._zeros(10, dt) + zs.resize(25) + zs.resize((10, 10)) + + def test_view(self): + for dt in [bytes, np.void, unicode]: + zs = self._zeros(10, dt) + + # viewing as itself should be allowed + assert_equal(zs.view(dt).dtype, np.dtype(dt)) + + # viewing as any non-empty type gives an empty result + assert_equal(zs.view((dt, 1)).shape, (0,)) + + def test_pickle(self): + import pickle + for dt in [bytes, np.void, unicode]: + zs = self._zeros(10, dt) + p = 
pickle.dumps(zs) + zs2 = pickle.loads(p) + + assert_equal(zs.dtype, zs2.dtype) + + +class TestMethods(object): def test_compress(self): tgt = [[5, 6, 7, 8, 9]] arr = np.arange(10).reshape(2, 5) @@ -1201,8 +1377,8 @@ a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: - self.assertRaises(ArithmeticError, a.prod) - self.assertRaises(ArithmeticError, a2.prod, axis=1) + assert_raises(ArithmeticError, a.prod) + assert_raises(ArithmeticError, a2.prod, axis=1) else: assert_equal(a.prod(axis=0), 26400) assert_array_equal(a2.prod(axis=0), @@ -1283,9 +1459,9 @@ def test_transpose(self): a = np.array([[1, 2], [3, 4]]) assert_equal(a.transpose(), [[1, 3], [2, 4]]) - self.assertRaises(ValueError, lambda: a.transpose(0)) - self.assertRaises(ValueError, lambda: a.transpose(0, 0)) - self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2)) + assert_raises(ValueError, lambda: a.transpose(0)) + assert_raises(ValueError, lambda: a.transpose(0, 0)) + assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) def test_sort(self): # test ordering for floats and complex containing nans. It is only @@ -1381,7 +1557,7 @@ assert_equal(c, a, msg) # test object array sorts. - a = np.empty((101,), dtype=np.object) + a = np.empty((101,), dtype=object) a[:] = list(range(101)) b = a[::-1] for kind in ['q', 'h', 'm']: @@ -1481,7 +1657,7 @@ arr = np.array([0, datetime.now(), 1], dtype=object) for kind in ['q', 'm', 'h']: assert_raises(TypeError, arr.sort, kind=kind) - #gh-3879 + #gh-3879 class Raiser(object): def raises_anything(*args, **kwargs): raise TypeError("SOMETHING ERRORED") @@ -1553,6 +1729,9 @@ assert_equal(r.word, np.array(['my', 'first', 'name'])) assert_equal(r.number, np.array([3.1, 4.5, 6.2])) + assert_raises_regex(ValueError, 'duplicate', + lambda: r.sort(order=['id', 'id'])) + if sys.byteorder == 'little': strtype = '>i2' else: @@ -1624,7 +1803,7 @@ assert_equal(b.copy().argsort(kind=kind), rr, msg) # test object array argsorts. 
- a = np.empty((101,), dtype=np.object) + a = np.empty((101,), dtype=object) a[:] = list(range(101)) b = a[::-1] r = np.arange(101) @@ -1691,7 +1870,7 @@ a = np.zeros(100) assert_equal(a.argsort(kind='m'), r) # complex - a = np.zeros(100, dtype=np.complex) + a = np.zeros(100, dtype=complex) assert_equal(a.argsort(kind='m'), r) # string a = np.array(['aaaaaaaaa' for i in range(100)]) @@ -2041,8 +2220,8 @@ # sorted d = np.arange(49) - self.assertEqual(np.partition(d, 5, kind=k)[5], 5) - self.assertEqual(np.partition(d, 15, kind=k)[15], 15) + assert_equal(np.partition(d, 5, kind=k)[5], 5) + assert_equal(np.partition(d, 15, kind=k)[15], 15) assert_array_equal(d[np.argpartition(d, 5, kind=k)], np.partition(d, 5, kind=k)) assert_array_equal(d[np.argpartition(d, 15, kind=k)], @@ -2050,8 +2229,8 @@ # rsorted d = np.arange(47)[::-1] - self.assertEqual(np.partition(d, 6, kind=k)[6], 6) - self.assertEqual(np.partition(d, 16, kind=k)[16], 16) + assert_equal(np.partition(d, 6, kind=k)[6], 6) + assert_equal(np.partition(d, 16, kind=k)[16], 16) assert_array_equal(d[np.argpartition(d, 6, kind=k)], np.partition(d, 6, kind=k)) assert_array_equal(d[np.argpartition(d, 16, kind=k)], @@ -2091,7 +2270,7 @@ tgt = np.sort(np.arange(47) % 7) np.random.shuffle(d) for i in range(d.size): - self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i]) + assert_equal(np.partition(d, i, kind=k)[i], tgt[i]) assert_array_equal(d[np.argpartition(d, 6, kind=k)], np.partition(d, 6, kind=k)) assert_array_equal(d[np.argpartition(d, 16, kind=k)], @@ -2143,7 +2322,7 @@ for s in (9, 16)] for dt, s in td: aae = assert_array_equal - at = self.assertTrue + at = assert_ d = np.arange(s, dtype=dt) np.random.shuffle(d) @@ -2152,7 +2331,7 @@ d0 = np.transpose(d1) for i in range(d.size): p = np.partition(d, i, kind=k) - self.assertEqual(p[i], i) + assert_equal(p[i], i) # all before are smaller assert_array_less(p[:i], p[i]) # all after are larger @@ -2450,6 +2629,18 @@ assert_raises(ValueError, np.dot, a, b, 
out=b[::2]) assert_raises(ValueError, np.dot, a, b, out=b.T) + def test_dot_matmul_out(self): + # gh-9641 + class Sub(np.ndarray): + pass + a = np.ones((2, 2)).view(Sub) + b = np.ones((2, 2)).view(Sub) + out = np.ones((2, 2)) + + # make sure out can be any ndarray (not only subclass of inputs) + np.dot(a, b, out=out) + np.matmul(a, b, out=out) + def test_diagonal(self): a = np.arange(12).reshape((3, 4)) assert_equal(a.diagonal(), [0, 5, 10]) @@ -2530,7 +2721,7 @@ b = np.arange(8).reshape((2, 2, 2)).view(MyArray) t = b.trace() - assert isinstance(t, MyArray) + assert_(isinstance(t, MyArray)) def test_put(self): icodes = np.typecodes['AllInteger'] @@ -3121,7 +3312,7 @@ a ** 2 -class TestTemporaryElide(TestCase): +class TestTemporaryElide(object): # elision is only triggered on relatively large arrays def test_extension_incref_elide(self): @@ -3192,7 +3383,7 @@ # only triggers elision code path in debug mode as triggering it in # normal mode needs 256kb large matching dimension, so a lot of memory d = np.ones((2000, 1), dtype=int) - b = np.ones((2000), dtype=np.bool) + b = np.ones((2000), dtype=bool) r = (1 - d) + b assert_equal(r, 1) assert_equal(r.shape, (2000, 2000)) @@ -3223,7 +3414,7 @@ assert_equal(a, 1) -class TestCAPI(TestCase): +class TestCAPI(object): def test_IsPythonScalar(self): from numpy.core.multiarray_tests import IsPythonScalar assert_(IsPythonScalar(b'foobar')) @@ -3233,16 +3424,16 @@ assert_(IsPythonScalar("a")) -class TestSubscripting(TestCase): +class TestSubscripting(object): def test_test_zero_rank(self): x = np.array([1, 2, 3]) - self.assertTrue(isinstance(x[0], np.int_)) + assert_(isinstance(x[0], np.int_)) if sys.version_info[0] < 3: - self.assertTrue(isinstance(x[0], int)) - self.assertTrue(type(x[0, ...]) is np.ndarray) + assert_(isinstance(x[0], int)) + assert_(type(x[0, ...]) is np.ndarray) -class TestPickling(TestCase): +class TestPickling(object): def test_roundtrip(self): import pickle carray = np.array([[2, 9], [7, 0], [3, 8]]) @@ 
-3308,7 +3499,7 @@ assert_equal(a, p) -class TestFancyIndexing(TestCase): +class TestFancyIndexing(object): def test_list(self): x = np.ones((1, 1)) x[:, [0]] = 2.0 @@ -3362,7 +3553,7 @@ assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]])) -class TestStringCompare(TestCase): +class TestStringCompare(object): def test_string(self): g1 = np.array(["This", "is", "example"]) g2 = np.array(["This", "was", "example"]) @@ -3394,7 +3585,7 @@ assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) -class TestArgmax(TestCase): +class TestArgmax(object): nan_arr = [ ([0, 1, 2, 3, np.nan], 4), @@ -3470,8 +3661,13 @@ def test_combinations(self): for arr, pos in self.nan_arr: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "invalid value encountered in reduce") + max_val = np.max(arr) + assert_equal(np.argmax(arr), pos, err_msg="%r" % arr) - assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr) + assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr) def test_output_shape(self): # see also gh-616 @@ -3523,7 +3719,7 @@ assert_equal(a.argmax(), 1) -class TestArgmin(TestCase): +class TestArgmin(object): nan_arr = [ ([0, 1, 2, 3, np.nan], 4), @@ -3599,8 +3795,13 @@ def test_combinations(self): for arr, pos in self.nan_arr: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "invalid value encountered in reduce") + min_val = np.min(arr) + assert_equal(np.argmin(arr), pos, err_msg="%r" % arr) - assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr) + assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr) def test_minimum_signed_integers(self): @@ -3666,7 +3867,7 @@ assert_equal(a.argmin(), 1) -class TestMinMax(TestCase): +class TestMinMax(object): def test_scalar(self): assert_raises(np.AxisError, np.amax, 1, 1) @@ -3696,14 +3897,14 @@ assert_equal(np.amax(a), a[0]) -class TestNewaxis(TestCase): +class TestNewaxis(object): def test_basic(self): sk = np.array([0, -0.1, 0.1]) res = 250*sk[:, 
np.newaxis] assert_almost_equal(res.ravel(), 250*sk) -class TestClip(TestCase): +class TestClip(object): def _check_range(self, x, cmin, cmax): assert_(np.all(x >= cmin)) assert_(np.all(x <= cmax)) @@ -3777,7 +3978,7 @@ assert_array_equal(result, expected) -class TestCompress(TestCase): +class TestCompress(object): def test_axis(self): tgt = [[5, 6, 7, 8, 9]] arr = np.arange(10).reshape(2, 5) @@ -3895,7 +4096,7 @@ assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0) -class TestLexsort(TestCase): +class TestLexsort(object): def test_basic(self): a = [1, 2, 1, 3, 1, 5] b = [0, 4, 5, 6, 2, 3] @@ -3942,19 +4143,19 @@ x = np.linspace(0., 1., 42*3).reshape(42, 3) assert_raises(np.AxisError, np.lexsort, x, axis=2) -class TestIO(TestCase): +class TestIO(object): """Test tofile, fromfile, tobytes, and fromstring""" - def setUp(self): + def setup(self): shape = (2, 4, 3) rand = np.random.random - self.x = rand(shape) + rand(shape).astype(np.complex)*1j + self.x = rand(shape) + rand(shape).astype(complex)*1j self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan] self.dtype = self.x.dtype self.tempdir = tempfile.mkdtemp() self.filename = tempfile.mktemp(dir=self.tempdir) - def tearDown(self): + def teardown(self): shutil.rmtree(self.tempdir) def test_nofile(self): @@ -4011,11 +4212,11 @@ def test_roundtrip_binary_str(self): s = self.x.tobytes() - y = np.fromstring(s, dtype=self.dtype) + y = np.frombuffer(s, dtype=self.dtype) assert_array_equal(y, self.x.flat) s = self.x.tobytes('F') - y = np.fromstring(s, dtype=self.dtype) + y = np.frombuffer(s, dtype=self.dtype) assert_array_equal(y, self.x.flatten('F')) def test_roundtrip_str(self): @@ -4043,7 +4244,7 @@ with io.open(self.filename, 'rb', buffering=0) as f: f.seek = fail f.tell = fail - self.assertRaises(IOError, np.fromfile, f, dtype=self.dtype) + assert_raises(IOError, np.fromfile, f, dtype=self.dtype) def test_io_open_unbuffered_fromfile(self): # gh-6632 @@ -4130,7 +4331,10 @@ assert_equal(pos, 10, err_msg=err_msg) def 
_check_from(self, s, value, **kw): - y = np.fromstring(s, **kw) + if 'sep' not in kw: + y = np.frombuffer(s, **kw) + else: + y = np.fromstring(s, **kw) assert_array_equal(y, value) f = open(self.filename, 'wb') @@ -4264,7 +4468,7 @@ def test_ip_basic(self): for byteorder in ['<', '>']: - for dtype in [float, int, np.complex]: + for dtype in [float, int, complex]: dt = np.dtype(dtype).newbyteorder(byteorder) x = (np.random.random((4, 7))*5).astype(dt) buf = x.tobytes() @@ -4274,8 +4478,8 @@ yield self.tst_basic, b'', np.array([]), {} -class TestFlat(TestCase): - def setUp(self): +class TestFlat(object): + def setup(self): a0 = np.arange(20.0) a = a0.reshape(4, 5) a0.shape = (4, 5) @@ -4311,17 +4515,26 @@ assert_(c.flags.writeable is False) assert_(d.flags.writeable is False) + # for 1.14 all are set to non-writeable on the way to replacing the + # UPDATEIFCOPY array returned for non-contiguous arrays. assert_(e.flags.writeable is True) - assert_(f.flags.writeable is True) - - assert_(c.flags.updateifcopy is False) - assert_(d.flags.updateifcopy is False) - assert_(e.flags.updateifcopy is False) - assert_(f.flags.updateifcopy is True) - assert_(f.base is self.b0) + assert_(f.flags.writeable is False) + with assert_warns(DeprecationWarning): + assert_(c.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + assert_(d.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + assert_(e.flags.updateifcopy is False) + with assert_warns(DeprecationWarning): + # UPDATEIFCOPY is removed. 
+ assert_(f.flags.updateifcopy is False) + assert_(c.flags.writebackifcopy is False) + assert_(d.flags.writebackifcopy is False) + assert_(e.flags.writebackifcopy is False) + assert_(f.flags.writebackifcopy is False) -class TestResize(TestCase): +class TestResize(object): def test_basic(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) if IS_PYPY: @@ -4335,7 +4548,7 @@ def test_check_reference(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y = x - self.assertRaises(ValueError, x.resize, (5, 1)) + assert_raises(ValueError, x.resize, (5, 1)) del y # avoid pyflakes unused variable warning. def test_int_shape(self): @@ -4366,10 +4579,10 @@ assert_equal(x.size, 1) def test_invalid_arguments(self): - self.assertRaises(TypeError, np.eye(3).resize, 'hi') - self.assertRaises(ValueError, np.eye(3).resize, -1) - self.assertRaises(TypeError, np.eye(3).resize, order=1) - self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi') + assert_raises(TypeError, np.eye(3).resize, 'hi') + assert_raises(ValueError, np.eye(3).resize, -1) + assert_raises(TypeError, np.eye(3).resize, order=1) + assert_raises(TypeError, np.eye(3).resize, refcheck='hi') def test_freeform_shape(self): x = np.eye(3) @@ -4399,8 +4612,16 @@ assert_array_equal(a['k'][-5:], 0) assert_array_equal(a['k'][:-5], 1) + def test_empty_view(self): + # check that sizes containing a zero don't trigger a reallocate for + # already empty arrays + x = np.zeros((10, 0), int) + x_view = x[...] 
+ x_view.resize((0, 10)) + x_view.resize((0, 100)) -class TestRecord(TestCase): + +class TestRecord(object): def test_field_rename(self): dt = np.dtype([('f', float), ('i', int)]) dt.names = ['p', 'q'] @@ -4421,7 +4642,7 @@ assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) dt = np.dtype([((b'a', 'b'), int)]) - assert_raises(ValueError, dt.__getitem__, b'a') + assert_raises(TypeError, dt.__getitem__, b'a') x = np.array([(1,), (2,), (3,)], dtype=dt) assert_raises(IndexError, x.__getitem__, b'a') @@ -4498,8 +4719,6 @@ b[fn2] = 3 with suppress_warnings() as sup: sup.filter(FutureWarning, - "Assignment between structured arrays.*") - sup.filter(FutureWarning, "Numpy has detected that you .*") assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) @@ -4513,6 +4732,7 @@ view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])] assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,))) + # non-ascii unicode field indexing is well behaved if not is_py3: raise SkipTest('non ascii unicode field indexing skipped; ' @@ -4564,10 +4784,6 @@ assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'), [FutureWarning]) - # make sure assignment using a different dtype warns - a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')]) - b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')]) - assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning]) def test_record_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') @@ -4576,14 +4792,14 @@ b.flags.writeable = False c = np.array([(1, 2), (3, 4)], dtype='i1,i2') c.flags.writeable = False - self.assertTrue(hash(a[0]) == hash(a[1])) - self.assertTrue(hash(a[0]) == hash(b[0])) - self.assertTrue(hash(a[0]) != hash(b[1])) - self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0]) + assert_(hash(a[0]) == hash(a[1])) + assert_(hash(a[0]) == hash(b[0])) + assert_(hash(a[0]) != hash(b[1])) + assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0]) def test_record_no_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') - 
self.assertRaises(TypeError, hash, a[0]) + assert_raises(TypeError, hash, a[0]) def test_empty_structure_creation(self): # make sure these do not raise errors (gh-5631) @@ -4592,7 +4808,7 @@ np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [], 'offsets': [], 'itemsize': 12}) -class TestView(TestCase): +class TestView(object): def test_basic(self): x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=[('r', np.int8), ('g', np.int8), @@ -4617,11 +4833,11 @@ return a.std(**args) -class TestStats(TestCase): +class TestStats(object): funcs = [_mean, _var, _std] - def setUp(self): + def setup(self): np.random.seed(range(3)) self.rmat = np.random.random((4, 5)) self.cmat = self.rmat + 1j * self.rmat @@ -4787,7 +5003,7 @@ def test_mean_float16(self): # This fail if the sum inside mean is done in float16 instead # of float32. - assert _mean(np.ones(100000, dtype='float16')) == 1 + assert_(_mean(np.ones(100000, dtype='float16')) == 1) def test_var_values(self): for mat in [self.rmat, self.cmat, self.omat]: @@ -4824,7 +5040,7 @@ res = dat.var(1) assert_(res.info == dat.info) -class TestVdot(TestCase): +class TestVdot(object): def test_basic(self): dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] dt_complex = np.typecodes['Complex'] @@ -4846,7 +5062,7 @@ assert_equal(np.vdot(b, b), 3) # test boolean - b = np.eye(3, dtype=np.bool) + b = np.eye(3, dtype=bool) res = np.vdot(b, b) assert_(np.isscalar(res)) assert_equal(np.vdot(b, b), True) @@ -4884,8 +5100,8 @@ np.vdot(a.flatten(), b.flatten())) -class TestDot(TestCase): - def setUp(self): +class TestDot(object): + def setup(self): np.random.seed(128) self.A = np.random.rand(4, 2) self.b1 = np.random.rand(2, 1) @@ -5162,7 +5378,7 @@ assert_dot_close(A_f_12, X_f_2, desired) -class MatmulCommon(): +class MatmulCommon(object): """Common tests for '@' operator and numpy.matmul. Do not derive from TestCase to avoid nose running it. 
@@ -5357,23 +5573,23 @@ assert_equal(res, tgt12_21) -class TestMatmul(MatmulCommon, TestCase): +class TestMatmul(MatmulCommon): matmul = np.matmul def test_out_arg(self): - a = np.ones((2, 2), dtype=np.float) - b = np.ones((2, 2), dtype=np.float) - tgt = np.full((2,2), 2, dtype=np.float) + a = np.ones((2, 2), dtype=float) + b = np.ones((2, 2), dtype=float) + tgt = np.full((2,2), 2, dtype=float) # test as positional argument msg = "out positional argument" - out = np.zeros((2, 2), dtype=np.float) + out = np.zeros((2, 2), dtype=float) self.matmul(a, b, out) assert_array_equal(out, tgt, err_msg=msg) # test as keyword argument msg = "out keyword argument" - out = np.zeros((2, 2), dtype=np.float) + out = np.zeros((2, 2), dtype=float) self.matmul(a, b, out=out) assert_array_equal(out, tgt, err_msg=msg) @@ -5396,13 +5612,13 @@ # test out non-contiguous # msg = "out argument with non-contiguous layout" - # c = np.zeros((2, 2, 2), dtype=np.float) + # c = np.zeros((2, 2, 2), dtype=float) # self.matmul(a, b, out=c[..., 0]) # assert_array_equal(c, tgt, err_msg=msg) if sys.version_info[:2] >= (3, 5): - class TestMatmulOperator(MatmulCommon, TestCase): + class TestMatmulOperator(MatmulCommon): import operator matmul = operator.matmul @@ -5437,7 +5653,7 @@ assert_raises(TypeError, exec_, "a @= b", globals(), locals()) -class TestInner(TestCase): +class TestInner(object): def test_inner_type_mismatch(self): c = 1. 
@@ -5530,46 +5746,26 @@ assert_equal(np.inner(b, a).transpose(2,3,0,1), desired) -class TestSummarization(TestCase): - def test_1d(self): - A = np.arange(1001) - strA = '[ 0 1 2 ..., 998 999 1000]' - assert_(str(A) == strA) - - reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' - assert_(repr(A) == reprA) - - def test_2d(self): - A = np.arange(1002).reshape(2, 501) - strA = '[[ 0 1 2 ..., 498 499 500]\n' \ - ' [ 501 502 503 ..., 999 1000 1001]]' - assert_(str(A) == strA) - - reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ - ' [ 501, 502, 503, ..., 999, 1000, 1001]])' - assert_(repr(A) == reprA) - - -class TestAlen(TestCase): +class TestAlen(object): def test_basic(self): m = np.array([1, 2, 3]) - self.assertEqual(np.alen(m), 3) + assert_equal(np.alen(m), 3) m = np.array([[1, 2, 3], [4, 5, 7]]) - self.assertEqual(np.alen(m), 2) + assert_equal(np.alen(m), 2) m = [1, 2, 3] - self.assertEqual(np.alen(m), 3) + assert_equal(np.alen(m), 3) m = [[1, 2, 3], [4, 5, 7]] - self.assertEqual(np.alen(m), 2) + assert_equal(np.alen(m), 2) def test_singleton(self): - self.assertEqual(np.alen(5), 1) + assert_equal(np.alen(5), 1) -class TestChoose(TestCase): - def setUp(self): +class TestChoose(object): + def setup(self): self.x = 2*np.ones((3,), dtype=int) self.y = 3*np.ones((3,), dtype=int) self.x2 = 2*np.ones((2, 3), dtype=int) @@ -5589,8 +5785,8 @@ assert_equal(A, [[2, 2, 3], [2, 2, 3]]) -class TestRepeat(TestCase): - def setUp(self): +class TestRepeat(object): + def setup(self): self.m = np.array([1, 2, 3, 4, 5, 6]) self.m_rect = self.m.reshape((2, 3)) @@ -5630,7 +5826,7 @@ NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} -class TestNeighborhoodIter(TestCase): +class TestNeighborhoodIter(object): # Simple, 2d tests def _test_simple2d(self, dt): # Test zero and one padding for simple data type @@ -5660,7 +5856,7 @@ assert_array_equal(l, r) def test_simple2d(self): - self._test_simple2d(np.float) + self._test_simple2d(float) def 
test_simple2d_object(self): self._test_simple2d(Decimal) @@ -5676,7 +5872,7 @@ assert_array_equal(l, r) def test_mirror2d(self): - self._test_mirror2d(np.float) + self._test_mirror2d(float) def test_mirror2d_object(self): self._test_mirror2d(Decimal) @@ -5698,7 +5894,7 @@ assert_array_equal(l, r) def test_simple_float(self): - self._test_simple(np.float) + self._test_simple(float) def test_simple_object(self): self._test_simple(Decimal) @@ -5709,11 +5905,11 @@ r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror']) - self.assertTrue([i.dtype == dt for i in l]) + assert_([i.dtype == dt for i in l]) assert_array_equal(l, r) def test_mirror(self): - self._test_mirror(np.float) + self._test_mirror(float) def test_mirror_object(self): self._test_mirror(Decimal) @@ -5727,13 +5923,13 @@ assert_array_equal(l, r) def test_circular(self): - self._test_circular(np.float) + self._test_circular(float) def test_circular_object(self): self._test_circular(Decimal) # Test stacking neighborhood iterators -class TestStackedNeighborhoodIter(TestCase): +class TestStackedNeighborhoodIter(object): # Simple, 1d test: stacking 2 constant-padded neigh iterators def test_simple_const(self): dt = np.float64 @@ -6326,6 +6522,19 @@ shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS']) assert_(strides[-1] == 8) + def test_out_of_order_fields(self): + dt = np.dtype(dict( + formats=['= (3, 4): + assert_raises(TypeError, '{:30}'.format, a) + else: + with suppress_warnings() as sup: + sup.filter(PendingDeprecationWarning) + res = '{:30}'.format(a) + dst = object.__format__(a, '30') + assert_equal(res, dst) -class TestCTypes(TestCase): + +class TestCTypes(object): def test_ctypes_is_available(self): test_arr = np.array([[1, 2, 3], [4, 5, 6]]) - self.assertEqual(ctypes, test_arr.ctypes._ctypes) + assert_equal(ctypes, test_arr.ctypes._ctypes) 
assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) def test_ctypes_is_not_available(self): @@ -6905,13 +7192,143 @@ try: test_arr = np.array([[1, 2, 3], [4, 5, 6]]) - self.assertIsInstance( - test_arr.ctypes._ctypes, _internal._missing_ctypes) + assert_(isinstance(test_arr.ctypes._ctypes, + _internal._missing_ctypes)) assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) finally: _internal.ctypes = ctypes +class TestWritebackIfCopy(TestCase): + # all these tests use the WRITEBACKIFCOPY mechanism + def test_argmax_with_out(self): + mat = np.eye(5) + out = np.empty(5, dtype='i2') + res = np.argmax(mat, 0, out=out) + assert_equal(res, range(5)) + + def test_argmin_with_out(self): + mat = -np.eye(5) + out = np.empty(5, dtype='i2') + res = np.argmin(mat, 0, out=out) + assert_equal(res, range(5)) + + def test_clip_with_out(self): + mat = np.eye(5) + out = np.eye(5, dtype='i2') + res = np.clip(mat, a_min=-10, a_max=0, out=out) + assert_equal(np.sum(out), 0) + + def test_insert_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + # uses arr_insert + np.place(a, a>2, [44, 55]) + assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) + + def test_put_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + np.put(a, [0, 2], [44, 55]) + assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) + + def test_putmask_noncontiguous(self): + a = np.arange(6).reshape(2,3).T # force non-c-contiguous + # uses arr_putmask + np.putmask(a, a>2, a**2) + assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) + + def test_take_mode_raise(self): + a = np.arange(6, dtype='int') + out = np.empty(2, dtype='int') + np.take(a, [0, 2], out=out, mode='raise') + assert_equal(out, np.array([0, 2])) + + def test_choose_mod_raise(self): + a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) + out = np.empty((3,3), dtype='int') + choices = [-10, 10] + np.choose(a, choices, out=out, mode='raise') + assert_equal(out, np.array([[ 10, -10, 10], + [-10, 10, -10], + [ 
10, -10, 10]])) + + def test_flatiter__array__(self): + a = np.arange(9).reshape(3,3) + b = a.T.flat + c = b.__array__() + # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics + del c + + def test_dot_out(self): + # if HAVE_CBLAS, will use WRITEBACKIFCOPY + a = np.arange(9, dtype=float).reshape(3,3) + b = np.dot(a, a, out=a) + assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]])) + + def test_view_assign(self): + from numpy.core.multiarray_tests import npy_create_writebackifcopy, npy_resolve + + arr = np.arange(9).reshape(3, 3).T + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] = -100 + npy_resolve(arr_wb) + # arr changes after resolve, even though we assigned to arr_wb + assert_equal(arr, -100) + # after resolve, the two arrays no longer reference each other + assert_(arr_wb.ctypes.data != 0) + assert_equal(arr_wb.base, None) + # assigning to arr_wb does not get transfered to arr + arr_wb[...] = 100 + assert_equal(arr, -100) + + + def test_view_discard_refcount(self): + from numpy.core.multiarray_tests import npy_create_writebackifcopy, npy_discard + + arr = np.arange(9).reshape(3, 3).T + orig = arr.copy() + if HAS_REFCOUNT: + arr_cnt = sys.getrefcount(arr) + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] = -100 + npy_discard(arr_wb) + # arr remains unchanged after discard + assert_equal(arr, orig) + # after discard, the two arrays no longer reference each other + assert_(arr_wb.ctypes.data != 0) + assert_equal(arr_wb.base, None) + if HAS_REFCOUNT: + assert_equal(arr_cnt, sys.getrefcount(arr)) + # assigning to arr_wb does not get transfered to arr + arr_wb[...] 
= 100 + assert_equal(arr, orig) + + +class TestArange(object): + def test_infinite(self): + assert_raises_regex( + ValueError, "size exceeded", + np.arange, 0, np.inf + ) + + def test_nan_step(self): + assert_raises_regex( + ValueError, "cannot compute length", + np.arange, 0, 1, np.nan + ) + + def test_zero_step(self): + assert_raises(ZeroDivisionError, np.arange, 0, 10, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0) + + # empty range + assert_raises(ZeroDivisionError, np.arange, 0, 0, 0) + assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0) + + def test_orderconverter_with_nonASCII_unicode_ordering(): # gh-7475 a = np.arange(5) @@ -6944,5 +7361,46 @@ assert_equal(array != my_always_equal, 'ne') +def test_npymath_complex(): + # Smoketest npymath functions + from numpy.core.multiarray_tests import ( + npy_cabs, npy_carg) + + funcs = {npy_cabs: np.absolute, + npy_carg: np.angle} + vals = (1, np.inf, -np.inf, np.nan) + types = (np.complex64, np.complex128, np.clongdouble) + + for fun, npfun in funcs.items(): + for x, y in itertools.product(vals, vals): + for t in types: + z = t(complex(x, y)) + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + + +def test_npymath_real(): + # Smoketest npymath functions + from numpy.core.multiarray_tests import ( + npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) + + funcs = {npy_log10: np.log10, + npy_cosh: np.cosh, + npy_sinh: np.sinh, + npy_tan: np.tan, + npy_tanh: np.tanh} + vals = (1, np.inf, -np.inf, np.nan) + types = (np.float32, np.float64, np.longdouble) + + with np.errstate(all='ignore'): + for fun, npfun in funcs.items(): + for x, t in itertools.product(vals, types): + z = t(x) + got = fun(z) + expected = npfun(z) + assert_allclose(got, expected) + + if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/core/tests/test_nditer.py python-numpy-1.14.5/numpy/core/tests/test_nditer.py --- python-numpy-1.13.3/numpy/core/tests/test_nditer.py 2017-09-29 
17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_nditer.py 2018-06-12 18:28:52.000000000 +0000 @@ -1816,100 +1816,45 @@ if HAS_REFCOUNT: assert_equal(sys.getrefcount(a[0]), rc) - # struct type -> simple (takes the first value) - sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] - a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) + # single-field struct type -> simple + sdt = [('a', 'f4')] + a = np.array([(5.5,), (8,)], dtype=sdt) i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], casting='unsafe', op_dtypes='i4') assert_equal([x_[()] for x_ in i], [5, 8]) + # make sure multi-field struct type -> simple doesn't work + sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) + assert_raises(ValueError, lambda: ( + nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4'))) + # struct type -> struct type (field-wise copy) sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) - # New in 1.12: This behavior changes in 1.13, test for dep warning - with assert_warns(FutureWarning): - i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], - casting='unsafe', - op_dtypes=sdt2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) assert_equal(i[0].dtype, np.dtype(sdt2)) assert_equal([np.array(x_) for x_ in i], - [np.array((3, 1, 2), dtype=sdt2), - np.array((6, 4, 5), dtype=sdt2)]) + [np.array((1, 2, 3), dtype=sdt2), + np.array((4, 5, 6), dtype=sdt2)]) - # struct type -> struct type (field gets discarded) + # make sure struct type -> struct type with different + # number of fields fails sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] sdt2 = [('b', 'O'), ('a', 'f8')] a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) - # New in 1.12: This behavior changes in 1.13, test for dep warning - with assert_warns(FutureWarning): - i = nditer(a, ['buffered', 
'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2, 1), dtype=sdt2), - np.array((5, 4), dtype=sdt2)]) - assert_equal(a, np.array([(5, 2, None), (8, 5, None)], dtype=sdt1)) - # struct type -> struct type (structured field gets discarded) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'i4')])] - sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1) - # New in 1.12: This behavior changes in 1.13, test for dep warning - with assert_warns(FutureWarning): - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2, 1), dtype=sdt2), - np.array((5, 4), dtype=sdt2)]) - assert_equal(a, np.array([(5, 2, (0, 0)), (8, 5, (0, 0))], dtype=sdt1)) + assert_raises(ValueError, lambda : ( + nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2))) - # struct type -> struct type (structured field w/ ref gets discarded) - sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])] - sdt2 = [('b', 'O'), ('a', 'f8')] - a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1) - # New in 1.12: This behavior changes in 1.13, test for dep warning - with assert_warns(FutureWarning): - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2, 1), dtype=sdt2), - np.array((5, 4), dtype=sdt2)]) - assert_equal(a, np.array([(5, 2, (0, None)), (8, 5, (0, None))], dtype=sdt1)) - - # struct type -> struct type back (structured field w/ ref gets discarded) - sdt1 = [('b', 'O'), 
('a', 'f8')] - sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])] - a = np.array([(1, 2), (4, 5)], dtype=sdt1) - # New in 1.12: This behavior changes in 1.13, test for dep warning - with assert_warns(FutureWarning): - i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], - casting='unsafe', - op_dtypes=sdt2) - assert_equal(i[0].dtype, np.dtype(sdt2)) - vals = [] - for x in i: - vals.append(np.array(x)) - assert_equal(x['d'], np.array((0, None), dtype=[('a', 'i2'), ('b', 'O')])) - x['a'] = x['b']+3 - assert_equal(vals, [np.array((2, 1, (0, None)), dtype=sdt2), - np.array((5, 4, (0, None)), dtype=sdt2)]) - assert_equal(a, np.array([(1, 4), (4, 7)], dtype=sdt1)) def test_iter_buffered_cast_subarray(): # Tests buffering of subarrays @@ -2145,7 +2090,7 @@ op_flags = [('readonly',), ('readwrite', 'allocate')] op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] # wrong dtype to force buffering - op_dtypes = [np.float, a.dtype] + op_dtypes = [float, a.dtype] def get_params(): for xs in range(-3**2, 3**2 + 1): @@ -2201,172 +2146,197 @@ assert_raises(ValueError, nditer, [a, b, c], [], [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) -def test_iter_nested_iters_basic(): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) - - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - -def test_iter_nested_iters_reorder(): - # Test nested iteration basic usage - a = arange(12).reshape(2, 3, 2) - - # In 'K' order (default), it gets reordered - i, j = np.nested_iters(a, [[0], 
[2, 1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[1, 0], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[2, 0], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - # In 'C' order, it doesn't - i, j = np.nested_iters(a, [[0], [2, 1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) - - i, j = np.nested_iters(a, [[1, 0], [2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) - - i, j = np.nested_iters(a, [[2, 0], [1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) - -def test_iter_nested_iters_flip_axes(): - # Test nested iteration with negative axes - a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] - - # In 'K' order (default), the axes all get flipped - i, j = np.nested_iters(a, [[0], [1, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[0, 1], [2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) - - i, j = np.nested_iters(a, [[0, 2], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - # In 'C' order, flipping axes is disabled - i, j = np.nested_iters(a, [[0], [1, 2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) - i, j = np.nested_iters(a, 
[[0, 1], [2]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) +class TestIterNested(object): - i, j = np.nested_iters(a, [[0, 2], [1]], order='C') - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) + def test_basic(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_reorder(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + # In 'K' order (default), it gets reordered + i, j = np.nested_iters(a, [[0], [2, 1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, it doesn't + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], 
[4, 5], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) + + def test_flip_axes(self): + # Test nested iteration with negative axes + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] + + # In 'K' order (default), the axes all get flipped + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, flipping axes is disabled + i, j = np.nested_iters(a, [[0], [1, 2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) + + i, j = np.nested_iters(a, [[0, 1], [2]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) + + i, j = np.nested_iters(a, [[0, 2], [1]], order='C') + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) + + def test_broadcast(self): + # Test nested iteration with broadcasting + a = arange(2).reshape(2, 1) + b = arange(3).reshape(1, 3) + + i, j = np.nested_iters([a, b], [[0], [1]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + + i, j = np.nested_iters([a, b], [[1], [0]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + + def test_dtype_copy(self): 
+ # Test nested iteration with a copy to change dtype + + # copy + a = arange(6, dtype='i4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readonly', 'copy'], + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) + vals = None + + # updateifcopy + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + i, j, x, y = (None,)*4 # force the updateifcopy + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] 
+= 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_0d(self): + a = np.arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[], [1, 0, 2]]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0, 2], []]) + vals = [] + for x in i: + vals.append([y for y in j]) + assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + + i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) + vals = [] + for x in i: + for y in j: + vals.append([z for z in k]) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) -def test_iter_nested_iters_broadcast(): - # Test nested iteration with broadcasting - a = arange(2).reshape(2, 1) - b = arange(3).reshape(1, 3) - - i, j = np.nested_iters([a, b], [[0], [1]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) - - i, j = np.nested_iters([a, b], [[1], [0]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) - -def test_iter_nested_iters_dtype_copy(): - # Test nested iteration with a copy to change dtype - - # copy - a = arange(6, dtype='i4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readonly', 'copy'], - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) - vals = None - - # updateifcopy - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - op_flags=['readwrite', 'updateifcopy'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] 
+= 1 - assert_equal(a, [[0, 1, 2], [3, 4, 5]]) - i, j, x, y = (None,)*4 # force the updateifcopy - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) - -def test_iter_nested_iters_dtype_buffered(): - # Test nested iteration with buffering to change dtype - - a = arange(6, dtype='f4').reshape(2, 3) - i, j = np.nested_iters(a, [[0], [1]], - flags=['buffered'], - op_flags=['readwrite'], - casting='same_kind', - op_dtypes='f8') - assert_equal(j[0].dtype, np.dtype('f8')) - for x in i: - for y in j: - y[...] += 1 - assert_equal(a, [[1, 2, 3], [4, 5, 6]]) def test_iter_reduction_error(): @@ -2641,7 +2611,7 @@ del it[1:2] except TypeError: pass - except: + except Exception: raise AssertionError def test_iter_allocated_array_dtypes(): @@ -2694,28 +2664,6 @@ assert_equal(vals['d'], 0.5) -def test_0d_nested_iter(): - a = np.arange(12).reshape(2, 3, 2) - i, j = np.nested_iters(a, [[], [1, 0, 2]]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) - - i, j = np.nested_iters(a, [[1, 0, 2], []]) - vals = [] - for x in i: - vals.append([y for y in j]) - assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) - - i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) - vals = [] - for x in i: - for y in j: - vals.append([z for z in k]) - assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) - - def test_iter_too_large(): # The total size of the iterator must not exceed the maximum intp due # to broadcasting. 
Dividing by 1024 will keep it small enough to diff -Nru python-numpy-1.13.3/numpy/core/tests/test_numeric.py python-numpy-1.14.5/numpy/core/tests/test_numeric.py --- python-numpy-1.13.3/numpy/core/tests/test_numeric.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_numeric.py 2018-06-12 18:28:52.000000000 +0000 @@ -10,13 +10,13 @@ from numpy.core import umath from numpy.random import rand, randint, randn from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_raises, + run_module_suite, assert_, assert_equal, assert_raises, assert_raises_regex, assert_array_equal, assert_almost_equal, assert_array_almost_equal, dec, HAS_REFCOUNT, suppress_warnings ) -class TestResize(TestCase): +class TestResize(object): def test_copies(self): A = np.array([[1, 2], [3, 4]]) Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) @@ -34,6 +34,12 @@ assert_array_equal(Ar, np.array([])) assert_equal(A.dtype, Ar.dtype) + Ar = np.resize(A, (0, 2)) + assert_equal(Ar.shape, (0, 2)) + + Ar = np.resize(A, (2, 0)) + assert_equal(Ar.shape, (2, 0)) + def test_reshape_from_zero(self): # See also gh-6740 A = np.zeros(0, dtype=[('a', np.float32, 1)]) @@ -42,7 +48,7 @@ assert_equal(A.dtype, Ar.dtype) -class TestNonarrayArgs(TestCase): +class TestNonarrayArgs(object): # check that non-array arguments to functions wrap them in arrays def test_choose(self): choices = [[0, 1, 2], @@ -202,45 +208,61 @@ assert_(w[0].category is RuntimeWarning) -class TestBoolScalar(TestCase): +class TestIsscalar(object): + def test_isscalar(self): + assert_(np.isscalar(3.1)) + assert_(np.isscalar(np.int16(12345))) + assert_(np.isscalar(False)) + assert_(np.isscalar('numpy')) + assert_(not np.isscalar([3.1])) + assert_(not np.isscalar(None)) + + # PEP 3141 + from fractions import Fraction + assert_(np.isscalar(Fraction(5, 17))) + from numbers import Number + assert_(np.isscalar(Number())) + + +class TestBoolScalar(object): def test_logical(self): f = np.False_ t = 
np.True_ s = "xyz" - self.assertTrue((t and s) is s) - self.assertTrue((f and s) is f) + assert_((t and s) is s) + assert_((f and s) is f) def test_bitwise_or(self): f = np.False_ t = np.True_ - self.assertTrue((t | t) is t) - self.assertTrue((f | t) is t) - self.assertTrue((t | f) is t) - self.assertTrue((f | f) is f) + assert_((t | t) is t) + assert_((f | t) is t) + assert_((t | f) is t) + assert_((f | f) is f) def test_bitwise_and(self): f = np.False_ t = np.True_ - self.assertTrue((t & t) is t) - self.assertTrue((f & t) is f) - self.assertTrue((t & f) is f) - self.assertTrue((f & f) is f) + assert_((t & t) is t) + assert_((f & t) is f) + assert_((t & f) is f) + assert_((f & f) is f) def test_bitwise_xor(self): f = np.False_ t = np.True_ - self.assertTrue((t ^ t) is f) - self.assertTrue((f ^ t) is t) - self.assertTrue((t ^ f) is t) - self.assertTrue((f ^ f) is f) + assert_((t ^ t) is f) + assert_((f ^ t) is t) + assert_((t ^ f) is t) + assert_((f ^ f) is f) -class TestBoolArray(TestCase): - def setUp(self): +class TestBoolArray(object): + def setup(self): # offset for simd tests - self.t = np.array([True] * 41, dtype=np.bool)[1::] - self.f = np.array([False] * 41, dtype=np.bool)[1::] - self.o = np.array([False] * 42, dtype=np.bool)[2::] + self.t = np.array([True] * 41, dtype=bool)[1::] + self.f = np.array([False] * 41, dtype=bool)[1::] + self.o = np.array([False] * 42, dtype=bool)[2::] self.nm = self.f.copy() self.im = self.t.copy() self.nm[3] = True @@ -249,31 +271,31 @@ self.im[-2] = False def test_all_any(self): - self.assertTrue(self.t.all()) - self.assertTrue(self.t.any()) - self.assertFalse(self.f.all()) - self.assertFalse(self.f.any()) - self.assertTrue(self.nm.any()) - self.assertTrue(self.im.any()) - self.assertFalse(self.nm.all()) - self.assertFalse(self.im.all()) + assert_(self.t.all()) + assert_(self.t.any()) + assert_(not self.f.all()) + assert_(not self.f.any()) + assert_(self.nm.any()) + assert_(self.im.any()) + assert_(not self.nm.all()) + 
assert_(not self.im.all()) # check bad element in all positions for i in range(256 - 7): - d = np.array([False] * 256, dtype=np.bool)[7::] + d = np.array([False] * 256, dtype=bool)[7::] d[i] = True - self.assertTrue(np.any(d)) - e = np.array([True] * 256, dtype=np.bool)[7::] + assert_(np.any(d)) + e = np.array([True] * 256, dtype=bool)[7::] e[i] = False - self.assertFalse(np.all(e)) + assert_(not np.all(e)) assert_array_equal(e, ~d) # big array test for blocked libc loops for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: - d = np.array([False] * 100043, dtype=np.bool) + d = np.array([False] * 100043, dtype=bool) d[i] = True - self.assertTrue(np.any(d), msg="%r" % i) - e = np.array([True] * 100043, dtype=np.bool) + assert_(np.any(d), msg="%r" % i) + e = np.array([True] * 100043, dtype=bool) e[i] = False - self.assertFalse(np.all(e), msg="%r" % i) + assert_(not np.all(e), msg="%r" % i) def test_logical_not_abs(self): assert_array_equal(~self.t, self.f) @@ -322,12 +344,12 @@ assert_array_equal(self.im ^ False, self.im) -class TestBoolCmp(TestCase): - def setUp(self): +class TestBoolCmp(object): + def setup(self): self.f = np.ones(256, dtype=np.float32) - self.ef = np.ones(self.f.size, dtype=np.bool) + self.ef = np.ones(self.f.size, dtype=bool) self.d = np.ones(128, dtype=np.float64) - self.ed = np.ones(self.d.size, dtype=np.bool) + self.ed = np.ones(self.d.size, dtype=bool) # generate values for all permutation of 256bit simd vectors s = 0 for i in range(32): @@ -422,28 +444,28 @@ assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) -class TestSeterr(TestCase): +class TestSeterr(object): def test_default(self): err = np.geterr() - self.assertEqual(err, dict( - divide='warn', - invalid='warn', - over='warn', - under='ignore', - )) + assert_equal(err, + dict(divide='warn', + invalid='warn', + over='warn', + under='ignore') + ) def test_set(self): with np.errstate(): err = np.seterr() old = np.seterr(divide='print') - self.assertTrue(err == old) + 
assert_(err == old) new = np.seterr() - self.assertTrue(new['divide'] == 'print') + assert_(new['divide'] == 'print') np.seterr(over='raise') - self.assertTrue(np.geterr()['over'] == 'raise') - self.assertTrue(new['divide'] == 'print') + assert_(np.geterr()['over'] == 'raise') + assert_(new['divide'] == 'print') np.seterr(**old) - self.assertTrue(np.geterr() == old) + assert_(np.geterr() == old) @dec.skipif(platform.machine() == "armv5tel", "See gh-413.") def test_divide_err(self): @@ -466,7 +488,7 @@ with np.errstate(divide='warn'): np.seterrobj([20000, 1, None]) np.array([1.]) / np.array([0.]) - self.assertEqual(len(w), 1) + assert_equal(len(w), 1) def log_err(*args): self.called += 1 @@ -477,12 +499,12 @@ with np.errstate(divide='ignore'): np.seterrobj([20000, 3, log_err]) np.array([1.]) / np.array([0.]) - self.assertEqual(self.called, 1) + assert_equal(self.called, 1) np.seterrobj(olderrobj) with np.errstate(divide='ignore'): np.divide(1., 0., extobj=[20000, 3, log_err]) - self.assertEqual(self.called, 2) + assert_equal(self.called, 2) finally: np.seterrobj(olderrobj) del self.called @@ -506,7 +528,7 @@ np.seterrobj(olderrobj) -class TestFloatExceptions(TestCase): +class TestFloatExceptions(object): def assert_raises_fpe(self, fpeerr, flop, x, y): ftype = type(x) try: @@ -590,20 +612,20 @@ warnings.simplefilter("always") with np.errstate(all="warn"): np.divide(1, 0.) 
- self.assertEqual(len(w), 1) - self.assertTrue("divide by zero" in str(w[0].message)) + assert_equal(len(w), 1) + assert_("divide by zero" in str(w[0].message)) np.array(1e300) * np.array(1e300) - self.assertEqual(len(w), 2) - self.assertTrue("overflow" in str(w[-1].message)) + assert_equal(len(w), 2) + assert_("overflow" in str(w[-1].message)) np.array(np.inf) - np.array(np.inf) - self.assertEqual(len(w), 3) - self.assertTrue("invalid value" in str(w[-1].message)) + assert_equal(len(w), 3) + assert_("invalid value" in str(w[-1].message)) np.array(1e-300) * np.array(1e-300) - self.assertEqual(len(w), 4) - self.assertTrue("underflow" in str(w[-1].message)) + assert_equal(len(w), 4) + assert_("underflow" in str(w[-1].message)) -class TestTypes(TestCase): +class TestTypes(object): def check_promotion_cases(self, promote_func): # tests that the scalars get coerced correctly. b = np.bool_(0) @@ -794,8 +816,8 @@ def test_can_cast(self): assert_(np.can_cast(np.int32, np.int64)) - assert_(np.can_cast(np.float64, np.complex)) - assert_(not np.can_cast(np.complex, np.float)) + assert_(np.can_cast(np.float64, complex)) + assert_(not np.can_cast(complex, float)) assert_(np.can_cast('i8', 'f8')) assert_(not np.can_cast('i8', 'f4')) @@ -866,13 +888,30 @@ assert_raises(TypeError, np.can_cast, 'i4', None) assert_raises(TypeError, np.can_cast, None, 'i4') + # Also test keyword arguments + assert_(np.can_cast(from_=np.int32, to=np.int64)) + + def test_can_cast_values(self): + # gh-5917 + for dt in np.sctypes['int'] + np.sctypes['uint']: + ii = np.iinfo(dt) + assert_(np.can_cast(ii.min, dt)) + assert_(np.can_cast(ii.max, dt)) + assert_(not np.can_cast(ii.min - 1, dt)) + assert_(not np.can_cast(ii.max + 1, dt)) + + for dt in np.sctypes['float']: + fi = np.finfo(dt) + assert_(np.can_cast(fi.min, dt)) + assert_(np.can_cast(fi.max, dt)) + # Custom exception class to test exception propagation in fromiter class NIterError(Exception): pass -class TestFromiter(TestCase): +class 
TestFromiter(object): def makegen(self): for x in range(24): yield x**2 @@ -881,25 +920,25 @@ ai32 = np.fromiter(self.makegen(), np.int32) ai64 = np.fromiter(self.makegen(), np.int64) af = np.fromiter(self.makegen(), float) - self.assertTrue(ai32.dtype == np.dtype(np.int32)) - self.assertTrue(ai64.dtype == np.dtype(np.int64)) - self.assertTrue(af.dtype == np.dtype(float)) + assert_(ai32.dtype == np.dtype(np.int32)) + assert_(ai64.dtype == np.dtype(np.int64)) + assert_(af.dtype == np.dtype(float)) def test_lengths(self): expected = np.array(list(self.makegen())) a = np.fromiter(self.makegen(), int) a20 = np.fromiter(self.makegen(), int, 20) - self.assertTrue(len(a) == len(expected)) - self.assertTrue(len(a20) == 20) - self.assertRaises(ValueError, np.fromiter, + assert_(len(a) == len(expected)) + assert_(len(a20) == 20) + assert_raises(ValueError, np.fromiter, self.makegen(), int, len(expected) + 10) def test_values(self): expected = np.array(list(self.makegen())) a = np.fromiter(self.makegen(), int) a20 = np.fromiter(self.makegen(), int, 20) - self.assertTrue(np.alltrue(a == expected, axis=0)) - self.assertTrue(np.alltrue(a20 == expected[:20], axis=0)) + assert_(np.alltrue(a == expected, axis=0)) + assert_(np.alltrue(a20 == expected[:20], axis=0)) def load_data(self, n, eindex): # Utility method for the issue 2592 tests. @@ -912,18 +951,18 @@ def test_2592(self): # Test iteration exceptions are correctly raised. count, eindex = 10, 5 - self.assertRaises(NIterError, np.fromiter, + assert_raises(NIterError, np.fromiter, self.load_data(count, eindex), dtype=int, count=count) def test_2592_edge(self): # Test iter. exceptions, edge case (exception at end of iterator). 
count = 10 eindex = count-1 - self.assertRaises(NIterError, np.fromiter, + assert_raises(NIterError, np.fromiter, self.load_data(count, eindex), dtype=int, count=count) -class TestNonzero(TestCase): +class TestNonzero(object): def test_nonzero_trivial(self): assert_equal(np.count_nonzero(np.array([])), 0) assert_equal(np.count_nonzero(np.array([], dtype='?')), 0) @@ -975,11 +1014,11 @@ def test_sparse(self): # test special sparse condition boolean code path for i in range(20): - c = np.zeros(200, dtype=np.bool) + c = np.zeros(200, dtype=bool) c[i::20] = True assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) - c = np.zeros(400, dtype=np.bool) + c = np.zeros(400, dtype=bool) c[10 + i:20 + i] = True c[20 + i*2] = True assert_equal(np.nonzero(c)[0], @@ -1093,7 +1132,7 @@ rng = np.random.RandomState(1234) m = rng.randint(-100, 100, size=size) - n = m.astype(np.object) + n = m.astype(object) for length in range(len(axis)): for combo in combinations(axis, length): @@ -1103,6 +1142,10 @@ np.count_nonzero(n, axis=perm), err_msg=msg % (perm,)) + def test_countnonzero_axis_empty(self): + a = np.array([[0, 0, 1], [1, 0, 1]]) + assert_equal(np.count_nonzero(a, axis=()), a.astype(bool)) + def test_array_method(self): # Tests that the array method # call to nonzero works @@ -1125,7 +1168,7 @@ assert_raises(ValueError, np.nonzero, np.array([BoolErrors()])) -class TestIndex(TestCase): +class TestIndex(object): def test_boolean(self): a = rand(3, 5, 8) V = rand(5, 8) @@ -1142,7 +1185,7 @@ assert_equal(c.dtype, np.dtype('int32')) -class TestBinaryRepr(TestCase): +class TestBinaryRepr(object): def test_zero(self): assert_equal(np.binary_repr(0), '0') @@ -1179,7 +1222,7 @@ assert_equal(np.binary_repr(num, width=width), exp) -class TestBaseRepr(TestCase): +class TestBaseRepr(object): def test_base3(self): assert_equal(np.base_repr(3**5, 3), '100000') @@ -1195,13 +1238,13 @@ assert_equal(np.base_repr(-12, 4), '-30') def test_base_range(self): - with self.assertRaises(ValueError): 
+ with assert_raises(ValueError): np.base_repr(1, 1) - with self.assertRaises(ValueError): + with assert_raises(ValueError): np.base_repr(1, 37) -class TestArrayComparisons(TestCase): +class TestArrayComparisons(object): def test_array_equal(self): res = np.array_equal(np.array([1, 2]), np.array([1, 2])) assert_(res) @@ -1276,13 +1319,13 @@ assert_(x.flags.writeable == y.flags.writeable) assert_(x.flags.c_contiguous == y.flags.c_contiguous) assert_(x.flags.f_contiguous == y.flags.f_contiguous) - assert_(x.flags.updateifcopy == y.flags.updateifcopy) + assert_(x.flags.writebackifcopy == y.flags.writebackifcopy) # check endianness assert_(x.dtype.isnative == y.dtype.isnative) -class TestClip(TestCase): - def setUp(self): +class TestClip(object): + def setup(self): self.nr = 5 self.nc = 3 @@ -1397,7 +1440,7 @@ # Address Issue gh-5354 for clipping complex arrays # Test native complex input without explicit min/max # ie, either min=None or max=None - a = np.ones(10, dtype=np.complex) + a = np.ones(10, dtype=complex) m = a.min() M = a.max() am = self.fastclip(a, m, None) @@ -1708,7 +1751,7 @@ a2 = np.clip(a, m, M, out=a) self.clip(a, m, M, ac) assert_array_strict_equal(a2, ac) - self.assertTrue(a2 is a) + assert_(a2 is a) def test_clip_nan(self): d = np.arange(7.) 
@@ -1723,10 +1766,10 @@ rtol = 1e-5 atol = 1e-8 - def setUp(self): + def setup(self): self.olderr = np.seterr(invalid='ignore') - def tearDown(self): + def teardown(self): np.seterr(**self.olderr) def tst_allclose(self, x, y): @@ -1937,13 +1980,13 @@ def test_non_finite_scalar(self): # GH7014, when two scalars are compared the output should also be a # scalar - assert_(np.isclose(np.inf, -np.inf) is False) - assert_(np.isclose(0, np.inf) is False) - assert_(type(np.isclose(0, np.inf)) is bool) + assert_(np.isclose(np.inf, -np.inf) is np.False_) + assert_(np.isclose(0, np.inf) is np.False_) + assert_(type(np.isclose(0, np.inf)) is np.bool_) -class TestStdVar(TestCase): - def setUp(self): +class TestStdVar(object): + def setup(self): self.A = np.array([1, -1, 1, -1]) self.real_var = 1 @@ -1981,7 +2024,7 @@ assert_array_equal(r, out) -class TestStdVarComplex(TestCase): +class TestStdVarComplex(object): def test_basic(self): A = np.array([1, 1.j, -1, -1.j]) real_var = 1 @@ -1993,10 +2036,10 @@ assert_equal(np.std(1j), 0) -class TestCreationFuncs(TestCase): +class TestCreationFuncs(object): # Test ones, zeros, empty and full. 
- def setUp(self): + def setup(self): dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())} # void, bytes, str variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} @@ -2064,10 +2107,10 @@ assert_(sys.getrefcount(dim) == beg) -class TestLikeFuncs(TestCase): +class TestLikeFuncs(object): '''Test ones_like, zeros_like, empty_like and full_like''' - def setUp(self): + def setup(self): self.data = [ # Array scalars (np.array(3.), None), @@ -2182,7 +2225,7 @@ self.check_like_function(np.full_like, np.inf, True) -class TestCorrelate(TestCase): +class TestCorrelate(object): def _setup(self, dt): self.x = np.array([1, 2, 3, 4, 5], dtype=dt) self.xs = np.arange(1, 20)[::3] @@ -2196,7 +2239,7 @@ -102., -54., -19.], dtype=dt) def test_float(self): - self._setup(np.float) + self._setup(float) z = np.correlate(self.x, self.y, 'full') assert_array_almost_equal(z, self.z1) z = np.correlate(self.x, self.y[:-1], 'full') @@ -2225,15 +2268,15 @@ assert_array_equal(k, np.ones(3)) def test_complex(self): - x = np.array([1, 2, 3, 4+1j], dtype=np.complex) - y = np.array([-1, -2j, 3+1j], dtype=np.complex) - r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex) + x = np.array([1, 2, 3, 4+1j], dtype=complex) + y = np.array([-1, -2j, 3+1j], dtype=complex) + r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex) r_z = r_z[::-1].conjugate() z = np.correlate(y, x, mode='full') assert_array_almost_equal(z, r_z) -class TestConvolve(TestCase): +class TestConvolve(object): def test_object(self): d = [1.] * 100 k = [1.] 
* 3 @@ -2275,7 +2318,7 @@ assert_equal(str(a), "[1]") -class TestRoll(TestCase): +class TestRoll(object): def test_roll1d(self): x = np.arange(10) xr = np.roll(x, 2) @@ -2333,7 +2376,7 @@ assert_equal(np.roll(x, 1), np.array([])) -class TestRollaxis(TestCase): +class TestRollaxis(object): # expected shape indexed by (axis, start) for array of # shape (1, 2, 3, 4) @@ -2395,7 +2438,7 @@ assert_(not res.flags['OWNDATA']) -class TestMoveaxis(TestCase): +class TestMoveaxis(object): def test_move_to_end(self): x = np.random.randn(5, 6, 7) for source, expected in [(0, (6, 7, 5)), @@ -2469,7 +2512,7 @@ assert_(isinstance(result, np.ndarray)) -class TestCross(TestCase): +class TestCross(object): def test_2x2(self): u = [1, 2] v = [3, 4] @@ -2632,7 +2675,7 @@ yield self.set_and_check_flag, flag, None, a -class TestBroadcast(TestCase): +class TestBroadcast(object): def test_broadcast_in_args(self): # gh-5881 arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)), @@ -2669,7 +2712,7 @@ assert_equal(mit.numiter, j) -class TestKeepdims(TestCase): +class TestKeepdims(object): class sub_array(np.ndarray): def sum(self, axis=None, dtype=None, out=None): @@ -2681,5 +2724,16 @@ assert_raises(TypeError, np.sum, x, keepdims=True) +class TestTensordot(object): + + def test_zero_dimension(self): + # Test resolution to issue #5663 + a = np.ndarray((3,0)) + b = np.ndarray((0,4)) + td = np.tensordot(a, b, (1, 0)) + assert_array_equal(td, np.dot(a, b)) + assert_array_equal(td, np.einsum('ij,jk', a, b)) + + if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/core/tests/test_numerictypes.py python-numpy-1.14.5/numpy/core/tests/test_numerictypes.py --- python-numpy-1.13.3/numpy/core/tests/test_numerictypes.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_numerictypes.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,10 +1,11 @@ from __future__ import division, absolute_import, print_function import sys +import itertools import 
numpy as np from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal + run_module_suite, assert_, assert_equal, assert_raises ) # This is the structure of the table used for plain objects: @@ -102,99 +103,99 @@ # Creation tests ############################################################ -class create_zeros(object): +class CreateZeros(object): """Check the creation of heterogeneous arrays zero-valued""" def test_zeros0D(self): """Check creation of 0-dimensional objects""" h = np.zeros((), dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - self.assertTrue(h.dtype.fields['x'][0].name[:4] == 'void') - self.assertTrue(h.dtype.fields['x'][0].char == 'V') - self.assertTrue(h.dtype.fields['x'][0].type == np.void) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype.fields['x'][0].name[:4] == 'void') + assert_(h.dtype.fields['x'][0].char == 'V') + assert_(h.dtype.fields['x'][0].type == np.void) # A small check that data is ok assert_equal(h['z'], np.zeros((), dtype='u1')) def test_zerosSD(self): """Check creation of single-dimensional objects""" h = np.zeros((2,), dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - self.assertTrue(h.dtype['y'].name[:4] == 'void') - self.assertTrue(h.dtype['y'].char == 'V') - self.assertTrue(h.dtype['y'].type == np.void) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['y'].name[:4] == 'void') + assert_(h.dtype['y'].char == 'V') + assert_(h.dtype['y'].type == np.void) # A small check that data is ok assert_equal(h['z'], np.zeros((2,), dtype='u1')) def test_zerosMD(self): """Check creation of multi-dimensional objects""" h = np.zeros((2, 3), dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) - self.assertTrue(h.dtype['z'].name == 'uint8') - self.assertTrue(h.dtype['z'].char == 'B') - self.assertTrue(h.dtype['z'].type == np.uint8) + assert_(normalize_descr(self._descr) == 
h.dtype.descr) + assert_(h.dtype['z'].name == 'uint8') + assert_(h.dtype['z'].char == 'B') + assert_(h.dtype['z'].type == np.uint8) # A small check that data is ok assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) -class test_create_zeros_plain(create_zeros, TestCase): +class TestCreateZerosPlain(CreateZeros): """Check the creation of heterogeneous arrays zero-valued (plain)""" _descr = Pdescr -class test_create_zeros_nested(create_zeros, TestCase): +class TestCreateZerosNested(CreateZeros): """Check the creation of heterogeneous arrays zero-valued (nested)""" _descr = Ndescr -class create_values(object): +class CreateValues(object): """Check the creation of heterogeneous arrays with values""" def test_tuple(self): """Check creation from tuples""" h = np.array(self._buffer, dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) if self.multiple_rows: - self.assertTrue(h.shape == (2,)) + assert_(h.shape == (2,)) else: - self.assertTrue(h.shape == ()) + assert_(h.shape == ()) def test_list_of_tuple(self): """Check creation from list of tuples""" h = np.array([self._buffer], dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) if self.multiple_rows: - self.assertTrue(h.shape == (1, 2)) + assert_(h.shape == (1, 2)) else: - self.assertTrue(h.shape == (1,)) + assert_(h.shape == (1,)) def test_list_of_list_of_tuple(self): """Check creation from list of list of tuples""" h = np.array([[self._buffer]], dtype=self._descr) - self.assertTrue(normalize_descr(self._descr) == h.dtype.descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) if self.multiple_rows: - self.assertTrue(h.shape == (1, 1, 2)) + assert_(h.shape == (1, 1, 2)) else: - self.assertTrue(h.shape == (1, 1)) + assert_(h.shape == (1, 1)) -class test_create_values_plain_single(create_values, TestCase): +class 
TestCreateValuesPlainSingle(CreateValues): """Check the creation of heterogeneous arrays (plain, single row)""" _descr = Pdescr multiple_rows = 0 _buffer = PbufferT[0] -class test_create_values_plain_multiple(create_values, TestCase): +class TestCreateValuesPlainMultiple(CreateValues): """Check the creation of heterogeneous arrays (plain, multiple rows)""" _descr = Pdescr multiple_rows = 1 _buffer = PbufferT -class test_create_values_nested_single(create_values, TestCase): +class TestCreateValuesNestedSingle(CreateValues): """Check the creation of heterogeneous arrays (nested, single row)""" _descr = Ndescr multiple_rows = 0 _buffer = NbufferT[0] -class test_create_values_nested_multiple(create_values, TestCase): +class TestCreateValuesNestedMultiple(CreateValues): """Check the creation of heterogeneous arrays (nested, multiple rows)""" _descr = Ndescr multiple_rows = 1 @@ -205,18 +206,18 @@ # Reading tests ############################################################ -class read_values_plain(object): +class ReadValuesPlain(object): """Check the reading of values in heterogeneous arrays (plain)""" def test_access_fields(self): h = np.array(self._buffer, dtype=self._descr) if not self.multiple_rows: - self.assertTrue(h.shape == ()) + assert_(h.shape == ()) assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) else: - self.assertTrue(len(h) == 2) + assert_(len(h) == 2) assert_equal(h['x'], np.array([self._buffer[0][0], self._buffer[1][0]], dtype='i4')) assert_equal(h['y'], np.array([self._buffer[0][1], @@ -225,31 +226,31 @@ self._buffer[1][2]], dtype='u1')) -class test_read_values_plain_single(read_values_plain, TestCase): +class TestReadValuesPlainSingle(ReadValuesPlain): """Check the creation of heterogeneous arrays (plain, single row)""" _descr = Pdescr multiple_rows = 0 _buffer = PbufferT[0] -class 
test_read_values_plain_multiple(read_values_plain, TestCase): +class TestReadValuesPlainMultiple(ReadValuesPlain): """Check the values of heterogeneous arrays (plain, multiple rows)""" _descr = Pdescr multiple_rows = 1 _buffer = PbufferT -class read_values_nested(object): +class ReadValuesNested(object): """Check the reading of values in heterogeneous arrays (nested)""" def test_access_top_fields(self): """Check reading the top fields of a nested array""" h = np.array(self._buffer, dtype=self._descr) if not self.multiple_rows: - self.assertTrue(h.shape == ()) + assert_(h.shape == ()) assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) assert_equal(h['y'], np.array(self._buffer[4], dtype='f8')) assert_equal(h['z'], np.array(self._buffer[5], dtype='u1')) else: - self.assertTrue(len(h) == 2) + assert_(len(h) == 2) assert_equal(h['x'], np.array([self._buffer[0][0], self._buffer[1][0]], dtype='i4')) assert_equal(h['y'], np.array([self._buffer[0][4], @@ -308,41 +309,41 @@ def test_nested1_descriptor(self): """Check access nested descriptors of a nested array (1st level)""" h = np.array(self._buffer, dtype=self._descr) - self.assertTrue(h.dtype['Info']['value'].name == 'complex128') - self.assertTrue(h.dtype['Info']['y2'].name == 'float64') + assert_(h.dtype['Info']['value'].name == 'complex128') + assert_(h.dtype['Info']['y2'].name == 'float64') if sys.version_info[0] >= 3: - self.assertTrue(h.dtype['info']['Name'].name == 'str256') + assert_(h.dtype['info']['Name'].name == 'str256') else: - self.assertTrue(h.dtype['info']['Name'].name == 'unicode256') - self.assertTrue(h.dtype['info']['Value'].name == 'complex128') + assert_(h.dtype['info']['Name'].name == 'unicode256') + assert_(h.dtype['info']['Value'].name == 'complex128') def test_nested2_descriptor(self): """Check access nested descriptors of a nested array (2nd level)""" h = np.array(self._buffer, dtype=self._descr) - self.assertTrue(h.dtype['Info']['Info2']['value'].name == 'void256') - 
self.assertTrue(h.dtype['Info']['Info2']['z3'].name == 'void64') + assert_(h.dtype['Info']['Info2']['value'].name == 'void256') + assert_(h.dtype['Info']['Info2']['z3'].name == 'void64') -class test_read_values_nested_single(read_values_nested, TestCase): +class TestReadValuesNestedSingle(ReadValuesNested): """Check the values of heterogeneous arrays (nested, single row)""" _descr = Ndescr multiple_rows = False _buffer = NbufferT[0] -class test_read_values_nested_multiple(read_values_nested, TestCase): +class TestReadValuesNestedMultiple(ReadValuesNested): """Check the values of heterogeneous arrays (nested, multiple rows)""" _descr = Ndescr multiple_rows = True _buffer = NbufferT -class TestEmptyField(TestCase): +class TestEmptyField(object): def test_assign(self): a = np.arange(10, dtype=np.float32) a.dtype = [("int", "<0i4"), ("float", "<2f4")] assert_(a['int'].shape == (5, 0)) assert_(a['float'].shape == (5, 2)) -class TestCommonType(TestCase): +class TestCommonType(object): def test_scalar_loses1(self): res = np.find_common_type(['f4', 'f4', 'i2'], ['f8']) assert_(res == 'f4') @@ -363,19 +364,50 @@ res = np.find_common_type(['u8', 'i8', 'i8'], ['f8']) assert_(res == 'f8') -class TestMultipleFields(TestCase): - def setUp(self): +class TestMultipleFields(object): + def setup(self): self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') def _bad_call(self): return self.ary['f0', 'f1'] def test_no_tuple(self): - self.assertRaises(IndexError, self._bad_call) + assert_raises(IndexError, self._bad_call) def test_return(self): res = self.ary[['f0', 'f2']].tolist() assert_(res == [(1, 3), (5, 7)]) + +class TestIsSubDType(object): + # scalar types can be promoted into dtypes + wrappers = [np.dtype, lambda x: x] + + def test_both_abstract(self): + assert_(np.issubdtype(np.floating, np.inexact)) + assert_(not np.issubdtype(np.inexact, np.floating)) + + def test_same(self): + for cls in (np.float32, np.int32): + for w1, w2 in 
itertools.product(self.wrappers, repeat=2): + assert_(np.issubdtype(w1(cls), w2(cls))) + + def test_subclass(self): + # note we cannot promote floating to a dtype, as it would turn into a + # concrete type + for w in self.wrappers: + assert_(np.issubdtype(w(np.float32), np.floating)) + assert_(np.issubdtype(w(np.float64), np.floating)) + + def test_subclass_backwards(self): + for w in self.wrappers: + assert_(not np.issubdtype(np.floating, w(np.float32))) + assert_(not np.issubdtype(np.floating, w(np.float64))) + + def test_sibling_class(self): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(not np.issubdtype(w1(np.float32), w2(np.float64))) + assert_(not np.issubdtype(w1(np.float64), w2(np.float32))) + if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/core/tests/test_print.py python-numpy-1.14.5/numpy/core/tests/test_print.py --- python-numpy-1.13.3/numpy/core/tests/test_print.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_print.py 2018-06-12 18:28:52.000000000 +0000 @@ -23,19 +23,19 @@ assert_equal(str(tp(x)), str(float(x)), err_msg='Failed str formatting for type %s' % tp) - if tp(1e10).itemsize > 4: - assert_equal(str(tp(1e10)), str(float('1e10')), + if tp(1e16).itemsize > 4: + assert_equal(str(tp(1e16)), str(float('1e16')), err_msg='Failed str formatting for type %s' % tp) else: - ref = '1e+10' - assert_equal(str(tp(1e10)), ref, + ref = '1e+16' + assert_equal(str(tp(1e16)), ref, err_msg='Failed str formatting for type %s' % tp) def test_float_types(): """ Check formatting. This is only for the str function, and only for simple types. - The precision of np.float and np.longdouble aren't the same as the + The precision of np.float32 and np.longdouble aren't the same as the python float precision. """ @@ -51,7 +51,7 @@ """ Check formatting of nan & inf. This is only for the str function, and only for simple types. 
- The precision of np.float and np.longdouble aren't the same as the + The precision of np.float32 and np.longdouble aren't the same as the python float precision. """ @@ -67,19 +67,19 @@ assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), err_msg='Failed str formatting for type %s' % tp) - if tp(1e10).itemsize > 8: - assert_equal(str(tp(1e10)), str(complex(1e10)), + if tp(1e16).itemsize > 8: + assert_equal(str(tp(1e16)), str(complex(1e16)), err_msg='Failed str formatting for type %s' % tp) else: - ref = '(1e+10+0j)' - assert_equal(str(tp(1e10)), ref, + ref = '(1e+16+0j)' + assert_equal(str(tp(1e16)), ref, err_msg='Failed str formatting for type %s' % tp) def test_complex_types(): """Check formatting of complex types. This is only for the str function, and only for simple types. - The precision of np.float and np.longdouble aren't the same as the + The precision of np.float32 and np.longdouble aren't the same as the python float precision. """ @@ -90,21 +90,21 @@ """Check inf/nan formatting of complex types.""" TESTS = { complex(np.inf, 0): "(inf+0j)", - complex(0, np.inf): "inf*j", + complex(0, np.inf): "infj", complex(-np.inf, 0): "(-inf+0j)", - complex(0, -np.inf): "-inf*j", + complex(0, -np.inf): "-infj", complex(np.inf, 1): "(inf+1j)", - complex(1, np.inf): "(1+inf*j)", + complex(1, np.inf): "(1+infj)", complex(-np.inf, 1): "(-inf+1j)", - complex(1, -np.inf): "(1-inf*j)", + complex(1, -np.inf): "(1-infj)", complex(np.nan, 0): "(nan+0j)", - complex(0, np.nan): "nan*j", + complex(0, np.nan): "nanj", complex(-np.nan, 0): "(nan+0j)", - complex(0, -np.nan): "nan*j", + complex(0, -np.nan): "nanj", complex(np.nan, 1): "(nan+1j)", - complex(1, np.nan): "(1+nan*j)", + complex(1, np.nan): "(1+nanj)", complex(-np.nan, 1): "(nan+1j)", - complex(1, -np.nan): "(1+nan*j)", + complex(1, -np.nan): "(1+nanj)", } for tp in [np.complex64, np.cdouble, np.clongdouble]: for c, s in TESTS.items(): @@ -139,11 +139,11 @@ for x in [np.inf, -np.inf, np.nan]: 
_test_redirected_print(float(x), tp, _REF[x]) - if tp(1e10).itemsize > 4: - _test_redirected_print(float(1e10), tp) + if tp(1e16).itemsize > 4: + _test_redirected_print(float(1e16), tp) else: - ref = '1e+10' - _test_redirected_print(float(1e10), tp, ref) + ref = '1e+16' + _test_redirected_print(float(1e16), tp, ref) def check_complex_type_print(tp): # We do not create complex with inf/nan directly because the feature is @@ -151,11 +151,11 @@ for x in [0, 1, -1, 1e20]: _test_redirected_print(complex(x), tp) - if tp(1e10).itemsize > 8: - _test_redirected_print(complex(1e10), tp) + if tp(1e16).itemsize > 8: + _test_redirected_print(complex(1e16), tp) else: - ref = '(1e+10+0j)' - _test_redirected_print(complex(1e10), tp, ref) + ref = '(1e+16+0j)' + _test_redirected_print(complex(1e16), tp, ref) _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') @@ -240,7 +240,7 @@ @in_foreign_locale def test_locale_longdouble(): - assert_equal(str(np.longdouble(1.2)), str(float(1.2))) + assert_equal(str(np.longdouble('1.2')), str(float(1.2))) if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/core/tests/test_records.py python-numpy-1.14.5/numpy/core/tests/test_records.py --- python-numpy-1.13.3/numpy/core/tests/test_records.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_records.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,16 +4,17 @@ import collections import pickle import warnings +import textwrap from os import path import numpy as np from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_array_equal, + run_module_suite, assert_, assert_equal, assert_array_equal, assert_array_almost_equal, assert_raises, assert_warns ) -class TestFromrecords(TestCase): +class TestFromrecords(object): def test_fromrecords(self): r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]], names='col1,col2,col3') @@ -29,7 +30,7 @@ def 
test_fromrecords_0len(self): """ Verify fromrecords works with a 0-length input """ - dtype = [('a', np.float), ('b', np.float)] + dtype = [('a', float), ('b', float)] r = np.rec.fromrecords([], dtype=dtype) assert_equal(r.shape, (0,)) @@ -53,6 +54,14 @@ assert_equal(r1, r2) + def test_fromrecords_list_of_lists(self): + # gh-10870 : For numpy 1.14 we keep the deprecated behavior + # that 1d list-of-lists input is accepted by fromrecords + expected = np.rec.array([(1, 1.5), (2, 2.5)], dtype='i8,f8') + with assert_warns(FutureWarning): + r = np.rec.fromrecords([[1, 1.5], [2, 2.5]], dtype='i8,f8') + assert_equal(r, expected) + def test_method_array(self): r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big') assert_equal(r[1].item(), (25444, b'efg', 1633837924)) @@ -101,6 +110,42 @@ assert_((mine.data1[i] == 0.0)) assert_((mine.data2[i] == 0.0)) + def test_recarray_repr(self): + a = np.array([(1, 0.1), (2, 0.2)], + dtype=[('foo', '= 3) or - (sys.platform == "win32" and - platform.architecture()[0] == "64bit"), - "numpy.intp('0xff', 16) not supported on Py3, " - "as it does not inherit from Python int") - def test_intp(self, level=rlevel): - # Ticket #99 - i_width = np.int_(0).nbytes*2 - 1 - np.intp('0x' + 'f'*i_width, 16) - self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16) - self.assertRaises(ValueError, np.intp, '0x1', 32) - assert_equal(255, np.intp('0xFF', 16)) - assert_equal(1024, np.intp(1024)) - - def test_endian_bool_indexing(self, level=rlevel): + def test_endian_bool_indexing(self): # Ticket #105 a = np.arange(10., dtype='>f8') b = np.arange(10., dtype=' 0.5)) assert_(np.all(b[yb] > 0.5)) - def test_endian_where(self, level=rlevel): + def test_endian_where(self): # GitHub issue #369 net = np.zeros(3, dtype='>f4') net[1] = 0.00458849 @@ -197,7 +186,7 @@ correct = np.array([ 0.60520202, 0.00458849, 0.60520202]) assert_array_almost_equal(test, correct) - def test_endian_recarray(self, level=rlevel): + def 
test_endian_recarray(self): # Ticket #2185 dt = np.dtype([ ('head', '>u4'), @@ -213,7 +202,7 @@ buf[0]['data'][0] = d assert_(buf[0]['head'] == 1) - def test_mem_dot(self, level=rlevel): + def test_mem_dot(self): # Ticket #106 x = np.random.randn(0, 1) y = np.random.randn(10, 1) @@ -227,7 +216,7 @@ np.core.multiarray.dot(x, np.transpose(y), out=z) assert_equal(_z, np.ones(10)) - def test_arange_endian(self, level=rlevel): + def test_arange_endian(self): # Ticket #111 ref = np.arange(10) x = np.arange(10, dtype=' 1 and x['two'] > 2) - def test_method_args(self, level=rlevel): + def test_method_args(self): # Make sure methods and functions have same default axis # keyword and arguments funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'), @@ -515,17 +504,17 @@ res2 = getattr(np, func)(arr1, arr2) assert_(abs(res1-res2).max() < 1e-8, func) - def test_mem_lexsort_strings(self, level=rlevel): + def test_mem_lexsort_strings(self): # Ticket #298 lst = ['abc', 'cde', 'fgh'] np.lexsort((lst,)) - def test_fancy_index(self, level=rlevel): + def test_fancy_index(self): # Ticket #302 x = np.array([1, 2])[np.array([0])] assert_equal(x.shape, (1,)) - def test_recarray_copy(self, level=rlevel): + def test_recarray_copy(self): # Ticket #312 dt = [('x', np.int16), ('y', np.float64)] ra = np.array([(1, 2.3)], dtype=dt) @@ -533,64 +522,64 @@ rb['x'] = 2. 
assert_(ra['x'] != rb['x']) - def test_rec_fromarray(self, level=rlevel): + def test_rec_fromarray(self): # Ticket #322 x1 = np.array([[1, 2], [3, 4], [5, 6]]) x2 = np.array(['a', 'dd', 'xyz']) x3 = np.array([1.1, 2, 3]) np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8") - def test_object_array_assign(self, level=rlevel): + def test_object_array_assign(self): x = np.empty((2, 2), object) x.flat[2] = (1, 2, 3) assert_equal(x.flat[2], (1, 2, 3)) - def test_ndmin_float64(self, level=rlevel): + def test_ndmin_float64(self): # Ticket #324 x = np.array([1, 2, 3], dtype=np.float64) assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) - def test_ndmin_order(self, level=rlevel): + def test_ndmin_order(self): # Issue #465 and related checks assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) - def test_mem_axis_minimization(self, level=rlevel): + def test_mem_axis_minimization(self): # Ticket #327 data = np.arange(5) data = np.add.outer(data, data) - def test_mem_float_imag(self, level=rlevel): + def test_mem_float_imag(self): # Ticket #330 np.float64(1.0).imag - def test_dtype_tuple(self, level=rlevel): + def test_dtype_tuple(self): # Ticket #334 assert_(np.dtype('i4') == np.dtype(('i4', ()))) - def test_dtype_posttuple(self, level=rlevel): + def test_dtype_posttuple(self): # Ticket #335 np.dtype([('col1', '()i4')]) - def test_numeric_carray_compare(self, level=rlevel): + def test_numeric_carray_compare(self): # Ticket #341 assert_equal(np.array(['X'], 'c'), b'X') - def test_string_array_size(self, level=rlevel): + def test_string_array_size(self): # Ticket #342 - self.assertRaises(ValueError, + assert_raises(ValueError, np.array, [['X'], ['X', 'X', 'X']], '|S1') - def 
test_dtype_repr(self, level=rlevel): + def test_dtype_repr(self): # Ticket #344 dt1 = np.dtype(('uint32', 2)) dt2 = np.dtype(('uint32', (2,))) assert_equal(dt1.__repr__(), dt2.__repr__()) - def test_reshape_order(self, level=rlevel): + def test_reshape_order(self): # Make sure reshape order works. a = np.arange(6).reshape(2, 3, order='F') assert_equal(a, [[0, 2, 4], [1, 3, 5]]) @@ -598,13 +587,13 @@ b = a[:, 1] assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) - def test_reshape_zero_strides(self, level=rlevel): + def test_reshape_zero_strides(self): # Issue #380, test reshaping of zero strided arrays a = np.ones(1) a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) assert_(a.reshape(5, 1).strides[0] == 0) - def test_reshape_zero_size(self, level=rlevel): + def test_reshape_zero_size(self): # GitHub Issue #2700, setting shape failed for 0-sized arrays a = np.ones((0, 2)) a.shape = (-1, 2) @@ -621,22 +610,22 @@ assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) - def test_repeat_discont(self, level=rlevel): + def test_repeat_discont(self): # Ticket #352 a = np.arange(12).reshape(4, 3)[:, 2] assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) - def test_array_index(self, level=rlevel): + def test_array_index(self): # Make sure optimization is not called in this case. 
a = np.array([1, 2, 3]) a2 = np.array([[1, 2, 3]]) assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)]) - def test_object_argmax(self, level=rlevel): + def test_object_argmax(self): a = np.array([1, 2, 3], dtype=object) assert_(a.argmax() == 2) - def test_recarray_fields(self, level=rlevel): + def test_recarray_fields(self): # Ticket #372 dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) @@ -647,22 +636,22 @@ np.rec.fromarrays([(1, 2), (3, 4)])]: assert_(a.dtype in [dt0, dt1]) - def test_random_shuffle(self, level=rlevel): + def test_random_shuffle(self): # Ticket #374 a = np.arange(5).reshape((5, 1)) b = a.copy() np.random.shuffle(b) assert_equal(np.sort(b, axis=0), a) - def test_refcount_vdot(self, level=rlevel): + def test_refcount_vdot(self): # Changeset #3443 _assert_valid_refcount(np.vdot) - def test_startswith(self, level=rlevel): + def test_startswith(self): ca = np.char.array(['Hi', 'There']) assert_equal(ca.startswith('H'), [True, False]) - def test_noncommutative_reduce_accumulate(self, level=rlevel): + def test_noncommutative_reduce_accumulate(self): # Ticket #413 tosubtract = np.arange(5) todivide = np.array([2.0, 0.5, 0.25]) @@ -673,28 +662,28 @@ assert_array_equal(np.divide.accumulate(todivide), np.array([2., 4., 16.])) - def test_convolve_empty(self, level=rlevel): + def test_convolve_empty(self): # Convolve should raise an error for empty input array. 
- self.assertRaises(ValueError, np.convolve, [], [1]) - self.assertRaises(ValueError, np.convolve, [1], []) + assert_raises(ValueError, np.convolve, [], [1]) + assert_raises(ValueError, np.convolve, [1], []) - def test_multidim_byteswap(self, level=rlevel): + def test_multidim_byteswap(self): # Ticket #449 r = np.array([(1, (0, 1, 2))], dtype="i2,3i2") assert_array_equal(r.byteswap(), np.array([(256, (0, 256, 512))], r.dtype)) - def test_string_NULL(self, level=rlevel): + def test_string_NULL(self): # Changeset 3557 assert_equal(np.array("a\x00\x0b\x0c\x00").item(), 'a\x00\x0b\x0c') - def test_junk_in_string_fields_of_recarray(self, level=rlevel): + def test_junk_in_string_fields_of_recarray(self): # Ticket #483 r = np.array([[b'abc']], dtype=[('var1', '|S20')]) assert_(asbytes(r['var1'][0][0]) == b'abc') - def test_take_output(self, level=rlevel): + def test_take_output(self): # Ensure that 'take' honours output parameter. x = np.arange(12).reshape((3, 4)) a = np.take(x, [0, 2], axis=1) @@ -715,13 +704,13 @@ if HAS_REFCOUNT: assert_(ref_d == sys.getrefcount(d)) - def test_array_str_64bit(self, level=rlevel): + def test_array_str_64bit(self): # Ticket #501 s = np.array([1, np.nan], dtype=np.float64) with np.errstate(all='raise'): np.array_str(s) # Should succeed - def test_frompyfunc_endian(self, level=rlevel): + def test_frompyfunc_endian(self): # Ticket #503 from math import radians uradians = np.frompyfunc(radians, 1, 1) @@ -730,33 +719,33 @@ assert_almost_equal(uradians(big_endian).astype(float), uradians(little_endian).astype(float)) - def test_mem_string_arr(self, level=rlevel): + def test_mem_string_arr(self): # Ticket #514 s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" t = [] np.hstack((t, s)) - def test_arr_transpose(self, level=rlevel): + def test_arr_transpose(self): # Ticket #516 x = np.random.rand(*(2,)*16) x.transpose(list(range(16))) # Should succeed - def test_string_mergesort(self, level=rlevel): + def test_string_mergesort(self): # Ticket #540 x = 
np.array(['a']*32) assert_array_equal(x.argsort(kind='m'), np.arange(32)) - def test_argmax_byteorder(self, level=rlevel): + def test_argmax_byteorder(self): # Ticket #546 a = np.arange(3, dtype='>f') assert_(a[a.argmax()] == a.max()) - def test_rand_seed(self, level=rlevel): + def test_rand_seed(self): # Ticket #555 for l in np.arange(4): np.random.seed(l) - def test_mem_deallocation_leak(self, level=rlevel): + def test_mem_deallocation_leak(self): # Ticket #562 a = np.zeros(5, dtype=float) b = np.array(a, dtype=float) @@ -764,9 +753,9 @@ def test_mem_on_invalid_dtype(self): "Ticket #583" - self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str) + assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str) - def test_dot_negative_stride(self, level=rlevel): + def test_dot_negative_stride(self): # Ticket #588 x = np.array([[1, 5, 25, 125., 625]]) y = np.array([[20.], [160.], [640.], [1280.], [1024.]]) @@ -774,7 +763,7 @@ y2 = y[::-1] assert_equal(np.dot(x, z), np.dot(x, y2)) - def test_object_casting(self, level=rlevel): + def test_object_casting(self): # This used to trigger the object-type version of # the bitwise_or operation, because float64 -> object # casting succeeds @@ -783,16 +772,16 @@ y = np.zeros([484, 286]) x |= y - self.assertRaises(TypeError, rs) + assert_raises(TypeError, rs) - def test_unicode_scalar(self, level=rlevel): + def test_unicode_scalar(self): # Ticket #600 x = np.array(["DROND", "DROND1"], dtype="U6") el = x[1] new = pickle.loads(pickle.dumps(el)) assert_equal(new, el) - def test_arange_non_native_dtype(self, level=rlevel): + def test_arange_non_native_dtype(self): # Ticket #616 for T in ('>f4', ' 0)] = v - self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float)) - self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float)) + assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float)) + assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float)) # Old special case (different code path): - 
self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) - self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float)) + assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) + assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float)) - def test_mem_scalar_indexing(self, level=rlevel): + def test_mem_scalar_indexing(self): # Ticket #603 x = np.array([0], dtype=float) index = np.array(0, dtype=np.int32) x[index] - def test_binary_repr_0_width(self, level=rlevel): + def test_binary_repr_0_width(self): assert_equal(np.binary_repr(0, width=3), '000') - def test_fromstring(self, level=rlevel): + def test_fromstring(self): assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), [12, 9, 9]) - def test_searchsorted_variable_length(self, level=rlevel): + def test_searchsorted_variable_length(self): x = np.array(['a', 'aa', 'b']) y = np.array(['d', 'e']) assert_equal(x.searchsorted(y), [3, 3]) - def test_string_argsort_with_zeros(self, level=rlevel): + def test_string_argsort_with_zeros(self): # Check argsort for strings containing zeros. - x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") + x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") assert_array_equal(x.argsort(kind='m'), np.array([1, 0])) assert_array_equal(x.argsort(kind='q'), np.array([1, 0])) - def test_string_sort_with_zeros(self, level=rlevel): + def test_string_sort_with_zeros(self): # Check sort for strings containing zeros. 
- x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") - y = np.fromstring("\x00\x01\x00\x02", dtype="|S2") + x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2") + y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2") assert_array_equal(np.sort(x, kind="q"), y) - def test_copy_detection_zero_dim(self, level=rlevel): + def test_copy_detection_zero_dim(self): # Ticket #658 np.indices((0, 3, 4)).T.reshape(-1, 3) - def test_flat_byteorder(self, level=rlevel): + def test_flat_byteorder(self): # Ticket #657 x = np.arange(10) assert_array_equal(x.astype('>i4'), x.astype('i4').flat[:], x.astype('i4')): x = np.array([-1, 0, 1], dtype=dt) assert_equal(x.flat[0].dtype, x[0].dtype) - def test_copy_detection_corner_case(self, level=rlevel): + def test_copy_detection_corner_case(self): # Ticket #658 np.indices((0, 3, 4)).T.reshape(-1, 3) @@ -874,13 +860,13 @@ # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous, # 0-sized reshape itself is tested elsewhere. @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max) - def test_copy_detection_corner_case2(self, level=rlevel): + def test_copy_detection_corner_case2(self): # Ticket #771: strides are not set correctly when reshaping 0-sized # arrays b = np.indices((0, 3, 4)).T.reshape(-1, 3) assert_equal(b.strides, (3 * b.itemsize, b.itemsize)) - def test_object_array_refcounting(self, level=rlevel): + def test_object_array_refcounting(self): # Ticket #633 if not hasattr(sys, 'getrefcount'): return @@ -983,7 +969,7 @@ del tmp # Avoid pyflakes unused variable warning - def test_mem_custom_float_to_array(self, level=rlevel): + def test_mem_custom_float_to_array(self): # Ticket 702 class MyFloat(object): def __float__(self): @@ -992,7 +978,7 @@ tmp = np.atleast_1d([MyFloat()]) tmp.astype(float) # Should succeed - def test_object_array_refcount_self_assign(self, level=rlevel): + def test_object_array_refcount_self_assign(self): # Ticket #711 class VictimObject(object): deleted = False @@ -1009,32 +995,23 @@ arr[:] = arr # trying 
to induce a segfault by doing it again... assert_(not arr[0].deleted) - def test_mem_fromiter_invalid_dtype_string(self, level=rlevel): + def test_mem_fromiter_invalid_dtype_string(self): x = [1, 2, 3] - self.assertRaises(ValueError, + assert_raises(ValueError, np.fromiter, [xi for xi in x], dtype='S') - def test_reduce_big_object_array(self, level=rlevel): + def test_reduce_big_object_array(self): # Ticket #713 oldsize = np.setbufsize(10*16) a = np.array([None]*161, object) assert_(not np.any(a)) np.setbufsize(oldsize) - def test_mem_0d_array_index(self, level=rlevel): + def test_mem_0d_array_index(self): # Ticket #714 np.zeros(10)[np.array(0)] - def test_floats_from_string(self, level=rlevel): - # Ticket #640, floats from string - fsingle = np.single('1.234') - fdouble = np.double('1.234') - flongdouble = np.longdouble('1.234') - assert_almost_equal(fsingle, 1.234) - assert_almost_equal(fdouble, 1.234) - assert_almost_equal(flongdouble, 1.234) - - def test_nonnative_endian_fill(self, level=rlevel): + def test_nonnative_endian_fill(self): # Non-native endian arrays were incorrectly filled with scalars # before r5034. if sys.byteorder == 'little': @@ -1045,7 +1022,7 @@ x.fill(1) assert_equal(x, np.array([1], dtype=dtype)) - def test_dot_alignment_sse2(self, level=rlevel): + def test_dot_alignment_sse2(self): # Test for ticket #551, changeset r5140 x = np.zeros((30, 40)) y = pickle.loads(pickle.dumps(x)) @@ -1054,7 +1031,7 @@ # This shouldn't cause a segmentation fault: np.dot(z, y) - def test_astype_copy(self, level=rlevel): + def test_astype_copy(self): # Ticket #788, changeset r5155 # The test data file was generated by scipy.io.savemat. # The dtype is float64, but the isbuiltin attribute is 0. @@ -1072,7 +1049,7 @@ assert_((xp.__array_interface__['data'][0] != xpd.__array_interface__['data'][0])) - def test_compress_small_type(self, level=rlevel): + def test_compress_small_type(self): # Ticket #789, changeset 5217. 
# compress with out argument segfaulted if cannot cast safely import numpy as np @@ -1086,7 +1063,7 @@ except TypeError: pass - def test_attributes(self, level=rlevel): + def test_attributes(self): # Ticket #791 class TestArray(np.ndarray): def __new__(cls, data, info): @@ -1158,7 +1135,7 @@ assert_(type(dat.nonzero()[0]) is np.ndarray) assert_(type(dat.nonzero()[1]) is np.ndarray) - def test_recarray_tolist(self, level=rlevel): + def test_recarray_tolist(self): # Ticket #793, changeset r5215 # Comparisons fail for NaN, so we can't use random memory # for the test. @@ -1173,12 +1150,12 @@ a = np.arange(5) assert_raises(ValueError, a.item) - def test_char_array_creation(self, level=rlevel): + def test_char_array_creation(self): a = np.array('123', dtype='c') b = np.array([b'1', b'2', b'3']) assert_equal(a, b) - def test_unaligned_unicode_access(self, level=rlevel): + def test_unaligned_unicode_access(self): # Ticket #825 for i in range(1, 9): msg = 'unicode offset: %d chars' % i @@ -1189,7 +1166,7 @@ else: assert_equal(str(x), "[('a', u'b')]", err_msg=msg) - def test_sign_for_complex_nan(self, level=rlevel): + def test_sign_for_complex_nan(self): # Ticket 794. with np.errstate(invalid='ignore'): C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan]) @@ -1197,7 +1174,7 @@ want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan]) assert_equal(have, want) - def test_for_equal_names(self, level=rlevel): + def test_for_equal_names(self): # Ticket #674 dt = np.dtype([('foo', float), ('bar', float)]) a = np.zeros(10, dt) @@ -1207,7 +1184,7 @@ assert_(a.dtype.names[0] == "notfoo") assert_(a.dtype.names[1] == "bar") - def test_for_object_scalar_creation(self, level=rlevel): + def test_for_object_scalar_creation(self): # Ticket #816 a = np.object_() b = np.object_(3) @@ -1224,18 +1201,18 @@ def test_array_resize_method_system_error(self): # Ticket #840 - order should be an invalid keyword. 
x = np.array([[0, 1], [2, 3]]) - self.assertRaises(TypeError, x.resize, (2, 2), order='C') + assert_raises(TypeError, x.resize, (2, 2), order='C') - def test_for_zero_length_in_choose(self, level=rlevel): + def test_for_zero_length_in_choose(self): "Ticket #882" a = np.array(1) - self.assertRaises(ValueError, lambda x: x.choose([]), a) + assert_raises(ValueError, lambda x: x.choose([]), a) def test_array_ndmin_overflow(self): "Ticket #947." - self.assertRaises(ValueError, lambda: np.array([1], ndmin=33)) + assert_raises(ValueError, lambda: np.array([1], ndmin=33)) - def test_void_scalar_with_titles(self, level=rlevel): + def test_void_scalar_with_titles(self): # No ticket data = [('john', 4), ('mary', 5)] dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] @@ -1308,7 +1285,7 @@ good = 'Maximum allowed size exceeded' try: np.arange(sz) - self.assertTrue(np.size == sz) + assert_(np.size == sz) except ValueError as e: if not str(e) == good: self.fail("Got msg '%s', expected '%s'" % (e, good)) @@ -1353,14 +1330,14 @@ dt = np.dtype([('f1', np.uint)]) assert_raises(KeyError, dt.__getitem__, "f2") assert_raises(IndexError, dt.__getitem__, 1) - assert_raises(ValueError, dt.__getitem__, 0.0) + assert_raises(TypeError, dt.__getitem__, 0.0) def test_lexsort_buffer_length(self): # Ticket #1217, don't segfault. a = np.ones(100, dtype=np.int8) b = np.ones(100, dtype=np.int32) i = np.lexsort((a[::-1], b)) - assert_equal(i, np.arange(100, dtype=np.int)) + assert_equal(i, np.arange(100, dtype=int)) def test_object_array_to_fixed_string(self): # Ticket #1235. 
@@ -1380,7 +1357,7 @@ a = np.array([[u'abc', u'\u03a3'], [u'asdf', u'erw']], dtype='U') - self.assertRaises(UnicodeEncodeError, np.array, a, 'S4') + assert_raises(UnicodeEncodeError, np.array, a, 'S4') def test_mixed_string_unicode_array_creation(self): a = np.array(['1234', u'123']) @@ -1431,10 +1408,10 @@ y = x.byteswap() if x.dtype.byteorder == z.dtype.byteorder: # little-endian machine - assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder())) + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder())) else: # big-endian machine - assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype)) + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype)) # double check real and imaginary parts: assert_equal(x.real, y.real.byteswap()) assert_equal(x.imag, y.imag.byteswap()) @@ -1462,7 +1439,7 @@ def test_duplicate_title_and_name(self): # Ticket #1254 dtspec = [(('a', 'a'), 'i'), ('b', 'i')] - self.assertRaises(ValueError, np.dtype, dtspec) + assert_raises(ValueError, np.dtype, dtspec) def test_signed_integer_division_overflow(self): # Ticket #1317. @@ -1471,7 +1448,7 @@ min //= -1 with np.errstate(divide="ignore"): - for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long): + for t in (np.int8, np.int16, np.int32, np.int64, int, np.long): test_type(t) def test_buffer_hashlib(self): @@ -1491,7 +1468,7 @@ # Check if log1p is behaving on 32 bit intel systems. 
assert_(np.isfinite(np.log1p(np.exp2(-53)))) - def test_fromiter_comparison(self, level=rlevel): + def test_fromiter_comparison(self): a = np.fromiter(list(range(10)), dtype='b') b = np.fromiter(list(range(10)), dtype='B') assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) @@ -1563,9 +1540,9 @@ @dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount") def test_take_refcount(self): # ticket #939 - a = np.arange(16, dtype=np.float) + a = np.arange(16, dtype=float) a.shape = (4, 4) - lut = np.ones((5 + 3, 4), np.float) + lut = np.ones((5 + 3, 4), float) rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) c1 = sys.getrefcount(rgba) try: @@ -1710,25 +1687,47 @@ # Object arrays with references to themselves can cause problems a = np.array(0, dtype=object) a[()] = a - assert_raises(TypeError, int, a) - assert_raises(TypeError, long, a) - assert_raises(TypeError, float, a) - assert_raises(TypeError, oct, a) - assert_raises(TypeError, hex, a) + assert_raises(RecursionError, int, a) + assert_raises(RecursionError, long, a) + assert_raises(RecursionError, float, a) + if sys.version_info.major == 2: + # in python 3, this falls back on operator.index, which fails on + # on dtype=object + assert_raises(RecursionError, oct, a) + assert_raises(RecursionError, hex, a) + a[()] = None + def test_object_array_circular_reference(self): # Test the same for a circular reference. - b = np.array(a, dtype=object) + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) a[()] = b - assert_raises(TypeError, int, a) + b[()] = a + assert_raises(RecursionError, int, a) # NumPy has no tp_traverse currently, so circular references # cannot be detected. So resolve it: - a[()] = 0 + a[()] = None # This was causing a to become like the above a = np.array(0, dtype=object) a[...] 
+= 1 assert_equal(a, 1) + def test_object_array_nested(self): + # but is fine with a reference to a different array + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + assert_equal(int(a), int(0)) + assert_equal(long(a), long(0)) + assert_equal(float(a), float(0)) + if sys.version_info.major == 2: + # in python 3, this falls back on operator.index, which fails on + # on dtype=object + assert_equal(oct(a), oct(0)) + assert_equal(hex(a), hex(0)) + + def test_object_array_self_copy(self): # An object array being copied into itself DECREF'ed before INCREF'ing # causing segmentation faults (gh-3787) @@ -1784,8 +1783,8 @@ assert_equal(a1, a2) def test_fields_strides(self): - "Ticket #1760" - r = np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') + "gh-2355" + r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) @@ -1807,7 +1806,7 @@ a['f2'] = 1 except ValueError: pass - except: + except Exception: raise AssertionError def test_ticket_1608(self): @@ -2068,8 +2067,8 @@ assert_equal(arr, arr_cp) assert_equal(arr.shape, arr_cp.shape) assert_equal(int(arr), int(arr_cp)) - self.assertTrue(arr is not arr_cp) - self.assertTrue(isinstance(arr_cp, type(arr))) + assert_(arr is not arr_cp) + assert_(isinstance(arr_cp, type(arr))) def test_deepcopy_F_order_object_array(self): # Ticket #6456. @@ -2079,13 +2078,13 @@ arr_cp = copy.deepcopy(arr) assert_equal(arr, arr_cp) - self.assertTrue(arr is not arr_cp) + assert_(arr is not arr_cp) # Ensure that we have actually copied the item. - self.assertTrue(arr[0, 1] is not arr_cp[1, 1]) + assert_(arr[0, 1] is not arr_cp[1, 1]) # Ensure we are allowed to have references to the same object. - self.assertTrue(arr[0, 1] is arr[1, 1]) + assert_(arr[0, 1] is arr[1, 1]) # Check the references hold for the copied objects. 
- self.assertTrue(arr_cp[0, 1] is arr_cp[1, 1]) + assert_(arr_cp[0, 1] is arr_cp[1, 1]) def test_deepcopy_empty_object_array(self): # Ticket #8536. @@ -2173,7 +2172,7 @@ # gh-6250 recordtype = np.dtype([('a', np.float64), ('b', np.int32), - ('d', (np.str, 5))]) + ('d', (str, 5))]) # Simple case a = np.zeros(2, dtype=recordtype) @@ -2248,5 +2247,38 @@ else: assert_(t.__hash__ != None) + def test_scalar_copy(self): + scalar_types = set(np.sctypeDict.values()) + values = { + np.void: b"a", + np.bytes_: b"a", + np.unicode_: "a", + np.datetime64: "2017-08-25", + } + for sctype in scalar_types: + item = sctype(values.get(sctype, 1)) + item2 = copy.copy(item) + assert_equal(item, item2) + + def test_void_item_memview(self): + va = np.zeros(10, 'V4') + # for now, there is just a futurewarning + assert_warns(FutureWarning, va[:1].item) + # in the future, test we got a bytes copy: + #x = va[:1].item() + #va[0] = b'\xff\xff\xff\xff' + #del va + #assert_equal(x, b'\x00\x00\x00\x00') + + def test_structarray_title(self): + # The following used to segfault on pypy, due to NPY_TITLE_KEY + # not working properly and resulting to double-decref of the + # structured array field items: + # See: https://bitbucket.org/pypy/pypy/issues/2789 + for j in range(5): + structure = np.array([1], dtype=[(('x', 'X'), np.object_)]) + structure[0]['x'] = np.array([2]) + gc.collect() + if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/core/tests/test_scalar_ctors.py python-numpy-1.14.5/numpy/core/tests/test_scalar_ctors.py --- python-numpy-1.13.3/numpy/core/tests/test_scalar_ctors.py 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_scalar_ctors.py 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,70 @@ +""" +Test the scalar contructors, which also do type-coercion +""" +from __future__ import division, absolute_import, print_function + +import sys +import platform +import numpy as np + +from numpy.testing import ( + run_module_suite, + 
assert_equal, assert_almost_equal, assert_raises, assert_warns, + dec +) + +class TestFromString(object): + def test_floating(self): + # Ticket #640, floats from string + fsingle = np.single('1.234') + fdouble = np.double('1.234') + flongdouble = np.longdouble('1.234') + assert_almost_equal(fsingle, 1.234) + assert_almost_equal(fdouble, 1.234) + assert_almost_equal(flongdouble, 1.234) + + def test_floating_overflow(self): + """ Strings containing an unrepresentable float overflow """ + fhalf = np.half('1e10000') + assert_equal(fhalf, np.inf) + fsingle = np.single('1e10000') + assert_equal(fsingle, np.inf) + fdouble = np.double('1e10000') + assert_equal(fdouble, np.inf) + flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000') + assert_equal(flongdouble, np.inf) + + fhalf = np.half('-1e10000') + assert_equal(fhalf, -np.inf) + fsingle = np.single('-1e10000') + assert_equal(fsingle, -np.inf) + fdouble = np.double('-1e10000') + assert_equal(fdouble, -np.inf) + flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000') + assert_equal(flongdouble, -np.inf) + + @dec.knownfailureif((sys.version_info[0] >= 3) or + (sys.platform == "win32" and + platform.architecture()[0] == "64bit"), + "numpy.intp('0xff', 16) not supported on Py3, " + "as it does not inherit from Python int") + def test_intp(self): + # Ticket #99 + i_width = np.int_(0).nbytes*2 - 1 + np.intp('0x' + 'f'*i_width, 16) + assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16) + assert_raises(ValueError, np.intp, '0x1', 32) + assert_equal(255, np.intp('0xFF', 16)) + + +class TestFromInt(object): + def test_intp(self): + # Ticket #99 + assert_equal(1024, np.intp(1024)) + + def test_uint64_from_negative(self): + assert_equal(np.uint64(-2), np.uint64(18446744073709551614)) + + +if __name__ == "__main__": + run_module_suite() diff -Nru python-numpy-1.13.3/numpy/core/tests/test_scalarinherit.py python-numpy-1.14.5/numpy/core/tests/test_scalarinherit.py --- 
python-numpy-1.13.3/numpy/core/tests/test_scalarinherit.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_scalarinherit.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,7 +5,7 @@ from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_ +from numpy.testing import run_module_suite, assert_ class A(object): @@ -23,7 +23,7 @@ class C0(B0): pass -class TestInherit(TestCase): +class TestInherit(object): def test_init(self): x = B(1.0) assert_(str(x) == '1.0') @@ -38,5 +38,41 @@ y = C0(2.0) assert_(str(y) == '2.0') + +class TestCharacter(object): + def test_char_radd(self): + # GH issue 9620, reached gentype_add and raise TypeError + np_s = np.string_('abc') + np_u = np.unicode_('abc') + s = b'def' + u = u'def' + assert_(np_s.__radd__(np_s) is NotImplemented) + assert_(np_s.__radd__(np_u) is NotImplemented) + assert_(np_s.__radd__(s) is NotImplemented) + assert_(np_s.__radd__(u) is NotImplemented) + assert_(np_u.__radd__(np_s) is NotImplemented) + assert_(np_u.__radd__(np_u) is NotImplemented) + assert_(np_u.__radd__(s) is NotImplemented) + assert_(np_u.__radd__(u) is NotImplemented) + assert_(s + np_s == b'defabc') + assert_(u + np_u == u'defabc') + + + class Mystr(str, np.generic): + # would segfault + pass + + ret = s + Mystr('abc') + assert_(type(ret) is type(s)) + + def test_char_repeat(self): + np_s = np.string_('abc') + np_u = np.unicode_('abc') + np_i = np.int(5) + res_np = np_s * np_i + res_s = b'abc' * 5 + assert_(res_np == res_s) + + if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/core/tests/test_scalarmath.py python-numpy-1.14.5/numpy/core/tests/test_scalarmath.py --- python-numpy-1.13.3/numpy/core/tests/test_scalarmath.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_scalarmath.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,13 +4,14 @@ import warnings import itertools 
import operator +import platform import numpy as np -from numpy.testing.utils import _gen_alignment_data from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_raises, - assert_almost_equal, assert_allclose, assert_array_equal, IS_PYPY, - suppress_warnings + run_module_suite, + assert_, assert_equal, assert_raises, + assert_almost_equal, assert_allclose, assert_array_equal, + IS_PYPY, suppress_warnings, dec, _gen_alignment_data, ) types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, @@ -19,17 +20,18 @@ np.cdouble, np.clongdouble] floating_types = np.floating.__subclasses__() +complex_floating_types = np.complexfloating.__subclasses__() # This compares scalarmath against ufuncs. -class TestTypes(TestCase): - def test_types(self, level=1): +class TestTypes(object): + def test_types(self): for atype in types: a = atype(1) assert_(a == 1, "error with %r: got %r" % (atype, a)) - def test_type_add(self, level=1): + def test_type_add(self): # list of types for k, atype in enumerate(types): a_scalar = atype(3) @@ -49,7 +51,7 @@ "error with types (%d/'%c' + %d/'%c')" % (k, np.dtype(atype).char, l, np.dtype(btype).char)) - def test_type_create(self, level=1): + def test_type_create(self): for k, atype in enumerate(types): a = np.array([1, 2, 3], atype) b = atype([1, 2, 3]) @@ -62,7 +64,7 @@ np.add(1, 1) -class TestBaseMath(TestCase): +class TestBaseMath(object): def test_blocked(self): # test alignments offsets for simd instructions # alignments for vz + 2 * (vs - 1) + 1 @@ -108,7 +110,7 @@ np.add(d, np.ones_like(d)) -class TestPower(TestCase): +class TestPower(object): def test_small_types(self): for t in [np.int8, np.int16, np.float16]: a = t(3) @@ -127,7 +129,7 @@ def test_integers_to_negative_integer_power(self): # Note that the combination of uint64 with a signed integer - # has common type np.float. The other combinations should all + # has common type np.float64. 
The other combinations should all # raise a ValueError for integer ** negative integer. exp = [np.array(-1, dt)[()] for dt in 'bhilq'] @@ -200,7 +202,7 @@ return (+1, -1) -class TestModulus(TestCase): +class TestModulus(object): def test_modulus_basic(self): dt = np.typecodes['AllInteger'] + np.typecodes['Float'] @@ -292,7 +294,7 @@ assert_(np.isnan(rem), 'dt: %s' % dt) -class TestComplexDivision(TestCase): +class TestComplexDivision(object): def test_zero_division(self): with np.errstate(all="ignore"): for t in [np.complex64, np.complex128]: @@ -364,7 +366,7 @@ assert_equal(result.imag, ex[1]) -class TestConversion(TestCase): +class TestConversion(object): def test_int_from_long(self): l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] @@ -398,12 +400,43 @@ for code in 'lLqQ': assert_raises(OverflowError, overflow_error_func, code) - def test_longdouble_int(self): + def test_int_from_infinite_longdouble(self): # gh-627 x = np.longdouble(np.inf) + assert_raises(OverflowError, int, x) + with suppress_warnings() as sup: + sup.record(np.ComplexWarning) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, int, x) + assert_equal(len(sup.log), 1) + + @dec.knownfailureif(not IS_PYPY, + "__int__ is not the same as int in cpython (gh-9972)") + def test_int_from_infinite_longdouble___int__(self): + x = np.longdouble(np.inf) assert_raises(OverflowError, x.__int__) - x = np.clongdouble(np.inf) - assert_raises(OverflowError, x.__int__) + with suppress_warnings() as sup: + sup.record(np.ComplexWarning) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, x.__int__) + assert_equal(len(sup.log), 1) + + @dec.knownfailureif(platform.machine().startswith("ppc64")) + @dec.skipif(np.finfo(np.double) == np.finfo(np.longdouble)) + def test_int_from_huge_longdouble(self): + # Produce a longdouble that would overflow a double, + # use exponent that avoids bug in Darwin pow function. 
+ exp = np.finfo(np.double).maxexp - 1 + huge_ld = 2 * 1234 * np.longdouble(2) ** exp + huge_i = 2 * 1234 * 2 ** exp + assert_(huge_ld != np.inf) + assert_equal(int(huge_ld), huge_i) + + def test_int_from_longdouble(self): + x = np.longdouble(1.5) + assert_equal(int(x), 1) + x = np.longdouble(-10.5) + assert_equal(int(x), -10) def test_numpy_scalar_relational_operators(self): # All integer @@ -468,7 +501,7 @@ assert_(np.equal(np.datetime64('NaT'), None)) -#class TestRepr(TestCase): +#class TestRepr(object): # def test_repr(self): # for t in types: # val = t(1197346475.0137341) @@ -512,7 +545,7 @@ if not IS_PYPY: # sys.getsizeof() is not valid on PyPy - class TestSizeOf(TestCase): + class TestSizeOf(object): def test_equal_nbytes(self): for type in types: @@ -524,7 +557,7 @@ assert_raises(TypeError, d.__sizeof__, "a") -class TestMultiply(TestCase): +class TestMultiply(object): def test_seq_repeat(self): # Test that basic sequences get repeated when multiplied with # numpy integers. And errors are raised when multiplied with others. @@ -562,7 +595,7 @@ assert_array_equal(np.int_(3) * arr_like, np.full(3, 3)) -class TestNegative(TestCase): +class TestNegative(object): def test_exceptions(self): a = np.ones((), dtype=np.bool_)[()] assert_raises(TypeError, operator.neg, a) @@ -576,10 +609,13 @@ assert_equal(operator.neg(a) + a, 0) -class TestSubtract(TestCase): +class TestSubtract(object): + def test_exceptions(self): + a = np.ones((), dtype=np.bool_)[()] + assert_raises(TypeError, operator.sub, a, a) def test_result(self): - types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?' 
+ types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] with suppress_warnings() as sup: sup.filter(RuntimeWarning) for dt in types: @@ -587,10 +623,9 @@ assert_equal(operator.sub(a, a), 0) -class TestAbs(TestCase): - +class TestAbs(object): def _test_abs_func(self, absfunc): - for tp in floating_types: + for tp in floating_types + complex_floating_types: x = tp(-1.5) assert_equal(absfunc(x), 1.5) x = tp(0.0) @@ -601,6 +636,15 @@ res = absfunc(x) assert_equal(res, 0.0) + x = tp(np.finfo(tp).max) + assert_equal(absfunc(x), x.real) + + x = tp(np.finfo(tp).tiny) + assert_equal(absfunc(x), x.real) + + x = tp(np.finfo(tp).min) + assert_equal(absfunc(x), -x.real) + def test_builtin_abs(self): self._test_abs_func(abs) diff -Nru python-numpy-1.13.3/numpy/core/tests/test_scalarprint.py python-numpy-1.14.5/numpy/core/tests/test_scalarprint.py --- python-numpy-1.13.3/numpy/core/tests/test_scalarprint.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_scalarprint.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,27 +4,265 @@ """ from __future__ import division, absolute_import, print_function +import code, sys +from tempfile import TemporaryFile import numpy as np -from numpy.testing import TestCase, assert_, run_module_suite +from numpy.testing import assert_, assert_equal, suppress_warnings,\ + run_module_suite +import sys, tempfile - -class TestRealScalars(TestCase): +class TestRealScalars(object): def test_str(self): svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan] styps = [np.float16, np.float32, np.float64, np.longdouble] - actual = [str(f(c)) for c in svals for f in styps] wanted = [ - '0.0', '0.0', '0.0', '0.0', - '-0.0', '-0.0', '-0.0', '-0.0', - '1.0', '1.0', '1.0', '1.0', - '-1.0', '-1.0', '-1.0', '-1.0', - 'inf', 'inf', 'inf', 'inf', - '-inf', '-inf', '-inf', '-inf', - 'nan', 'nan', 'nan', 'nan'] + ['0.0', '0.0', '0.0', '0.0' ], + ['-0.0', '-0.0', '-0.0', '-0.0'], + ['1.0', '1.0', '1.0', '1.0' ], + ['-1.0', '-1.0', '-1.0', 
'-1.0'], + ['inf', 'inf', 'inf', 'inf' ], + ['-inf', '-inf', '-inf', '-inf'], + ['nan', 'nan', 'nan', 'nan']] + + for wants, val in zip(wanted, svals): + for want, styp in zip(wants, styps): + msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val)) + assert_equal(str(styp(val)), want, err_msg=msg) + + def test_scalar_cutoffs(self): + # test that both the str and repr of np.float64 behaves + # like python floats in python3. Note that in python2 + # the str has truncated digits, but we do not do this + def check(v): + # we compare str to repr, to avoid python2 truncation behavior + assert_equal(str(np.float64(v)), repr(v)) + assert_equal(repr(np.float64(v)), repr(v)) + + # check we use the same number of significant digits + check(1.12345678901234567890) + check(0.0112345678901234567890) + + # check switch from scientific output to positional and back + check(1e-5) + check(1e-4) + check(1e15) + check(1e16) + + def test_py2_float_print(self): + # gh-10753 + # In python2, the python float type implements an obsolte method + # tp_print, which overrides tp_repr and tp_str when using the "print" + # keyword/method to output to a "real file" (ie, not a StringIO). Make + # sure we don't inherit it. + x = np.double(0.1999999999999) + with TemporaryFile('r+t') as f: + print(x, file=f) + f.seek(0) + output = f.read() + assert_equal(output, str(x) + '\n') + # In python2 the value float('0.1999999999999') prints with reduced + # precision as '0.2', but we want numpy's np.double('0.1999999999999') + # to print the unique value, '0.1999999999999'. + + # gh-11031 + # Only in the python2 interactive shell and when stdout is a "real" + # file, the output of the last command is printed to stdout without + # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print + # x` are potentially different. Make sure they are the same. The only + # way I found to get prompt-like output is using an actual prompt from + # the 'code' module. 
Again, must use tempfile to get a "real" file. + + # dummy user-input which enters one line and then ctrl-Ds. + def userinput(): + yield 'np.sqrt(2)' + raise EOFError + gen = userinput() + input_func = lambda prompt="": next(gen) + + with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe: + orig_stdout, orig_stderr = sys.stdout, sys.stderr + sys.stdout, sys.stderr = fo, fe + + # py2 code.interact sends irrelevant internal DeprecationWarnings + with suppress_warnings() as sup: + sup.filter(DeprecationWarning) + code.interact(local={'np': np}, readfunc=input_func, banner='') + + sys.stdout, sys.stderr = orig_stdout, orig_stderr + + fo.seek(0) + capture = fo.read().strip() + + assert_equal(capture, repr(np.sqrt(2))) + + def test_dragon4(self): + # these tests are adapted from Ryan Juckett's dragon4 implementation, + # see dragon4.c for details. + + fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k) + fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k) + fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k) + fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k) + + preckwd = lambda prec: {'unique': False, 'precision': prec} + + assert_equal(fpos32('1.0'), "1.") + assert_equal(fsci32('1.0'), "1.e+00") + assert_equal(fpos32('10.234'), "10.234") + assert_equal(fpos32('-10.234'), "-10.234") + assert_equal(fsci32('10.234'), "1.0234e+01") + assert_equal(fsci32('-10.234'), "-1.0234e+01") + assert_equal(fpos32('1000.0'), "1000.") + assert_equal(fpos32('1.0', precision=0), "1.") + assert_equal(fsci32('1.0', precision=0), "1.e+00") + assert_equal(fpos32('10.234', precision=0), "10.") + assert_equal(fpos32('-10.234', precision=0), "-10.") + assert_equal(fsci32('10.234', precision=0), "1.e+01") + assert_equal(fsci32('-10.234', precision=0), "-1.e+01") + assert_equal(fpos32('10.234', precision=2), "10.23") + assert_equal(fsci32('-10.234', precision=2), "-1.02e+01") + 
assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)), + '9.9999999999999995e-08') + assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)), + '9.8813129168249309e-324') + assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), + '9.9999999999999694e-311') + + + # test rounding + # 3.1415927410 is closest float32 to np.pi + assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), + "3.1415927410") + assert_equal(fsci32('3.14159265358979323846', **preckwd(10)), + "3.1415927410e+00") + assert_equal(fpos64('3.14159265358979323846', **preckwd(10)), + "3.1415926536") + assert_equal(fsci64('3.14159265358979323846', **preckwd(10)), + "3.1415926536e+00") + # 299792448 is closest float32 to 299792458 + assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000") + assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08") + assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000") + assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08") + + assert_equal(fpos32('3.14159265358979323846', **preckwd(25)), + "3.1415927410125732421875000") + assert_equal(fpos64('3.14159265358979323846', **preckwd(50)), + "3.14159265358979311599796346854418516159057617187500") + assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") + + + # smallest numbers + assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), + "0.00000000000000000000000000000000000000000000140129846432" + "4817070923729583289916131280261941876515771757068283889791" + "08268586060148663818836212158203125") + assert_equal(fpos64(0.5**(1022 + 52), unique=False, precision=1074), + "0.00000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + 
"0000000000000000000000000000000000049406564584124654417656" + "8792868221372365059802614324764425585682500675507270208751" + "8652998363616359923797965646954457177309266567103559397963" + "9877479601078187812630071319031140452784581716784898210368" + "8718636056998730723050006387409153564984387312473397273169" + "6151400317153853980741262385655911710266585566867681870395" + "6031062493194527159149245532930545654440112748012970999954" + "1931989409080416563324524757147869014726780159355238611550" + "1348035264934720193790268107107491703332226844753335720832" + "4319360923828934583680601060115061698097530783422773183292" + "4790498252473077637592724787465608477820373446969953364701" + "7972677717585125660551199131504891101451037862738167250955" + "8373897335989936648099411642057026370902792427675445652290" + "87538682506419718265533447265625") + + # largest numbers + assert_equal(fpos32(np.finfo(np.float32).max, **preckwd(0)), + "340282346638528859811704183484516925440.") + assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)), + "1797693134862315708145274237317043567980705675258449965989" + "1747680315726078002853876058955863276687817154045895351438" + "2464234321326889464182768467546703537516986049910576551282" + "0762454900903893289440758685084551339423045832369032229481" + "6580855933212334827479782620414472316873817718091929988125" + "0404026184124858368.") + # Warning: In unique mode only the integer digits necessary for + # uniqueness are computed, the rest are 0. Should we change this? 
+ assert_equal(fpos32(np.finfo(np.float32).max, precision=0), + "340282350000000000000000000000000000000.") + + # test trailing zeros + assert_equal(fpos32('1.0', unique=False, precision=3), "1.000") + assert_equal(fpos64('1.0', unique=False, precision=3), "1.000") + assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00") + assert_equal(fpos32('1.5', unique=False, precision=3), "1.500") + assert_equal(fpos64('1.5', unique=False, precision=3), "1.500") + assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00") + assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00") + # gh-10713 + assert_equal(fpos64('324', unique=False, precision=5, fractional=False), "324.00") + + def test_dragon4_interface(self): + tps = [np.float16, np.float32, np.float64] + if hasattr(np, 'float128'): + tps.append(np.float128) + + fpos = np.format_float_positional + fsci = np.format_float_scientific + + for tp in tps: + # test padding + assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") + assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") + assert_equal(fpos(tp('-10.2'), + pad_left=4, pad_right=4), " -10.2 ") + + # test exp_digits + assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") + + # test fixed (non-unique) mode + assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") + assert_equal(fsci(tp('1.0'), unique=False, precision=4), + "1.0000e+00") + + # test trimming + # trim of 'k' or '.' only affects non-unique mode, since unique + # mode will not output trailing 0s. 
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), + "1.0000") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), + "1.") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), + "1.2" if tp != np.float16 else "1.2002") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), + "1.0") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='0'), "1.0") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), + "1") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='-'), "1") - for res, val in zip(actual, wanted): - assert_(res == val) + def float32_roundtrip(self): + # gh-9360 + x = np.float32(1024 - 2**-14) + y = np.float32(1024 - 2**-13) + assert_(repr(x) != repr(y)) + assert_equal(np.float32(repr(x)), x) + assert_equal(np.float32(repr(y)), y) + def float64_vs_python(self): + # gh-2643, gh-6136, gh-6908 + assert_equal(repr(np.float64(0.1)), repr(0.1)) + assert_(repr(np.float64(0.20000000000000004)) != repr(0.2)) if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/core/tests/test_shape_base.py python-numpy-1.14.5/numpy/core/tests/test_shape_base.py --- python-numpy-1.13.3/numpy/core/tests/test_shape_base.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_shape_base.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,13 +4,13 @@ import numpy as np from numpy.core import (array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack, newaxis, concatenate, stack) -from numpy.testing import (TestCase, assert_, assert_raises, +from numpy.testing import (assert_, assert_raises, assert_array_equal, assert_equal, run_module_suite, assert_raises_regex, assert_almost_equal) from numpy.compat import long -class TestAtleast1d(TestCase): +class 
TestAtleast1d(object): def test_0D_array(self): a = array(1) b = array(2) @@ -51,7 +51,7 @@ assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2)) -class TestAtleast2d(TestCase): +class TestAtleast2d(object): def test_0D_array(self): a = array(1) b = array(2) @@ -90,7 +90,7 @@ assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2)) -class TestAtleast3d(TestCase): +class TestAtleast3d(object): def test_0D_array(self): a = array(1) b = array(2) @@ -122,7 +122,7 @@ assert_array_equal(res, desired) -class TestHstack(TestCase): +class TestHstack(object): def test_non_iterable(self): assert_raises(TypeError, hstack, 1) @@ -151,7 +151,7 @@ assert_array_equal(res, desired) -class TestVstack(TestCase): +class TestVstack(object): def test_non_iterable(self): assert_raises(TypeError, vstack, 1) @@ -187,7 +187,7 @@ assert_array_equal(res, desired) -class TestConcatenate(TestCase): +class TestConcatenate(object): def test_exceptions(self): # test axis must be in bounds for ndim in [1, 2, 3]: @@ -208,8 +208,8 @@ np.concatenate((a, b), axis=axis[0]) # OK assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1]) assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2]) - a = np.rollaxis(a, -1) - b = np.rollaxis(b, -1) + a = np.moveaxis(a, -1, 0) + b = np.moveaxis(b, -1, 0) axis.append(axis.pop(0)) # No arrays to concatenate raises ValueError @@ -230,6 +230,12 @@ '0', '1', '2', 'x']) assert_array_equal(r, d) + out = np.zeros(a.size + len(b)) + r = np.concatenate((a, b), axis=None) + rout = np.concatenate((a, b), axis=None, out=out) + assert_(out is rout) + assert_equal(r, rout) + def test_large_concatenate_axis_None(self): # When no axis is given, concatenate uses flattened versions. # This also had a bug with many arrays (see gh-5979). 
@@ -278,6 +284,34 @@ assert_array_equal(concatenate((a0, a1, a2), -1), res) assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T) + out = res.copy() + rout = concatenate((a0, a1, a2), 2, out=out) + assert_(out is rout) + assert_equal(res, rout) + + def test_bad_out_shape(self): + a = array([1, 2]) + b = array([3, 4]) + + assert_raises(ValueError, concatenate, (a, b), out=np.empty(5)) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1))) + assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4))) + concatenate((a, b), out=np.empty(4)) + + def test_out_dtype(self): + out = np.empty(4, np.float32) + res = concatenate((array([1, 2]), array([3, 4])), out=out) + assert_(out is res) + + out = np.empty(4, np.complex64) + res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out) + assert_(out is res) + + # invalid cast + out = np.empty(4, np.int32) + assert_raises(TypeError, concatenate, + (array([0.1, 0.2]), array([0.3, 0.4])), out=out) + def test_stack(): # non-iterable input @@ -333,7 +367,7 @@ stack, [m, m]) -class TestBlock(TestCase): +class TestBlock(object): def test_block_simple_row_wise(self): a_2d = np.ones((2, 2)) b_2d = 2 * a_2d @@ -526,6 +560,28 @@ assert_raises_regex(TypeError, 'tuple', np.block, ([1, 2], [3, 4])) assert_raises_regex(TypeError, 'tuple', np.block, [(1, 2), (3, 4)]) + def test_different_ndims(self): + a = 1. + b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 1, 3)) + + result = np.block([a, b, c]) + expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_different_ndims_depths(self): + a = 1. 
+ b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 2, 3)) + + result = np.block([[a, b], [c]]) + expected = np.array([[[1., 2., 2.], + [3., 3., 3.], + [3., 3., 3.]]]) + + assert_equal(result, expected) + if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/core/tests/test_ufunc.py python-numpy-1.14.5/numpy/core/tests/test_ufunc.py --- python-numpy-1.13.3/numpy/core/tests/test_ufunc.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_ufunc.py 2018-06-12 18:28:52.000000000 +0000 @@ -8,16 +8,16 @@ import numpy.core.operand_flag_tests as opflag_tests from numpy.core.test_rational import rational, test_add, test_add_rationals from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_raises, + run_module_suite, assert_, assert_equal, assert_raises, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_no_warnings, assert_allclose, ) -class TestUfuncKwargs(TestCase): +class TestUfuncKwargs(object): def test_kwarg_exact(self): assert_raises(TypeError, np.add, 1, 2, castingx='safe') - assert_raises(TypeError, np.add, 1, 2, dtypex=np.int) + assert_raises(TypeError, np.add, 1, 2, dtypex=int) assert_raises(TypeError, np.add, 1, 2, extobjx=[4096]) assert_raises(TypeError, np.add, 1, 2, outx=None) assert_raises(TypeError, np.add, 1, 2, sigx='ii->i') @@ -31,12 +31,12 @@ def test_sig_dtype(self): assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i', - dtype=np.int) + dtype=int) assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i', - dtype=np.int) + dtype=int) -class TestUfunc(TestCase): +class TestUfunc(object): def test_pickle(self): import pickle assert_(pickle.loads(pickle.dumps(np.sin)) is np.sin) @@ -174,22 +174,22 @@ # check unary PyUFunc_O_O msg = "PyUFunc_O_O" - x = np.ones(10, dtype=np.object)[0::2] + x = np.ones(10, dtype=object)[0::2] assert_(np.all(np.abs(x) == 1), msg) # check unary PyUFunc_O_O_method msg = "PyUFunc_O_O_method" - x = np.zeros(10, 
dtype=np.object)[0::2] + x = np.zeros(10, dtype=object)[0::2] for i in range(len(x)): x[i] = foo() assert_(np.all(np.conjugate(x) == True), msg) # check binary PyUFunc_OO_O msg = "PyUFunc_OO_O" - x = np.ones(10, dtype=np.object)[0::2] + x = np.ones(10, dtype=object)[0::2] assert_(np.all(np.add(x, x) == 2), msg) # check binary PyUFunc_OO_O_method msg = "PyUFunc_OO_O_method" - x = np.zeros(10, dtype=np.object)[0::2] + x = np.zeros(10, dtype=object)[0::2] for i in range(len(x)): x[i] = foo() assert_(np.all(np.logical_xor(x, x)), msg) @@ -437,13 +437,22 @@ assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) def test_sum(self): - for dt in (np.int, np.float16, np.float32, np.float64, np.longdouble): + for dt in (int, np.float16, np.float32, np.float64, np.longdouble): for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, 128, 1024, 1235): tgt = dt(v * (v + 1) / 2) d = np.arange(1, v + 1, dtype=dt) - assert_almost_equal(np.sum(d), tgt) - assert_almost_equal(np.sum(d[::-1]), tgt) + + # warning if sum overflows, which it does in float16 + overflow = not np.isfinite(tgt) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + assert_almost_equal(np.sum(d), tgt) + assert_equal(len(w), 1 * overflow) + + assert_almost_equal(np.sum(d[::-1]), tgt) + assert_equal(len(w), 2 * overflow) d = np.ones(500, dtype=dt) assert_almost_equal(np.sum(d[::2]), 250.) 
@@ -670,7 +679,7 @@ assert_equal(ref, True, err_msg="reference check") def test_euclidean_pdist(self): - a = np.arange(12, dtype=np.float).reshape(4, 3) + a = np.arange(12, dtype=float).reshape(4, 3) out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype) umt.euclidean_pdist(a, out) b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1)) @@ -1245,9 +1254,9 @@ assert_array_equal(values, [1, 8, 6, 4]) # Test exception thrown - values = np.array(['a', 1], dtype=np.object) - self.assertRaises(TypeError, np.add.at, values, [0, 1], 1) - assert_array_equal(values, np.array(['a', 1], dtype=np.object)) + values = np.array(['a', 1], dtype=object) + assert_raises(TypeError, np.add.at, values, [0, 1], 1) + assert_array_equal(values, np.array(['a', 1], dtype=object)) # Test multiple output ufuncs raise error, gh-5665 assert_raises(ValueError, np.modf.at, np.arange(10), [1]) diff -Nru python-numpy-1.13.3/numpy/core/tests/test_umath_complex.py python-numpy-1.14.5/numpy/core/tests/test_umath_complex.py --- python-numpy-1.13.3/numpy/core/tests/test_umath_complex.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_umath_complex.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,7 +6,7 @@ import numpy as np import numpy.core.umath as ncu from numpy.testing import ( - TestCase, run_module_suite, assert_equal, assert_array_equal, + run_module_suite, assert_raises, assert_equal, assert_array_equal, assert_almost_equal, dec ) @@ -38,7 +38,7 @@ yield check, f, 1, 0, np.exp(1), 0, False yield check, f, 0, 1, np.cos(1), np.sin(1), False - ref = np.exp(1) * np.complex(np.cos(1), np.sin(1)) + ref = np.exp(1) * complex(np.cos(1), np.sin(1)) yield check, f, 1, 1, ref.real, ref.imag, False @platform_skip @@ -73,7 +73,7 @@ def _check_ninf_inf(dummy): msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): - z = f(np.array(np.complex(-np.inf, np.inf))) + z = f(np.array(complex(-np.inf, np.inf))) if z.real != 0 or z.imag != 
0: raise AssertionError(msgform % (z.real, z.imag)) @@ -83,7 +83,7 @@ def _check_inf_inf(dummy): msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): - z = f(np.array(np.complex(np.inf, np.inf))) + z = f(np.array(complex(np.inf, np.inf))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) @@ -93,7 +93,7 @@ def _check_ninf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" with np.errstate(invalid='ignore'): - z = f(np.array(np.complex(-np.inf, np.nan))) + z = f(np.array(complex(-np.inf, np.nan))) if z.real != 0 or z.imag != 0: raise AssertionError(msgform % (z.real, z.imag)) @@ -103,7 +103,7 @@ def _check_inf_nan(dummy): msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)" with np.errstate(invalid='ignore'): - z = f(np.array(np.complex(np.inf, np.nan))) + z = f(np.array(complex(np.inf, np.nan))) if not np.isinf(z.real) or not np.isnan(z.imag): raise AssertionError(msgform % (z.real, z.imag)) @@ -129,7 +129,7 @@ yield check, f, np.nan, 0, np.nan, 0 -class TestClog(TestCase): +class TestClog(object): def test_simple(self): x = np.array([1+0j, 1+2j]) y_r = np.log(np.abs(x)) + 1j * np.angle(x) @@ -150,9 +150,9 @@ # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' # floating-point exception. with np.errstate(divide='raise'): - x = np.array([np.NZERO], dtype=np.complex) - y = np.complex(-np.inf, np.pi) - self.assertRaises(FloatingPointError, np.log, x) + x = np.array([np.NZERO], dtype=complex) + y = complex(-np.inf, np.pi) + assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) @@ -162,9 +162,9 @@ # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' # floating-point exception. 
with np.errstate(divide='raise'): - x = np.array([0], dtype=np.complex) - y = np.complex(-np.inf, 0) - self.assertRaises(FloatingPointError, np.log, x) + x = np.array([0], dtype=complex) + y = complex(-np.inf, 0) + assert_raises(FloatingPointError, np.log, x) with np.errstate(divide='ignore'): assert_almost_equal(np.log(x), y) @@ -172,13 +172,13 @@ yl.append(y) # clog(x + i inf returns +inf + i pi /2, for finite x. - x = np.array([complex(1, np.inf)], dtype=np.complex) - y = np.complex(np.inf, 0.5 * np.pi) + x = np.array([complex(1, np.inf)], dtype=complex) + y = complex(np.inf, 0.5 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) - x = np.array([complex(-1, np.inf)], dtype=np.complex) + x = np.array([complex(-1, np.inf)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) @@ -186,9 +186,9 @@ # clog(x + iNaN) returns NaN + iNaN and optionally raises the # 'invalid' floating- point exception, for finite x. with np.errstate(invalid='raise'): - x = np.array([complex(1., np.nan)], dtype=np.complex) - y = np.complex(np.nan, np.nan) - #self.assertRaises(FloatingPointError, np.log, x) + x = np.array([complex(1., np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) @@ -196,8 +196,8 @@ yl.append(y) with np.errstate(invalid='raise'): - x = np.array([np.inf + 1j * np.nan], dtype=np.complex) - #self.assertRaises(FloatingPointError, np.log, x) + x = np.array([np.inf + 1j * np.nan], dtype=complex) + #assert_raises(FloatingPointError, np.log, x) with np.errstate(invalid='ignore'): assert_almost_equal(np.log(x), y) @@ -205,70 +205,70 @@ yl.append(y) # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. 
- x = np.array([-np.inf + 1j], dtype=np.complex) - y = np.complex(np.inf, np.pi) + x = np.array([-np.inf + 1j], dtype=complex) + y = complex(np.inf, np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. - x = np.array([np.inf + 1j], dtype=np.complex) - y = np.complex(np.inf, 0) + x = np.array([np.inf + 1j], dtype=complex) + y = complex(np.inf, 0) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(- inf + i inf) returns +inf + i3pi /4. - x = np.array([complex(-np.inf, np.inf)], dtype=np.complex) - y = np.complex(np.inf, 0.75 * np.pi) + x = np.array([complex(-np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.75 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+ inf + i inf) returns +inf + ipi /4. - x = np.array([complex(np.inf, np.inf)], dtype=np.complex) - y = np.complex(np.inf, 0.25 * np.pi) + x = np.array([complex(np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.25 * np.pi) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(+/- inf + iNaN) returns +inf + iNaN. - x = np.array([complex(np.inf, np.nan)], dtype=np.complex) - y = np.complex(np.inf, np.nan) + x = np.array([complex(np.inf, np.nan)], dtype=complex) + y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) - x = np.array([complex(-np.inf, np.nan)], dtype=np.complex) + x = np.array([complex(-np.inf, np.nan)], dtype=complex) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iy) returns NaN + iNaN and optionally raises the # 'invalid' floating-point exception, for finite y. - x = np.array([complex(np.nan, 1)], dtype=np.complex) - y = np.complex(np.nan, np.nan) + x = np.array([complex(np.nan, 1)], dtype=complex) + y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + i inf) returns +inf + iNaN. 
- x = np.array([complex(np.nan, np.inf)], dtype=np.complex) - y = np.complex(np.inf, np.nan) + x = np.array([complex(np.nan, np.inf)], dtype=complex) + y = complex(np.inf, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(NaN + iNaN) returns NaN + iNaN. - x = np.array([complex(np.nan, np.nan)], dtype=np.complex) - y = np.complex(np.nan, np.nan) + x = np.array([complex(np.nan, np.nan)], dtype=complex) + y = complex(np.nan, np.nan) assert_almost_equal(np.log(x), y) xl.append(x) yl.append(y) # clog(conj(z)) = conj(clog(z)). - xa = np.array(xl, dtype=np.complex) - ya = np.array(yl, dtype=np.complex) + xa = np.array(xl, dtype=complex) + ya = np.array(yl, dtype=complex) with np.errstate(divide='ignore'): for i in range(len(xa)): assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) @@ -286,7 +286,7 @@ yield check_complex_value, np.sqrt, -1, 0, 0, 1 def test_simple_conjugate(self): - ref = np.conj(np.sqrt(np.complex(1, 1))) + ref = np.conj(np.sqrt(complex(1, 1))) def f(z): return np.sqrt(np.conj(z)) @@ -330,7 +330,7 @@ # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) def _check_ninf_nan(dummy): msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" - z = np.sqrt(np.array(np.complex(-np.inf, np.nan))) + z = np.sqrt(np.array(complex(-np.inf, np.nan))) #Fixme: ugly workaround for isinf bug. 
with np.errstate(invalid='ignore'): if not (np.isnan(z.real) and np.isinf(z.imag)): @@ -350,7 +350,7 @@ # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch # cuts first) -class TestCpow(TestCase): +class TestCpow(object): def setUp(self): self.olderr = np.seterr(invalid='ignore') @@ -406,16 +406,16 @@ def test_fabs(self): # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) - x = np.array([1+0j], dtype=np.complex) + x = np.array([1+0j], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) - x = np.array([complex(1, np.NZERO)], dtype=np.complex) + x = np.array([complex(1, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) - x = np.array([complex(np.inf, np.NZERO)], dtype=np.complex) + x = np.array([complex(np.inf, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) - x = np.array([complex(np.nan, np.NZERO)], dtype=np.complex) + x = np.array([complex(np.nan, np.NZERO)], dtype=complex) assert_array_equal(np.abs(x), np.real(x)) def test_cabs_inf_nan(self): @@ -445,9 +445,9 @@ return np.abs(np.conj(a)) def g(a, b): - return np.abs(np.complex(a, b)) + return np.abs(complex(a, b)) - xa = np.array(x, dtype=np.complex) + xa = np.array(x, dtype=complex) for i in range(len(xa)): ref = g(x[i], y[i]) yield check_real_value, f, x[i], y[i], ref @@ -527,7 +527,7 @@ def check_complex_value(f, x1, y1, x2, y2, exact=True): z1 = np.array([complex(x1, y1)]) - z2 = np.complex(x2, y2) + z2 = complex(x2, y2) with np.errstate(invalid='ignore'): if exact: assert_equal(f(z1), z2) diff -Nru python-numpy-1.13.3/numpy/core/tests/test_umath.py python-numpy-1.14.5/numpy/core/tests/test_umath.py --- python-numpy-1.13.3/numpy/core/tests/test_umath.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_umath.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,15 +6,14 @@ import fnmatch import itertools -from numpy.testing.utils import _gen_alignment_data import numpy.core.umath as ncu from numpy.core 
import umath_tests as ncu_tests import numpy as np from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_raises, + run_module_suite, assert_, assert_equal, assert_raises, assert_raises_regex, assert_array_equal, assert_almost_equal, assert_array_almost_equal, dec, assert_allclose, assert_no_warnings, - suppress_warnings + suppress_warnings, _gen_alignment_data, ) @@ -32,7 +31,7 @@ np.seterr(**self.olderr) -class TestConstants(TestCase): +class TestConstants(object): def test_pi(self): assert_allclose(ncu.pi, 3.141592653589793, 1e-15) @@ -43,7 +42,7 @@ assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) -class TestOut(TestCase): +class TestOut(object): def test_out_subok(self): for subok in (True, False): a = np.array(0.5) @@ -176,7 +175,7 @@ assert_(w[0].category is DeprecationWarning) -class TestComparisons(TestCase): +class TestComparisons(object): def test_ignore_object_identity_in_equal(self): # Check error raised when comparing identical objects whose comparison # is not a simple boolean, e.g., arrays that are compared elementwise. @@ -214,7 +213,18 @@ assert_equal(np.not_equal(a, a), [True]) -class TestDivision(TestCase): +class TestAdd(object): + def test_reduce_alignment(self): + # gh-9876 + # make sure arrays with weird strides work with the optimizations in + # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a + # 4 byte offset, even though its itemsize is 8. 
+ a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)]) + a['a'] = -1 + assert_equal(a['b'].sum(), 0) + + +class TestDivision(object): def test_division_int(self): # int division should follow Python x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) @@ -275,7 +285,7 @@ return (+1, -1) -class TestRemainder(TestCase): +class TestRemainder(object): def test_remainder_basic(self): dt = np.typecodes['AllInteger'] + np.typecodes['Float'] @@ -366,7 +376,7 @@ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)) -class TestCbrt(TestCase): +class TestCbrt(object): def test_cbrt_scalar(self): assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5) @@ -379,7 +389,7 @@ assert_equal(np.cbrt(-np.inf), -np.inf) -class TestPower(TestCase): +class TestPower(object): def test_power_float(self): x = np.array([1., 2., 3.]) assert_equal(x**0, [1., 1., 1.]) @@ -518,7 +528,7 @@ assert_raises(ValueError, np.power, one, minusone) -class TestFloat_power(TestCase): +class TestFloat_power(object): def test_type_conversion(self): arg_type = '?bhilBHILefdgFDG' res_type = 'ddddddddddddgDDG' @@ -529,7 +539,7 @@ assert_(res.dtype.name == np.dtype(dtout).name, msg) -class TestLog2(TestCase): +class TestLog2(object): def test_log2_values(self): x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] @@ -560,7 +570,7 @@ assert_(w[2].category is RuntimeWarning) -class TestExp2(TestCase): +class TestExp2(object): def test_exp2_values(self): x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] @@ -612,7 +622,7 @@ assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) -class TestLog(TestCase): +class TestLog(object): def test_log_values(self): x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] @@ -623,7 +633,7 @@ assert_almost_equal(np.log(xf), yf) -class TestExp(TestCase): +class TestExp(object): def test_exp_values(self): x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 
4, 5, 6, 7, 8, 9, 10] @@ -675,7 +685,7 @@ assert_(np.isnan(np.logaddexp(np.nan, np.nan))) -class TestLog1p(TestCase): +class TestLog1p(object): def test_log1p(self): assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) @@ -689,7 +699,7 @@ assert_equal(ncu.log1p(-np.inf), np.nan) -class TestExpm1(TestCase): +class TestExpm1(object): def test_expm1(self): assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1) assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1) @@ -702,7 +712,7 @@ assert_equal(ncu.expm1(-np.inf), -1.) -class TestHypot(TestCase, object): +class TestHypot(object): def test_simple(self): assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2)) assert_almost_equal(ncu.hypot(0, 0), 0) @@ -726,7 +736,7 @@ "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y))) -class TestHypotSpecialValues(TestCase): +class TestHypotSpecialValues(object): def test_nan_outputs(self): assert_hypot_isnan(np.nan, np.nan) assert_hypot_isnan(np.nan, 1) @@ -763,7 +773,7 @@ assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y))) -class TestArctan2SpecialValues(TestCase): +class TestArctan2SpecialValues(object): def test_one_one(self): # atan2(1, 1) returns pi/4. assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi) @@ -832,7 +842,7 @@ assert_arctan2_isnan(np.nan, np.nan) -class TestLdexp(TestCase): +class TestLdexp(object): def _check_ldexp(self, tp): assert_almost_equal(ncu.ldexp(np.array(2., np.float32), np.array(3, tp)), 16.) @@ -898,22 +908,22 @@ # fail if cmp is used instead of rich compare. # Failure cannot be guaranteed. 
for i in range(1): - x = np.array(float('nan'), np.object) + x = np.array(float('nan'), object) y = 1.0 - z = np.array(float('nan'), np.object) + z = np.array(float('nan'), object) assert_(np.maximum(x, y) == 1.0) assert_(np.maximum(z, y) == 1.0) def test_complex_nans(self): nan = np.nan for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([nan, nan, nan], dtype=np.complex) + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) assert_equal(np.maximum(arg1, arg2), out) def test_object_array(self): - arg1 = np.arange(5, dtype=np.object) + arg1 = np.arange(5, dtype=object) arg2 = arg1 + 1 assert_equal(np.maximum(arg1, arg2), arg2) @@ -956,22 +966,22 @@ # fail if cmp is used instead of rich compare. # Failure cannot be guaranteed. for i in range(1): - x = np.array(float('nan'), np.object) + x = np.array(float('nan'), object) y = 1.0 - z = np.array(float('nan'), np.object) + z = np.array(float('nan'), object) assert_(np.minimum(x, y) == 1.0) assert_(np.minimum(z, y) == 1.0) def test_complex_nans(self): nan = np.nan for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([nan, nan, nan], dtype=np.complex) + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) assert_equal(np.minimum(arg1, arg2), out) def test_object_array(self): - arg1 = np.arange(5, dtype=np.object) + arg1 = np.arange(5, dtype=object) arg2 = arg1 + 1 assert_equal(np.minimum(arg1, arg2), arg1) @@ -1012,9 +1022,9 @@ def test_complex_nans(self): nan = np.nan for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, 
cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([0, 0, nan], dtype=np.complex) + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) assert_equal(np.fmax(arg1, arg2), out) @@ -1054,17 +1064,18 @@ def test_complex_nans(self): nan = np.nan for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: - arg1 = np.array([0, cnan, cnan], dtype=np.complex) - arg2 = np.array([cnan, 0, cnan], dtype=np.complex) - out = np.array([0, 0, nan], dtype=np.complex) + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) assert_equal(np.fmin(arg1, arg2), out) -class TestBool(TestCase): +class TestBool(object): def test_exceptions(self): a = np.ones(1, dtype=np.bool_) assert_raises(TypeError, np.negative, a) assert_raises(TypeError, np.positive, a) + assert_raises(TypeError, np.subtract, a, a) def test_truth_table_logical(self): # 2, 3 and 4 serves as true values @@ -1122,7 +1133,7 @@ assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1) -class TestBitwiseUFuncs(TestCase): +class TestBitwiseUFuncs(object): bitwise_types = [np.dtype(c) for c in '?' 
+ 'bBhHiIlLqQ' + 'O'] @@ -1207,10 +1218,10 @@ assert_(type(f.reduce(btype)) is bool, msg) -class TestInt(TestCase): +class TestInt(object): def test_logical_not(self): x = np.ones(10, dtype=np.int16) - o = np.ones(10 * 2, dtype=np.bool) + o = np.ones(10 * 2, dtype=bool) tgt = o.copy() tgt[::2] = False os = o[::2] @@ -1218,24 +1229,24 @@ assert_array_equal(o, tgt) -class TestFloatingPoint(TestCase): +class TestFloatingPoint(object): def test_floating_point(self): assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) -class TestDegrees(TestCase): +class TestDegrees(object): def test_degrees(self): assert_almost_equal(ncu.degrees(np.pi), 180.0) assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0) -class TestRadians(TestCase): +class TestRadians(object): def test_radians(self): assert_almost_equal(ncu.radians(180.0), np.pi) assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi) -class TestHeavside(TestCase): +class TestHeavside(object): def test_heaviside(self): x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]]) expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]]) @@ -1257,7 +1268,7 @@ assert_equal(h, expected1.astype(np.float32)) -class TestSign(TestCase): +class TestSign(object): def test_sign(self): a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) out = np.zeros(a.shape) @@ -1274,7 +1285,7 @@ # In reference to github issue #6229 foo = np.array([-.1, 0, .1]) - a = np.sign(foo.astype(np.object)) + a = np.sign(foo.astype(object)) b = np.sign(foo) assert_array_equal(a, b) @@ -1283,11 +1294,11 @@ # In reference to github issue #6229 def test_nan(): foo = np.array([np.nan]) - a = np.sign(foo.astype(np.object)) + a = np.sign(foo.astype(object)) assert_raises(TypeError, test_nan) -class TestMinMax(TestCase): +class TestMinMax(object): def test_minmax_blocked(self): # simd tests on max/min, test all alignments, slow but important # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once) @@ -1298,8 +1309,11 @@ inp[:] = np.arange(inp.size, dtype=dt) 
inp[i] = np.nan emsg = lambda: '%r\n%s' % (inp, msg) - assert_(np.isnan(inp.max()), msg=emsg) - assert_(np.isnan(inp.min()), msg=emsg) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "invalid value encountered in reduce") + assert_(np.isnan(inp.max()), msg=emsg) + assert_(np.isnan(inp.min()), msg=emsg) inp[i] = 1e10 assert_equal(inp.max(), 1e10, err_msg=msg) @@ -1313,8 +1327,19 @@ assert_equal(d.max(), d[0]) assert_equal(d.min(), d[0]) + def test_reduce_warns(self): + # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus + # and put it before the call to an intrisic function that causes + # invalid status to be set. Also make sure warnings are emitted + for n in (2, 4, 8, 16, 32): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + for r in np.diagflat([np.nan] * n): + assert_equal(np.min(r), np.nan) + assert_equal(len(sup.log), n) + -class TestAbsoluteNegative(TestCase): +class TestAbsoluteNegative(object): def test_abs_neg_blocked(self): # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1 for dt, sz in [(np.float32, 11), (np.float64, 5)]: @@ -1323,7 +1348,7 @@ tgt = [ncu.absolute(i) for i in inp] np.absolute(inp, out=out) assert_equal(out, tgt, err_msg=msg) - self.assertTrue((out >= 0).all()) + assert_((out >= 0).all()) tgt = [-1*(i) for i in inp] np.negative(inp, out=out) @@ -1356,7 +1381,7 @@ np.abs(np.ones_like(d), out=d) -class TestPositive(TestCase): +class TestPositive(object): def test_valid(self): valid_dtypes = [int, float, complex, object] for dtype in valid_dtypes: @@ -1375,7 +1400,7 @@ np.positive(np.array(['bar'], dtype=object)) -class TestSpecialMethods(TestCase): +class TestSpecialMethods(object): def test_wrap(self): class with_wrap(object): @@ -1392,11 +1417,11 @@ x = ncu.minimum(a, a) assert_equal(x.arr, np.zeros(1)) func, args, i = x.context - self.assertTrue(func is ncu.minimum) - self.assertEqual(len(args), 2) + assert_(func is ncu.minimum) + assert_equal(len(args), 2) 
assert_equal(args[0], a) assert_equal(args[1], a) - self.assertEqual(i, 0) + assert_equal(i, 0) def test_wrap_with_iterable(self): # test fix for bug #1026: @@ -1412,7 +1437,7 @@ a = with_wrap() x = ncu.multiply(a, (1, 2, 3)) - self.assertTrue(isinstance(x, with_wrap)) + assert_(isinstance(x, with_wrap)) assert_array_equal(x, np.array((1, 2, 3))) def test_priority_with_scalar(self): @@ -1426,7 +1451,7 @@ a = A() x = np.float64(1)*a - self.assertTrue(isinstance(x, A)) + assert_(isinstance(x, A)) assert_array_equal(x, np.array(1)) def test_old_wrap(self): @@ -1467,25 +1492,25 @@ b = B() c = C() f = ncu.minimum - self.assertTrue(type(f(x, x)) is np.ndarray) - self.assertTrue(type(f(x, a)) is A) - self.assertTrue(type(f(x, b)) is B) - self.assertTrue(type(f(x, c)) is C) - self.assertTrue(type(f(a, x)) is A) - self.assertTrue(type(f(b, x)) is B) - self.assertTrue(type(f(c, x)) is C) - - self.assertTrue(type(f(a, a)) is A) - self.assertTrue(type(f(a, b)) is B) - self.assertTrue(type(f(b, a)) is B) - self.assertTrue(type(f(b, b)) is B) - self.assertTrue(type(f(b, c)) is C) - self.assertTrue(type(f(c, b)) is C) - self.assertTrue(type(f(c, c)) is C) - - self.assertTrue(type(ncu.exp(a) is A)) - self.assertTrue(type(ncu.exp(b) is B)) - self.assertTrue(type(ncu.exp(c) is C)) + assert_(type(f(x, x)) is np.ndarray) + assert_(type(f(x, a)) is A) + assert_(type(f(x, b)) is B) + assert_(type(f(x, c)) is C) + assert_(type(f(a, x)) is A) + assert_(type(f(b, x)) is B) + assert_(type(f(c, x)) is C) + + assert_(type(f(a, a)) is A) + assert_(type(f(a, b)) is B) + assert_(type(f(b, a)) is B) + assert_(type(f(b, b)) is B) + assert_(type(f(b, c)) is C) + assert_(type(f(c, b)) is C) + assert_(type(f(c, c)) is C) + + assert_(type(ncu.exp(a) is A)) + assert_(type(ncu.exp(b) is B)) + assert_(type(ncu.exp(c) is C)) def test_failing_wrap(self): @@ -1497,7 +1522,7 @@ raise RuntimeError a = A() - self.assertRaises(RuntimeError, ncu.maximum, a, a) + assert_raises(RuntimeError, ncu.maximum, a, a) def 
test_none_wrap(self): # Tests that issue #8507 is resolved. Previously, this would segfault @@ -1568,7 +1593,7 @@ raise RuntimeError a = A() - self.assertRaises(RuntimeError, ncu.maximum, a, a) + assert_raises(RuntimeError, ncu.maximum, a, a) def test_array_with_context(self): @@ -1590,10 +1615,10 @@ a = A() ncu.maximum(np.zeros(1), a) - self.assertTrue(a.func is ncu.maximum) + assert_(a.func is ncu.maximum) assert_equal(a.args[0], 0) - self.assertTrue(a.args[1] is a) - self.assertTrue(a.i == 1) + assert_(a.args[1] is a) + assert_(a.i == 1) assert_equal(ncu.maximum(a, B()), 0) assert_equal(ncu.maximum(a, C()), 0) @@ -1851,6 +1876,7 @@ # outer, wrong args assert_raises(TypeError, np.multiply.outer, a) assert_raises(TypeError, np.multiply.outer, a, a, a, a) + assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a') # at res = np.multiply.at(a, [4, 2], 'b0') @@ -2182,7 +2208,7 @@ assert_(a.info, {'inputs': [0, 2]}) -class TestChoose(TestCase): +class TestChoose(object): def test_mixed(self): c = np.array([True, True]) a = np.array([True, True]) @@ -2207,7 +2233,7 @@ else: x = .5 fr = f(x) - fz = f(np.complex(x)) + fz = f(complex(x)) assert_almost_equal(fz.real, fr, err_msg='real part %s' % f) assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f) @@ -2276,7 +2302,7 @@ points = [-1-1j, -1+1j, +1-1j, +1+1j] name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} - atol = 4*np.finfo(np.complex).eps + atol = 4*np.finfo(complex).eps for func in self.funcs: fname = func.__name__.split('.')[-1] cname = name_map.get(fname, fname) @@ -2383,12 +2409,12 @@ self.check_loss_of_precision(np.longcomplex) -class TestAttributes(TestCase): +class TestAttributes(object): def test_attributes(self): add = ncu.add assert_equal(add.__name__, 'add') - self.assertTrue(add.ntypes >= 18) # don't fail if types added - self.assertTrue('ii->i' in add.types) + assert_(add.ntypes >= 18) # don't fail if types 
added + assert_('ii->i' in add.types) assert_equal(add.nin, 2) assert_equal(add.nout, 1) assert_equal(add.identity, 0) @@ -2402,7 +2428,7 @@ "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True")) -class TestSubclass(TestCase): +class TestSubclass(object): def test_subclass_op(self): @@ -2416,7 +2442,7 @@ assert_equal(a+a, a) def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, - dtype=np.complex): + dtype=complex): """ Check for a branch cut in a function. diff -Nru python-numpy-1.13.3/numpy/core/tests/test_unicode.py python-numpy-1.14.5/numpy/core/tests/test_unicode.py --- python-numpy-1.13.3/numpy/core/tests/test_unicode.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/core/tests/test_unicode.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,7 +5,7 @@ import numpy as np from numpy.compat import unicode from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_array_equal) + run_module_suite, assert_, assert_equal, assert_array_equal) # Guess the UCS length for this python interpreter if sys.version_info[:2] >= (3, 3): @@ -68,24 +68,24 @@ # Creation tests ############################################################ -class create_zeros(object): +class CreateZeros(object): """Check the creation of zero-valued arrays""" def content_check(self, ua, ua_scalar, nbytes): # Check the length of the unicode base type - self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) + assert_(int(ua.dtype.str[2:]) == self.ulen) # Check the length of the data buffer - self.assertTrue(buffer_length(ua) == nbytes) + assert_(buffer_length(ua) == nbytes) # Small check that data in array element is ok - self.assertTrue(ua_scalar == u'') + assert_(ua_scalar == u'') # Encode to ascii and double check - self.assertTrue(ua_scalar.encode('ascii') == b'') + assert_(ua_scalar.encode('ascii') == b'') # Check buffer lengths for scalars if ucs4: - self.assertTrue(buffer_length(ua_scalar) == 0) + 
assert_(buffer_length(ua_scalar) == 0) else: - self.assertTrue(buffer_length(ua_scalar) == 0) + assert_(buffer_length(ua_scalar) == 0) def test_zeros0D(self): # Check creation of 0-dimensional objects @@ -105,47 +105,47 @@ self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) -class test_create_zeros_1(create_zeros, TestCase): +class TestCreateZeros_1(CreateZeros): """Check the creation of zero-valued arrays (size 1)""" ulen = 1 -class test_create_zeros_2(create_zeros, TestCase): +class TestCreateZeros_2(CreateZeros): """Check the creation of zero-valued arrays (size 2)""" ulen = 2 -class test_create_zeros_1009(create_zeros, TestCase): +class TestCreateZeros_1009(CreateZeros): """Check the creation of zero-valued arrays (size 1009)""" ulen = 1009 -class create_values(object): +class CreateValues(object): """Check the creation of unicode arrays with values""" def content_check(self, ua, ua_scalar, nbytes): # Check the length of the unicode base type - self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) + assert_(int(ua.dtype.str[2:]) == self.ulen) # Check the length of the data buffer - self.assertTrue(buffer_length(ua) == nbytes) + assert_(buffer_length(ua) == nbytes) # Small check that data in array element is ok - self.assertTrue(ua_scalar == self.ucs_value*self.ulen) + assert_(ua_scalar == self.ucs_value*self.ulen) # Encode to UTF-8 and double check - self.assertTrue(ua_scalar.encode('utf-8') == + assert_(ua_scalar.encode('utf-8') == (self.ucs_value*self.ulen).encode('utf-8')) # Check buffer lengths for scalars if ucs4: - self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen) + assert_(buffer_length(ua_scalar) == 4*self.ulen) else: if self.ucs_value == ucs4_value: # In UCS2, the \U0010FFFF will be represented using a # surrogate *pair* - self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen) + assert_(buffer_length(ua_scalar) == 2*2*self.ulen) else: # In UCS2, the \uFFFF will be represented using a # regular 2-byte word - 
self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen) + assert_(buffer_length(ua_scalar) == 2*self.ulen) def test_values0D(self): # Check creation of 0-dimensional objects with values @@ -165,37 +165,37 @@ self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) -class test_create_values_1_ucs2(create_values, TestCase): +class TestCreateValues_1_UCS2(CreateValues): """Check the creation of valued arrays (size 1, UCS2 values)""" ulen = 1 ucs_value = ucs2_value -class test_create_values_1_ucs4(create_values, TestCase): +class TestCreateValues_1_UCS4(CreateValues): """Check the creation of valued arrays (size 1, UCS4 values)""" ulen = 1 ucs_value = ucs4_value -class test_create_values_2_ucs2(create_values, TestCase): +class TestCreateValues_2_UCS2(CreateValues): """Check the creation of valued arrays (size 2, UCS2 values)""" ulen = 2 ucs_value = ucs2_value -class test_create_values_2_ucs4(create_values, TestCase): +class TestCreateValues_2_UCS4(CreateValues): """Check the creation of valued arrays (size 2, UCS4 values)""" ulen = 2 ucs_value = ucs4_value -class test_create_values_1009_ucs2(create_values, TestCase): +class TestCreateValues_1009_UCS2(CreateValues): """Check the creation of valued arrays (size 1009, UCS2 values)""" ulen = 1009 ucs_value = ucs2_value -class test_create_values_1009_ucs4(create_values, TestCase): +class TestCreateValues_1009_UCS4(CreateValues): """Check the creation of valued arrays (size 1009, UCS4 values)""" ulen = 1009 ucs_value = ucs4_value @@ -205,32 +205,32 @@ # Assignment tests ############################################################ -class assign_values(object): +class AssignValues(object): """Check the assignment of unicode arrays with values""" def content_check(self, ua, ua_scalar, nbytes): # Check the length of the unicode base type - self.assertTrue(int(ua.dtype.str[2:]) == self.ulen) + assert_(int(ua.dtype.str[2:]) == self.ulen) # Check the length of the data buffer - self.assertTrue(buffer_length(ua) == nbytes) + 
assert_(buffer_length(ua) == nbytes) # Small check that data in array element is ok - self.assertTrue(ua_scalar == self.ucs_value*self.ulen) + assert_(ua_scalar == self.ucs_value*self.ulen) # Encode to UTF-8 and double check - self.assertTrue(ua_scalar.encode('utf-8') == + assert_(ua_scalar.encode('utf-8') == (self.ucs_value*self.ulen).encode('utf-8')) # Check buffer lengths for scalars if ucs4: - self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen) + assert_(buffer_length(ua_scalar) == 4*self.ulen) else: if self.ucs_value == ucs4_value: # In UCS2, the \U0010FFFF will be represented using a # surrogate *pair* - self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen) + assert_(buffer_length(ua_scalar) == 2*2*self.ulen) else: # In UCS2, the \uFFFF will be represented using a # regular 2-byte word - self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen) + assert_(buffer_length(ua_scalar) == 2*self.ulen) def test_values0D(self): # Check assignment of 0-dimensional objects with values @@ -255,37 +255,37 @@ self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4) -class test_assign_values_1_ucs2(assign_values, TestCase): +class TestAssignValues_1_UCS2(AssignValues): """Check the assignment of valued arrays (size 1, UCS2 values)""" ulen = 1 ucs_value = ucs2_value -class test_assign_values_1_ucs4(assign_values, TestCase): +class TestAssignValues_1_UCS4(AssignValues): """Check the assignment of valued arrays (size 1, UCS4 values)""" ulen = 1 ucs_value = ucs4_value -class test_assign_values_2_ucs2(assign_values, TestCase): +class TestAssignValues_2_UCS2(AssignValues): """Check the assignment of valued arrays (size 2, UCS2 values)""" ulen = 2 ucs_value = ucs2_value -class test_assign_values_2_ucs4(assign_values, TestCase): +class TestAssignValues_2_UCS4(AssignValues): """Check the assignment of valued arrays (size 2, UCS4 values)""" ulen = 2 ucs_value = ucs4_value -class test_assign_values_1009_ucs2(assign_values, TestCase): +class 
TestAssignValues_1009_UCS2(AssignValues): """Check the assignment of valued arrays (size 1009, UCS2 values)""" ulen = 1009 ucs_value = ucs2_value -class test_assign_values_1009_ucs4(assign_values, TestCase): +class TestAssignValues_1009_UCS4(AssignValues): """Check the assignment of valued arrays (size 1009, UCS4 values)""" ulen = 1009 ucs_value = ucs4_value @@ -295,7 +295,7 @@ # Byteorder tests ############################################################ -class byteorder_values: +class ByteorderValues(object): """Check the byteorder of unicode arrays in round-trip conversions""" def test_values0D(self): @@ -305,7 +305,7 @@ # This changes the interpretation of the data region (but not the # actual data), therefore the returned scalars are not # the same (they are byte-swapped versions of each other). - self.assertTrue(ua[()] != ua2[()]) + assert_(ua[()] != ua2[()]) ua3 = ua2.newbyteorder() # Arrays must be equal after the round-trip assert_equal(ua, ua3) @@ -314,8 +314,8 @@ # Check byteorder of single-dimensional objects ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) ua2 = ua.newbyteorder() - self.assertTrue((ua != ua2).all()) - self.assertTrue(ua[-1] != ua2[-1]) + assert_((ua != ua2).all()) + assert_(ua[-1] != ua2[-1]) ua3 = ua2.newbyteorder() # Arrays must be equal after the round-trip assert_equal(ua, ua3) @@ -325,8 +325,8 @@ ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) ua2 = ua.newbyteorder() - self.assertTrue((ua != ua2).all()) - self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1]) + assert_((ua != ua2).all()) + assert_(ua[-1, -1, -1] != ua2[-1, -1, -1]) ua3 = ua2.newbyteorder() # Arrays must be equal after the round-trip assert_equal(ua, ua3) @@ -338,8 +338,8 @@ test2 = np.repeat(test1, 2)[::2] for ua in (test1, test2): ua2 = ua.astype(dtype=ua.dtype.newbyteorder()) - self.assertTrue((ua == ua2).all()) - self.assertTrue(ua[-1] == ua2[-1]) + assert_((ua == ua2).all()) + assert_(ua[-1] == ua2[-1]) ua3 = 
ua2.astype(dtype=ua.dtype) # Arrays must be equal after the round-trip assert_equal(ua, ua3) @@ -353,45 +353,45 @@ # Cast to a longer type with zero padding longer_type = np.dtype('U%s' % (self.ulen+1)).newbyteorder() ua2 = ua.astype(dtype=longer_type) - self.assertTrue((ua == ua2).all()) - self.assertTrue(ua[-1] == ua2[-1]) + assert_((ua == ua2).all()) + assert_(ua[-1] == ua2[-1]) # Cast back again with truncating: ua3 = ua2.astype(dtype=ua.dtype) # Arrays must be equal after the round-trip assert_equal(ua, ua3) -class test_byteorder_1_ucs2(byteorder_values, TestCase): +class TestByteorder_1_UCS2(ByteorderValues): """Check the byteorder in unicode (size 1, UCS2 values)""" ulen = 1 ucs_value = ucs2_value -class test_byteorder_1_ucs4(byteorder_values, TestCase): +class TestByteorder_1_UCS4(ByteorderValues): """Check the byteorder in unicode (size 1, UCS4 values)""" ulen = 1 ucs_value = ucs4_value -class test_byteorder_2_ucs2(byteorder_values, TestCase): +class TestByteorder_2_UCS2(ByteorderValues): """Check the byteorder in unicode (size 2, UCS2 values)""" ulen = 2 ucs_value = ucs2_value -class test_byteorder_2_ucs4(byteorder_values, TestCase): +class TestByteorder_2_UCS4(ByteorderValues): """Check the byteorder in unicode (size 2, UCS4 values)""" ulen = 2 ucs_value = ucs4_value -class test_byteorder_1009_ucs2(byteorder_values, TestCase): +class TestByteorder_1009_UCS2(ByteorderValues): """Check the byteorder in unicode (size 1009, UCS2 values)""" ulen = 1009 ucs_value = ucs2_value -class test_byteorder_1009_ucs4(byteorder_values, TestCase): +class TestByteorder_1009_UCS4(ByteorderValues): """Check the byteorder in unicode (size 1009, UCS4 values)""" ulen = 1009 ucs_value = ucs4_value diff -Nru python-numpy-1.13.3/numpy/ctypeslib.py python-numpy-1.14.5/numpy/ctypeslib.py --- python-numpy-1.13.3/numpy/ctypeslib.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ctypeslib.py 2018-06-12 17:35:36.000000000 +0000 @@ -164,7 +164,7 @@ return num 
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE', - 'OWNDATA', 'UPDATEIFCOPY'] + 'OWNDATA', 'UPDATEIFCOPY', 'WRITEBACKIFCOPY'] def _flags_fromnum(num): res = [] for key in _flagnames: @@ -244,6 +244,7 @@ - OWNDATA / O - WRITEABLE / W - ALIGNED / A + - WRITEBACKIFCOPY / X - UPDATEIFCOPY / U Returns @@ -283,7 +284,7 @@ if num is None: try: flags = [x.strip().upper() for x in flags] - except: + except Exception: raise TypeError("invalid flags specification") num = _num_fromflags(flags) try: diff -Nru python-numpy-1.13.3/numpy/distutils/ccompiler.py python-numpy-1.14.5/numpy/distutils/ccompiler.py --- python-numpy-1.13.3/numpy/distutils/ccompiler.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/ccompiler.py 2018-06-12 18:28:52.000000000 +0000 @@ -437,7 +437,7 @@ log.info("compiler '%s' is set to %s" % (attrname, attr)) try: self.get_version() - except: + except Exception: pass if log._global_log.threshold<2: print('*'*80) diff -Nru python-numpy-1.13.3/numpy/distutils/command/build_clib.py python-numpy-1.14.5/numpy/distutils/command/build_clib.py --- python-numpy-1.13.3/numpy/distutils/command/build_clib.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/command/build_clib.py 2018-06-12 17:31:56.000000000 +0000 @@ -7,7 +7,7 @@ import shutil from distutils.command.build_clib import build_clib as old_build_clib from distutils.errors import DistutilsSetupError, DistutilsError, \ - DistutilsFileError + DistutilsFileError from numpy.distutils import log from distutils.dep_util import newer_group @@ -19,9 +19,10 @@ _l = old_build_clib.user_options for _i in range(len(_l)): if _l[_i][0] in ['build-clib', 'build-temp']: - _l[_i] = (_l[_i][0]+'=',)+_l[_i][1:] + _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:] # + class build_clib(old_build_clib): description = "build C/C++/F libraries used by Python extensions" @@ -32,7 +33,7 @@ ('inplace', 'i', 'Build in-place'), ('parallel=', 'j', "number of parallel jobs"), 
- ] + ] boolean_options = old_build_clib.boolean_options + ['inplace'] @@ -75,7 +76,8 @@ for (lib_name, build_info) in self.libraries: l = build_info.get('language', None) - if l and l not in languages: languages.append(l) + if l and l not in languages: + languages.append(l) from distutils.ccompiler import new_compiler self.compiler = new_compiler(compiler=self.compiler, @@ -94,11 +96,11 @@ if self.have_f_sources(): from numpy.distutils.fcompiler import new_fcompiler self._f_compiler = new_fcompiler(compiler=self.fcompiler, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force, - requiref90='f90' in languages, - c_compiler=self.compiler) + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force, + requiref90='f90' in languages, + c_compiler=self.compiler) if self._f_compiler is not None: self._f_compiler.customize(self.distribution) @@ -114,10 +116,10 @@ self.build_libraries(self.libraries) if self.inplace: - for l in self.distribution.installed_libraries: + for l in self.distribution.installed_libraries: libname = self.compiler.library_filename(l.name) source = os.path.join(self.build_clib, libname) - target = os.path.join(l.target_dir, libname) + target = os.path.join(l.target_dir, libname) self.mkpath(l.target_dir) shutil.copy(source, target) @@ -140,21 +142,25 @@ sources = build_info.get('sources') if sources is None or not is_sequence(sources): raise DistutilsSetupError(("in 'libraries' option (library '%s'), " + - "'sources' must be present and must be " + - "a list of source filenames") % lib_name) + "'sources' must be present and must be " + + "a list of source filenames") % lib_name) sources = list(sources) c_sources, cxx_sources, f_sources, fmodule_sources \ - = filter_sources(sources) + = filter_sources(sources) requiref90 = not not fmodule_sources or \ - build_info.get('language', 'c')=='f90' + build_info.get('language', 'c') == 'f90' # save source type information so that build_ext can use it. 
source_languages = [] - if c_sources: source_languages.append('c') - if cxx_sources: source_languages.append('c++') - if requiref90: source_languages.append('f90') - elif f_sources: source_languages.append('f77') + if c_sources: + source_languages.append('c') + if cxx_sources: + source_languages.append('c++') + if requiref90: + source_languages.append('f90') + elif f_sources: + source_languages.append('f77') build_info['source_languages'] = source_languages lib_file = compiler.library_filename(lib_name, @@ -168,8 +174,8 @@ config_fc = build_info.get('config_fc', {}) if fcompiler is not None and config_fc: - log.info('using additional config_fc from setup script '\ - 'for fortran compiler: %s' \ + log.info('using additional config_fc from setup script ' + 'for fortran compiler: %s' % (config_fc,)) from numpy.distutils.fcompiler import new_fcompiler fcompiler = new_fcompiler(compiler=fcompiler.compiler_type, @@ -186,12 +192,14 @@ # check availability of Fortran compilers if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("library %s has Fortran sources"\ - " but no Fortran compiler found" % (lib_name)) + raise DistutilsError("library %s has Fortran sources" + " but no Fortran compiler found" % (lib_name)) if fcompiler is not None: - fcompiler.extra_f77_compile_args = build_info.get('extra_f77_compile_args') or [] - fcompiler.extra_f90_compile_args = build_info.get('extra_f90_compile_args') or [] + fcompiler.extra_f77_compile_args = build_info.get( + 'extra_f77_compile_args') or [] + fcompiler.extra_f90_compile_args = build_info.get( + 'extra_f90_compile_args') or [] macros = build_info.get('macros') include_dirs = build_info.get('include_dirs') @@ -203,9 +211,10 @@ # where compiled F90 module files are: module_dirs = build_info.get('module_dirs') or [] module_build_dir = os.path.dirname(lib_file) - if requiref90: self.mkpath(module_build_dir) + if requiref90: + self.mkpath(module_build_dir) - if compiler.compiler_type=='msvc': + if 
compiler.compiler_type == 'msvc': # this hack works around the msvc compiler attributes # problem, msvc uses its own convention :( c_sources += cxx_sources @@ -239,7 +248,7 @@ if requiref90: if fcompiler.module_dir_switch is None: existing_modules = glob('*.mod') - extra_postargs += fcompiler.module_options(\ + extra_postargs += fcompiler.module_options( module_dirs, module_build_dir) if fmodule_sources: @@ -257,14 +266,14 @@ if f in existing_modules: continue t = os.path.join(module_build_dir, f) - if os.path.abspath(f)==os.path.abspath(t): + if os.path.abspath(f) == os.path.abspath(t): continue if os.path.isfile(t): os.remove(t) try: self.move_file(f, module_build_dir) except DistutilsFileError: - log.warn('failed to move %r to %r' \ + log.warn('failed to move %r to %r' % (f, module_build_dir)) if f_sources: @@ -278,13 +287,32 @@ else: f_objects = [] - objects.extend(f_objects) - - # assume that default linker is suitable for - # linking Fortran object files - compiler.create_static_lib(objects, lib_name, - output_dir=self.build_clib, - debug=self.debug) + if f_objects and not fcompiler.can_ccompiler_link(compiler): + # Default linker cannot link Fortran object files, and results + # need to be wrapped later. Instead of creating a real static + # library, just keep track of the object files. 
+ listfn = os.path.join(self.build_clib, + lib_name + '.fobjects') + with open(listfn, 'w') as f: + f.write("\n".join(os.path.abspath(obj) for obj in f_objects)) + + listfn = os.path.join(self.build_clib, + lib_name + '.cobjects') + with open(listfn, 'w') as f: + f.write("\n".join(os.path.abspath(obj) for obj in objects)) + + # create empty "library" file for dependency tracking + lib_fname = os.path.join(self.build_clib, + lib_name + compiler.static_lib_extension) + with open(lib_fname, 'wb') as f: + pass + else: + # assume that default linker is suitable for + # linking Fortran object files + objects.extend(f_objects) + compiler.create_static_lib(objects, lib_name, + output_dir=self.build_clib, + debug=self.debug) # fix library dependencies clib_libraries = build_info.get('libraries', []) diff -Nru python-numpy-1.13.3/numpy/distutils/command/build_ext.py python-numpy-1.14.5/numpy/distutils/command/build_ext.py --- python-numpy-1.13.3/numpy/distutils/command/build_ext.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/command/build_ext.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,27 +5,25 @@ import os import sys +import shutil from glob import glob from distutils.dep_util import newer_group from distutils.command.build_ext import build_ext as old_build_ext from distutils.errors import DistutilsFileError, DistutilsSetupError,\ - DistutilsError + DistutilsError from distutils.file_util import copy_file from numpy.distutils import log from numpy.distutils.exec_command import exec_command -from numpy.distutils.system_info import combine_paths +from numpy.distutils.system_info import combine_paths, system_info from numpy.distutils.misc_util import filter_sources, has_f_sources, \ - has_cxx_sources, get_ext_source_files, \ - get_numpy_include_dirs, is_sequence, get_build_architecture, \ - msvc_version + has_cxx_sources, get_ext_source_files, \ + get_numpy_include_dirs, is_sequence, get_build_architecture, \ + msvc_version from 
numpy.distutils.command.config_compiler import show_fortran_compilers -try: - set -except NameError: - from sets import Set as set + class build_ext (old_build_ext): @@ -36,12 +34,12 @@ "specify the Fortran compiler type"), ('parallel=', 'j', "number of parallel jobs"), - ] + ] help_options = old_build_ext.help_options + [ ('help-fcompiler', None, "list available Fortran compilers", show_fortran_compilers), - ] + ] def initialize_options(self): old_build_ext.initialize_options(self) @@ -84,11 +82,13 @@ if self.distribution.has_c_libraries(): if self.inplace: if self.distribution.have_run.get('build_clib'): - log.warn('build_clib already run, it is too late to ' \ - 'ensure in-place build of build_clib') - build_clib = self.distribution.get_command_obj('build_clib') + log.warn('build_clib already run, it is too late to ' + 'ensure in-place build of build_clib') + build_clib = self.distribution.get_command_obj( + 'build_clib') else: - build_clib = self.distribution.get_command_obj('build_clib') + build_clib = self.distribution.get_command_obj( + 'build_clib') build_clib.inplace = 1 build_clib.ensure_finalized() build_clib.run() @@ -119,13 +119,18 @@ self.compiler.customize_cmd(self) self.compiler.show_customization() + # Setup directory for storing generated extra DLL files on Windows + self.extra_dll_dir = os.path.join(self.build_temp, 'extra-dll') + if not os.path.isdir(self.extra_dll_dir): + os.makedirs(self.extra_dll_dir) + # Create mapping of libraries built by build_clib: clibs = {} if build_clib is not None: for libname, build_info in build_clib.libraries or []: if libname in clibs and clibs[libname] != build_info: - log.warn('library %r defined more than once,'\ - ' overwriting build_info\n%s... \nwith\n%s...' \ + log.warn('library %r defined more than once,' + ' overwriting build_info\n%s... \nwith\n%s...' % (libname, repr(clibs[libname])[:300], repr(build_info)[:300])) clibs[libname] = build_info # .. 
and distribution libraries: @@ -181,7 +186,7 @@ elif 'f77' in ext_languages: ext_language = 'f77' else: - ext_language = 'c' # default + ext_language = 'c' # default if l and l != ext_language and ext.language: log.warn('resetting extension %r language from %r to %r.' % (ext.name, l, ext_language)) @@ -196,9 +201,9 @@ # Initialize C++ compiler: if need_cxx_compiler: self._cxx_compiler = new_compiler(compiler=compiler_type, - verbose=self.verbose, - dry_run=self.dry_run, - force=self.force) + verbose=self.verbose, + dry_run=self.dry_run, + force=self.force) compiler = self._cxx_compiler compiler.customize(self.distribution, need_cxx=need_cxx_compiler) compiler.customize_cmd(self) @@ -238,7 +243,7 @@ dry_run=self.dry_run, force=self.force, requiref90=True, - c_compiler = self.compiler) + c_compiler=self.compiler) fcompiler = self._f90_compiler if fcompiler: ctype = fcompiler.compiler_type @@ -256,6 +261,16 @@ # Build extensions self.build_extensions() + # Copy over any extra DLL files + runtime_lib_dir = os.path.join( + self.build_lib, self.distribution.get_name(), '.libs') + for fn in os.listdir(self.extra_dll_dir): + if not fn.lower().endswith('.dll'): + continue + if not os.path.isdir(runtime_lib_dir): + os.makedirs(runtime_lib_dir) + runtime_lib = os.path.join(self.extra_dll_dir, fn) + copy_file(runtime_lib, runtime_lib_dir) def swig_sources(self, sources): # Do nothing. Swig sources have beed handled in build_src command. @@ -299,11 +314,9 @@ macros.append((undef,)) c_sources, cxx_sources, f_sources, fmodule_sources = \ - filter_sources(ext.sources) + filter_sources(ext.sources) - - - if self.compiler.compiler_type=='msvc': + if self.compiler.compiler_type == 'msvc': if cxx_sources: # Needed to compile kiva.agg._agg extension. extra_args.append('/Zm1000') @@ -313,32 +326,34 @@ cxx_sources = [] # Set Fortran/C++ compilers for compilation and linking. 
- if ext.language=='f90': + if ext.language == 'f90': fcompiler = self._f90_compiler - elif ext.language=='f77': + elif ext.language == 'f77': fcompiler = self._f77_compiler - else: # in case ext.language is c++, for instance + else: # in case ext.language is c++, for instance fcompiler = self._f90_compiler or self._f77_compiler if fcompiler is not None: - fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(ext, 'extra_f77_compile_args') else [] - fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(ext, 'extra_f90_compile_args') else [] + fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr( + ext, 'extra_f77_compile_args') else [] + fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr( + ext, 'extra_f90_compile_args') else [] cxx_compiler = self._cxx_compiler # check for the availability of required compilers if cxx_sources and cxx_compiler is None: - raise DistutilsError("extension %r has C++ sources" \ - "but no C++ compiler found" % (ext.name)) + raise DistutilsError("extension %r has C++ sources" + "but no C++ compiler found" % (ext.name)) if (f_sources or fmodule_sources) and fcompiler is None: - raise DistutilsError("extension %r has Fortran sources " \ - "but no Fortran compiler found" % (ext.name)) + raise DistutilsError("extension %r has Fortran sources " + "but no Fortran compiler found" % (ext.name)) if ext.language in ['f77', 'f90'] and fcompiler is None: - self.warn("extension %r has Fortran libraries " \ - "but no Fortran linker found, using default linker" % (ext.name)) - if ext.language=='c++' and cxx_compiler is None: - self.warn("extension %r has C++ libraries " \ - "but no C++ linker found, using default linker" % (ext.name)) + self.warn("extension %r has Fortran libraries " + "but no Fortran linker found, using default linker" % (ext.name)) + if ext.language == 'c++' and cxx_compiler is None: + self.warn("extension %r has C++ 
libraries " + "but no C++ linker found, using default linker" % (ext.name)) - kws = {'depends':ext.depends} + kws = {'depends': ext.depends} output_dir = self.build_temp include_dirs = ext.include_dirs + get_numpy_include_dirs() @@ -391,7 +406,7 @@ if f in existing_modules: continue t = os.path.join(module_build_dir, f) - if os.path.abspath(f)==os.path.abspath(t): + if os.path.abspath(f) == os.path.abspath(t): continue if os.path.isfile(t): os.remove(t) @@ -410,7 +425,12 @@ extra_postargs=extra_postargs, depends=ext.depends) - objects = c_objects + f_objects + if f_objects and not fcompiler.can_ccompiler_link(self.compiler): + unlinkable_fobjects = f_objects + objects = c_objects + else: + unlinkable_fobjects = [] + objects = c_objects + f_objects if ext.extra_objects: objects.extend(ext.extra_objects) @@ -423,13 +443,20 @@ if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'): # expand libraries with fcompiler libraries as we are # not using fcompiler linker - self._libs_with_msvc_and_fortran(fcompiler, libraries, library_dirs) + self._libs_with_msvc_and_fortran( + fcompiler, libraries, library_dirs) elif ext.language in ['f77', 'f90'] and fcompiler is not None: linker = fcompiler.link_shared_object - if ext.language=='c++' and cxx_compiler is not None: + if ext.language == 'c++' and cxx_compiler is not None: linker = cxx_compiler.link_shared_object + if fcompiler is not None: + objects, libraries = self._process_unlinkable_fobjects( + objects, libraries, + fcompiler, library_dirs, + unlinkable_fobjects) + linker(objects, ext_filename, libraries=libraries, library_dirs=library_dirs, @@ -444,23 +471,59 @@ build_src = self.get_finalized_command("build_src").build_src build_clib = self.get_finalized_command("build_clib").build_clib objects = self.compiler.compile([os.path.join(build_src, - "gfortran_vs2003_hack.c")], - output_dir=self.build_temp) - self.compiler.create_static_lib(objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) + 
"gfortran_vs2003_hack.c")], + output_dir=self.build_temp) + self.compiler.create_static_lib( + objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug) + + def _process_unlinkable_fobjects(self, objects, libraries, + fcompiler, library_dirs, + unlinkable_fobjects): + libraries = list(libraries) + objects = list(objects) + unlinkable_fobjects = list(unlinkable_fobjects) + + # Expand possible fake static libraries to objects + for lib in list(libraries): + for libdir in library_dirs: + fake_lib = os.path.join(libdir, lib + '.fobjects') + if os.path.isfile(fake_lib): + # Replace fake static library + libraries.remove(lib) + with open(fake_lib, 'r') as f: + unlinkable_fobjects.extend(f.read().splitlines()) + + # Expand C objects + c_lib = os.path.join(libdir, lib + '.cobjects') + with open(c_lib, 'r') as f: + objects.extend(f.read().splitlines()) + + # Wrap unlinkable objects to a linkable one + if unlinkable_fobjects: + fobjects = [os.path.relpath(obj) for obj in unlinkable_fobjects] + wrapped = fcompiler.wrap_unlinkable_objects( + fobjects, output_dir=self.build_temp, + extra_dll_dir=self.extra_dll_dir) + objects.extend(wrapped) + + return objects, libraries def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries, c_library_dirs): - if fcompiler is None: return + if fcompiler is None: + return for libname in c_libraries: - if libname.startswith('msvc'): continue + if libname.startswith('msvc'): + continue fileexists = False for libdir in c_library_dirs or []: libfile = os.path.join(libdir, '%s.lib' % (libname)) if os.path.isfile(libfile): fileexists = True break - if fileexists: continue + if fileexists: + continue # make g77-compiled static libs available to MSVC fileexists = False for libdir in c_library_dirs: @@ -474,7 +537,8 @@ c_library_dirs.append(self.build_temp) fileexists = True break - if fileexists: continue + if fileexists: + continue log.warn('could not find library %r in directories %s' % (libname, c_library_dirs)) @@ -502,14 
+566,14 @@ if self.build_temp not in c_library_dirs: c_library_dirs.append(self.build_temp) - def get_source_files (self): + def get_source_files(self): self.check_extensions_list(self.extensions) filenames = [] for ext in self.extensions: filenames.extend(get_ext_source_files(ext)) return filenames - def get_outputs (self): + def get_outputs(self): self.check_extensions_list(self.extensions) outputs = [] diff -Nru python-numpy-1.13.3/numpy/distutils/command/config.py python-numpy-1.14.5/numpy/distutils/command/config.py --- python-numpy-1.13.3/numpy/distutils/command/config.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/command/config.py 2018-06-12 18:28:52.000000000 +0000 @@ -441,7 +441,7 @@ src, obj, exe = self._link(body, headers, include_dirs, libraries, library_dirs, lang) grabber.restore() - except: + except Exception: output = grabber.data grabber.restore() raise diff -Nru python-numpy-1.13.3/numpy/distutils/cpuinfo.py python-numpy-1.14.5/numpy/distutils/cpuinfo.py --- python-numpy-1.13.3/numpy/distutils/cpuinfo.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/cpuinfo.py 2018-06-12 17:31:56.000000000 +0000 @@ -35,7 +35,7 @@ except EnvironmentError: e = get_exception() warnings.warn(str(e), UserWarning, stacklevel=stacklevel) - return False, output + return False, "" if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: return True, output return False, output @@ -75,7 +75,7 @@ def _try_call(self, func): try: return func() - except: + except Exception: pass def __getattr__(self, name): @@ -336,7 +336,7 @@ def get_ip(self): try: return self.info.get('MACHINE') - except: pass + except Exception: pass def __machine(self, n): return self.info.get('MACHINE').lower() == 'ip%s' % (n) def _is_IP19(self): return self.__machine(19) @@ -523,7 +523,7 @@ info[-1]["Family"]=int(srch.group("FML")) info[-1]["Model"]=int(srch.group("MDL")) info[-1]["Stepping"]=int(srch.group("STP")) - except: + 
except Exception: print(sys.exc_info()[1], '(ignoring)') self.__class__.info = info diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/absoft.py python-numpy-1.14.5/numpy/distutils/fcompiler/absoft.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/absoft.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/absoft.py 2018-06-12 17:31:56.000000000 +0000 @@ -154,7 +154,5 @@ if __name__ == '__main__': from distutils import log log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='absoft') - compiler.customize() - print(compiler.get_version()) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='absoft').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/compaq.py python-numpy-1.14.5/numpy/distutils/fcompiler/compaq.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/compaq.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/compaq.py 2018-06-12 17:31:56.000000000 +0000 @@ -122,7 +122,5 @@ if __name__ == '__main__': from distutils import log log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='compaq') - compiler.customize() - print(compiler.get_version()) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='compaq').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/g95.py python-numpy-1.14.5/numpy/distutils/fcompiler/g95.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/g95.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/g95.py 2018-06-12 17:31:56.000000000 +0000 @@ -39,7 +39,6 @@ if __name__ == '__main__': from distutils import log + from numpy.distutils import customized_fcompiler log.set_verbosity(2) - compiler = G95FCompiler() - compiler.customize() - print(compiler.get_version()) + 
print(customized_fcompiler('g95').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/gnu.py python-numpy-1.14.5/numpy/distutils/fcompiler/gnu.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/gnu.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/gnu.py 2018-06-12 17:31:56.000000000 +0000 @@ -6,37 +6,43 @@ import warnings import platform import tempfile +import hashlib +import base64 from subprocess import Popen, PIPE, STDOUT - +from copy import copy from numpy.distutils.fcompiler import FCompiler from numpy.distutils.exec_command import exec_command -from numpy.distutils.misc_util import msvc_runtime_library from numpy.distutils.compat import get_exception +from numpy.distutils.system_info import system_info compilers = ['GnuFCompiler', 'Gnu95FCompiler'] TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") # XXX: handle cross compilation + + def is_win64(): return sys.platform == "win32" and platform.architecture()[0] == "64bit" + if is_win64(): #_EXTRAFLAGS = ["-fno-leading-underscore"] _EXTRAFLAGS = [] else: _EXTRAFLAGS = [] + class GnuFCompiler(FCompiler): compiler_type = 'gnu' - compiler_aliases = ('g77',) + compiler_aliases = ('g77', ) description = 'GNU Fortran 77 compiler' def gnu_version_match(self, version_string): """Handle the different versions of GNU fortran compilers""" # Strip warning(s) that may be emitted by gfortran while version_string.startswith('gfortran: warning'): - version_string = version_string[version_string.find('\n')+1:] + version_string = version_string[version_string.find('\n') + 1:] # Gfortran versions from after 2010 will output a simple string # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older @@ -92,7 +98,7 @@ 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"], 'linker_exe' : [None, "-g", "-Wall"] - } + } module_dir_switch = None module_include_switch = None @@ -130,8 +136,8 @@ try: get_makefile_filename = sc.get_makefile_filename except AttributeError: - 
pass # i.e. PyPy - else: + pass # i.e. PyPy + else: filename = get_makefile_filename() sc.parse_makefile(filename, g) target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3') @@ -154,13 +160,29 @@ return opt def get_libgcc_dir(self): - status, output = exec_command(self.compiler_f77 + - ['-print-libgcc-file-name'], - use_tee=0) + status, output = exec_command( + self.compiler_f77 + ['-print-libgcc-file-name'], use_tee=0) if not status: return os.path.dirname(output) return None + def get_libgfortran_dir(self): + if sys.platform[:5] == 'linux': + libgfortran_name = 'libgfortran.so' + elif sys.platform == 'darwin': + libgfortran_name = 'libgfortran.dylib' + else: + libgfortran_name = None + + libgfortran_dir = None + if libgfortran_name: + find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] + status, output = exec_command( + self.compiler_f77 + find_lib_arg, use_tee=0) + if not status: + libgfortran_dir = os.path.dirname(output) + return libgfortran_dir + def get_library_dirs(self): opt = [] if sys.platform[:5] != 'linux': @@ -171,12 +193,16 @@ d = os.path.normpath(d) path = os.path.join(d, "lib%s.a" % self.g2c) if not os.path.exists(path): - root = os.path.join(d, *((os.pardir,)*4)) + root = os.path.join(d, *((os.pardir, ) * 4)) d2 = os.path.abspath(os.path.join(root, 'lib')) path = os.path.join(d2, "lib%s.a" % self.g2c) if os.path.exists(path): opt.append(d2) opt.append(d) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) return opt def get_libraries(self): @@ -194,13 +220,8 @@ opt.append(g2c) c_compiler = self.c_compiler if sys.platform == 'win32' and c_compiler and \ - c_compiler.compiler_type == 'msvc': - # the following code is not needed (read: breaks) when using MinGW - # in case want to link F77 compiled code with MSVC + c_compiler.compiler_type == 'msvc': opt.append('gcc') - runtime_lib = msvc_runtime_library() - if runtime_lib: - 
opt.append(runtime_lib) if sys.platform == 'darwin': opt.append('cc_dynamic') return opt @@ -242,7 +263,7 @@ class Gnu95FCompiler(GnuFCompiler): compiler_type = 'gnu95' - compiler_aliases = ('gfortran',) + compiler_aliases = ('gfortran', ) description = 'GNU Fortran 95 compiler' def version_match(self, version_string): @@ -257,8 +278,10 @@ # use -mno-cygwin flag for gfortran when Python is not # Cygwin-Python if sys.platform == 'win32': - for key in ['version_cmd', 'compiler_f77', 'compiler_f90', - 'compiler_fix', 'linker_so', 'linker_exe']: + for key in [ + 'version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe' + ]: self.executables[key].append('-mno-cygwin') return v @@ -275,7 +298,7 @@ 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"], 'linker_exe' : [None, "-Wall"] - } + } module_dir_switch = '-J' module_include_switch = '-I' @@ -320,11 +343,15 @@ target = self.get_target() if target: d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, *((os.pardir,)*4)) + root = os.path.join(d, *((os.pardir, ) * 4)) path = os.path.join(root, "lib") mingwdir = os.path.normpath(path) if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): opt.append(mingwdir) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) return opt def get_libraries(self): @@ -336,32 +363,148 @@ if c_compiler and c_compiler.compiler_type == "msvc": if "gcc" in opt: i = opt.index("gcc") - opt.insert(i+1, "mingwex") - opt.insert(i+1, "mingw32") - # XXX: fix this mess, does not work for mingw - if is_win64(): - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - return [] - else: - pass + opt.insert(i + 1, "mingwex") + opt.insert(i + 1, "mingw32") + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + return [] + else: + pass return opt def get_target(self): - status, 
output = exec_command(self.compiler_f77 + - ['-v'], - use_tee=0) + status, output = exec_command(self.compiler_f77 + ['-v'], use_tee=0) if not status: m = TARGET_R.search(output) if m: return m.group(1) return "" - def get_flags_opt(self): + def _hash_files(self, filenames): + h = hashlib.sha1() + for fn in filenames: + with open(fn, 'rb') as f: + while True: + block = f.read(131072) + if not block: + break + h.update(block) + text = base64.b32encode(h.digest()) + if sys.version_info[0] >= 3: + text = text.decode('ascii') + return text.rstrip('=') + + def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, + chained_dlls, is_archive): + """Create a wrapper shared library for the given objects + + Return an MSVC-compatible lib + """ + + c_compiler = self.c_compiler + if c_compiler.compiler_type != "msvc": + raise ValueError("This method only supports MSVC") + + object_hash = self._hash_files(list(objects) + list(chained_dlls)) + + if is_win64(): + tag = 'win_amd64' + else: + tag = 'win32' + + basename = 'lib' + os.path.splitext( + os.path.basename(objects[0]))[0][:8] + root_name = basename + '.' + object_hash + '.gfortran-' + tag + dll_name = root_name + '.dll' + def_name = root_name + '.def' + lib_name = root_name + '.lib' + dll_path = os.path.join(extra_dll_dir, dll_name) + def_path = os.path.join(output_dir, def_name) + lib_path = os.path.join(output_dir, lib_name) + + if os.path.isfile(lib_path): + # Nothing to do + return lib_path, dll_path + + if is_archive: + objects = (["-Wl,--whole-archive"] + list(objects) + + ["-Wl,--no-whole-archive"]) + self.link_shared_object( + objects, + dll_name, + output_dir=extra_dll_dir, + extra_postargs=list(chained_dlls) + [ + '-Wl,--allow-multiple-definition', + '-Wl,--output-def,' + def_path, + '-Wl,--export-all-symbols', + '-Wl,--enable-auto-import', + '-static', + '-mlong-double-64', + ]) + + # No PowerPC! 
if is_win64(): - return ['-O0'] + specifier = '/MACHINE:X64' else: - return GnuFCompiler.get_flags_opt(self) + specifier = '/MACHINE:X86' + + # MSVC specific code + lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] + if not c_compiler.initialized: + c_compiler.initialize() + c_compiler.spawn([c_compiler.lib] + lib_args) + + return lib_path, dll_path + + def can_ccompiler_link(self, compiler): + # MSVC cannot link objects compiled by GNU fortran + return compiler.compiler_type not in ("msvc", ) + + def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): + """ + Convert a set of object files that are not compatible with the default + linker, to a file that is compatible. + """ + if self.c_compiler.compiler_type == "msvc": + # Compile a DLL and return the lib for the DLL as + # the object. Also keep track of previous DLLs that + # we have compiled so that we can link against them. + + # If there are .a archives, assume they are self-contained + # static libraries, and build separate DLLs for each + archives = [] + plain_objects = [] + for obj in objects: + if obj.lower().endswith('.a'): + archives.append(obj) + else: + plain_objects.append(obj) + + chained_libs = [] + chained_dlls = [] + for archive in archives[::-1]: + lib, dll = self._link_wrapper_lib( + [archive], + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=True) + chained_libs.insert(0, lib) + chained_dlls.insert(0, dll) + + if not plain_objects: + return chained_libs + + lib, dll = self._link_wrapper_lib( + plain_objects, + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=False) + return [lib] + chained_libs + else: + raise ValueError("Unsupported C compiler") + def _can_target(cmd, arch): """Return true if the architecture supports the -arch flag""" @@ -383,18 +526,14 @@ os.remove(filename) return False + if __name__ == '__main__': from distutils import log + from numpy.distutils import customized_fcompiler log.set_verbosity(2) - 
compiler = GnuFCompiler() - compiler.customize() - print(compiler.get_version()) - + print(customized_fcompiler('gnu').get_version()) try: - compiler = Gnu95FCompiler() - compiler.customize() - print(compiler.get_version()) + print(customized_fcompiler('g95').get_version()) except Exception: - msg = get_exception() - print(msg) + print(get_exception()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/hpux.py python-numpy-1.14.5/numpy/distutils/fcompiler/hpux.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/hpux.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/hpux.py 2018-06-12 17:31:56.000000000 +0000 @@ -39,7 +39,5 @@ if __name__ == '__main__': from distutils import log log.set_verbosity(10) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='hpux') - compiler.customize() - print(compiler.get_version()) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='hpux').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/ibm.py python-numpy-1.14.5/numpy/distutils/fcompiler/ibm.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/ibm.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/ibm.py 2018-06-12 17:31:56.000000000 +0000 @@ -90,7 +90,6 @@ return ['-O3'] if __name__ == '__main__': + from numpy.distutils import customized_fcompiler log.set_verbosity(2) - compiler = IBMFCompiler() - compiler.customize() - print(compiler.get_version()) + print(customized_fcompiler(compiler='ibm').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/__init__.py python-numpy-1.14.5/numpy/distutils/fcompiler/__init__.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/__init__.py 2018-06-12 17:31:56.000000000 +0000 @@ -22,10 +22,6 @@ import sys import re import types -try: - set -except 
NameError: - from sets import Set as set from numpy.compat import open_latin1 @@ -434,6 +430,7 @@ raise CompilerNotFound() return version + ############################################################ ## Public methods: @@ -701,15 +698,47 @@ else: return hook_name() + def can_ccompiler_link(self, ccompiler): + """ + Check if the given C compiler can link objects produced by + this compiler. + """ + return True + + def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): + """ + Convert a set of object files that are not compatible with the default + linker, to a file that is compatible. + + Parameters + ---------- + objects : list + List of object files to include. + output_dir : str + Output directory to place generated object files. + extra_dll_dir : str + Output directory to place extra DLL files that need to be + included on Windows. + + Returns + ------- + converted_objects : list of str + List of converted object files. + Note that the number of output files is not necessarily + the same as inputs. 
+ + """ + raise NotImplementedError() + ## class FCompiler _default_compilers = ( # sys.platform mappings ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', - 'intelvem', 'intelem')), + 'intelvem', 'intelem', 'flang')), ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq', - 'intele', 'intelem', 'gnu', 'g95', 'pathf95')), + 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor')), ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), ('irix.*', ('mips', 'gnu', 'gnu95',)), @@ -809,6 +838,8 @@ platform.""" matching_compiler_types = available_fcompilers_for_platform(osname, platform) + log.info("get_default_fcompiler: matching types: '%s'", + matching_compiler_types) compiler_type = _find_existing_fcompiler(matching_compiler_types, osname=osname, platform=platform, diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/intel.py python-numpy-1.14.5/numpy/distutils/fcompiler/intel.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/intel.py 2017-09-24 22:47:22.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/intel.py 2018-06-12 17:31:56.000000000 +0000 @@ -215,7 +215,5 @@ if __name__ == '__main__': from distutils import log log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='intel') - compiler.customize() - print(compiler.get_version()) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='intel').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/lahey.py python-numpy-1.14.5/numpy/distutils/fcompiler/lahey.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/lahey.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/lahey.py 2018-06-12 17:31:56.000000000 +0000 @@ -43,7 +43,5 @@ if __name__ == 
'__main__': from distutils import log log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='lahey') - compiler.customize() - print(compiler.get_version()) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='lahey').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/mips.py python-numpy-1.14.5/numpy/distutils/fcompiler/mips.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/mips.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/mips.py 2018-06-12 17:31:56.000000000 +0000 @@ -52,7 +52,5 @@ return r if __name__ == '__main__': - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='mips') - compiler.customize() - print(compiler.get_version()) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='mips').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/nag.py python-numpy-1.14.5/numpy/distutils/fcompiler/nag.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/nag.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/nag.py 2018-06-12 17:31:56.000000000 +0000 @@ -1,15 +1,32 @@ from __future__ import division, absolute_import, print_function import sys +import re from numpy.distutils.fcompiler import FCompiler -compilers = ['NAGFCompiler'] +compilers = ['NAGFCompiler', 'NAGFORCompiler'] -class NAGFCompiler(FCompiler): +class BaseNAGFCompiler(FCompiler): + version_pattern = r'NAG.* Release (?P[^(\s]*)' + + def version_match(self, version_string): + m = re.search(self.version_pattern, version_string) + if m: + return m.group('version') + else: + return None + + def get_flags_linker_so(self): + return ["-Wl,-shared"] + def get_flags_opt(self): + return ['-O4'] + def get_flags_arch(self): + return [''] + +class NAGFCompiler(BaseNAGFCompiler): compiler_type = 'nag' description = 'NAGWare Fortran 
95 Compiler' - version_pattern = r'NAGWare Fortran 95 compiler Release (?P[^\s]*)' executables = { 'version_cmd' : ["", "-V"], @@ -22,24 +39,46 @@ } def get_flags_linker_so(self): - if sys.platform=='darwin': + if sys.platform == 'darwin': return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] - return ["-Wl,-shared"] - def get_flags_opt(self): - return ['-O4'] + return BaseNAGFCompiler.get_flags_linker_so(self) def get_flags_arch(self): version = self.get_version() if version and version < '5.1': return ['-target=native'] else: - return [''] + return BaseNAGFCompiler.get_flags_arch(self) def get_flags_debug(self): return ['-g', '-gline', '-g90', '-nan', '-C'] +class NAGFORCompiler(BaseNAGFCompiler): + + compiler_type = 'nagfor' + description = 'NAG Fortran Compiler' + + executables = { + 'version_cmd' : ["nagfor", "-V"], + 'compiler_f77' : ["nagfor", "-fixed"], + 'compiler_fix' : ["nagfor", "-fixed"], + 'compiler_f90' : ["nagfor"], + 'linker_so' : ["nagfor"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_debug(self): + version = self.get_version() + if version and version > '6.1': + return ['-g', '-u', '-nan', '-C=all', '-thread_safe', + '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] + else: + return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] + + if __name__ == '__main__': from distutils import log log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='nag') - compiler.customize() + from numpy.distutils import customized_fcompiler + compiler = customized_fcompiler(compiler='nagfor') print(compiler.get_version()) + print(compiler.get_flags_debug()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/none.py python-numpy-1.14.5/numpy/distutils/fcompiler/none.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/none.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/none.py 2018-06-12 17:31:56.000000000 +0000 
@@ -1,6 +1,7 @@ from __future__ import division, absolute_import, print_function from numpy.distutils.fcompiler import FCompiler +from numpy.distutils import customized_fcompiler compilers = ['NoneFCompiler'] @@ -26,6 +27,4 @@ if __name__ == '__main__': from distutils import log log.set_verbosity(2) - compiler = NoneFCompiler() - compiler.customize() - print(compiler.get_version()) + print(customized_fcompiler(compiler='none').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/pathf95.py python-numpy-1.14.5/numpy/distutils/fcompiler/pathf95.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/pathf95.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/pathf95.py 2018-06-12 17:31:56.000000000 +0000 @@ -31,8 +31,5 @@ if __name__ == '__main__': from distutils import log log.set_verbosity(2) - #compiler = PathScaleFCompiler() - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='pathf95') - compiler.customize() - print(compiler.get_version()) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='pathf95').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/pg.py python-numpy-1.14.5/numpy/distutils/fcompiler/pg.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/pg.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/pg.py 2018-06-12 17:31:56.000000000 +0000 @@ -1,49 +1,55 @@ # http://www.pgroup.com from __future__ import division, absolute_import, print_function -from numpy.distutils.fcompiler import FCompiler +import sys +import os + +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file from sys import platform +from os.path import join, dirname, normpath + +compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] -compilers = ['PGroupFCompiler'] class PGroupFCompiler(FCompiler): compiler_type = 'pg' description = 'Portland Group Fortran Compiler' - version_pattern = 
r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' + version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' if platform == 'darwin': executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["pgfortran", "-dynamiclib"], - 'compiler_fix' : ["pgfortran", "-Mfixed", "-dynamiclib"], - 'compiler_f90' : ["pgfortran", "-dynamiclib"], - 'linker_so' : ["libtool"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] + 'version_cmd': ["", "-V"], + 'compiler_f77': ["pgfortran", "-dynamiclib"], + 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], + 'compiler_f90': ["pgfortran", "-dynamiclib"], + 'linker_so': ["libtool"], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] } pic_flags = [''] else: executables = { - 'version_cmd' : ["", "-V"], - 'compiler_f77' : ["pgfortran"], - 'compiler_fix' : ["pgfortran", "-Mfixed"], - 'compiler_f90' : ["pgfortran"], - 'linker_so' : ["pgfortran", "-shared", "-fpic"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] + 'version_cmd': ["", "-V"], + 'compiler_f77': ["pgfortran"], + 'compiler_fix': ["pgfortran", "-Mfixed"], + 'compiler_f90': ["pgfortran"], + 'linker_so': ["pgfortran", "-shared", "-fpic"], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] } pic_flags = ['-fpic'] - module_dir_switch = '-module ' module_include_switch = '-I' def get_flags(self): opt = ['-Minform=inform', '-Mnosecond_underscore'] return self.pic_flags + opt + def get_flags_opt(self): return ['-fast'] + def get_flags_debug(self): return ['-g'] @@ -54,10 +60,82 @@ def runtime_library_dir_option(self, dir): return '-R"%s"' % dir + +if sys.version_info >= (3, 5): + import subprocess + import shlex + import functools + + class PGroupFlangCompiler(FCompiler): + compiler_type = 'flang' + description = 'Portland Group Fortran LLVM Compiler' + version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' + + ar_exe = 'lib.exe' + possible_executables = ['flang'] + + executables = { + 'version_cmd': ["", "--version"], + 'compiler_f77': ["flang"], + 
'compiler_fix': ["flang"], + 'compiler_f90': ["flang"], + 'linker_so': [None], + 'archiver': [ar_exe, "/verbose", "/OUT:"], + 'ranlib': None + } + + library_switch = '/OUT:' # No space after /OUT:! + module_dir_switch = '-module ' # Don't remove ending space! + + def get_libraries(self): + opt = FCompiler.get_libraries(self) + opt.extend(['flang', 'flangrti', 'ompstub']) + return opt + + @functools.lru_cache(maxsize=128) + def get_library_dirs(self): + """List of compiler library directories.""" + opt = FCompiler.get_library_dirs(self) + flang_dir = dirname(self.executables['compiler_f77'][0]) + opt.append(normpath(join(flang_dir, '..', 'lib'))) + + return opt + + def get_flags(self): + return [] + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + return ['-O3'] + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + raise NotImplementedError + +else: + from numpy.distutils.fcompiler import CompilerNotFound + + # No point in supporting on older Pythons because not ABI compatible + class PGroupFlangCompiler(FCompiler): + compiler_type = 'flang' + description = 'Portland Group Fortran LLVM Compiler' + + def get_version(self): + raise CompilerNotFound('Flang unsupported on Python < 3.5') + + if __name__ == '__main__': from distutils import log log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='pg') - compiler.customize() - print(compiler.get_version()) + from numpy.distutils import customized_fcompiler + if 'flang' in sys.argv: + print(customized_fcompiler(compiler='flang').get_version()) + else: + print(customized_fcompiler(compiler='pg').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/sun.py python-numpy-1.14.5/numpy/distutils/fcompiler/sun.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/sun.py 2017-09-17 13:29:38.000000000 +0000 +++ 
python-numpy-1.14.5/numpy/distutils/fcompiler/sun.py 2018-06-12 17:31:56.000000000 +0000 @@ -49,7 +49,5 @@ if __name__ == '__main__': from distutils import log log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='sun') - compiler.customize() - print(compiler.get_version()) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='sun').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/fcompiler/vast.py python-numpy-1.14.5/numpy/distutils/fcompiler/vast.py --- python-numpy-1.13.3/numpy/distutils/fcompiler/vast.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/fcompiler/vast.py 2018-06-12 17:31:56.000000000 +0000 @@ -50,7 +50,5 @@ if __name__ == '__main__': from distutils import log log.set_verbosity(2) - from numpy.distutils.fcompiler import new_fcompiler - compiler = new_fcompiler(compiler='vast') - compiler.customize() - print(compiler.get_version()) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='vast').get_version()) diff -Nru python-numpy-1.13.3/numpy/distutils/__init__.py python-numpy-1.14.5/numpy/distutils/__init__.py --- python-numpy-1.13.3/numpy/distutils/__init__.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -21,3 +21,15 @@ test = _numpy_tester().test except ImportError: pass + + +def customized_fcompiler(plat=None, compiler=None): + from numpy.distutils.fcompiler import new_fcompiler + c = new_fcompiler(plat=plat, compiler=compiler) + c.customize() + return c + +def customized_ccompiler(plat=None, compiler=None): + c = ccompiler.new_compiler(plat=plat, compiler=compiler) + c.customize('') + return c diff -Nru python-numpy-1.13.3/numpy/distutils/mingw32ccompiler.py python-numpy-1.14.5/numpy/distutils/mingw32ccompiler.py --- python-numpy-1.13.3/numpy/distutils/mingw32ccompiler.py 2017-09-24 
22:47:22.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/mingw32ccompiler.py 2018-06-12 18:28:52.000000000 +0000 @@ -329,7 +329,8 @@ def _find_dll_in_winsxs(dll_name): # Walk through the WinSxS directory to find the dll. - winsxs_path = os.path.join(os.environ['WINDIR'], 'winsxs') + winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), + 'winsxs') if not os.path.exists(winsxs_path): return None for root, dirs, files in os.walk(winsxs_path): diff -Nru python-numpy-1.13.3/numpy/distutils/misc_util.py python-numpy-1.14.5/numpy/distutils/misc_util.py --- python-numpy-1.13.3/numpy/distutils/misc_util.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/misc_util.py 2018-06-12 18:28:52.000000000 +0000 @@ -33,11 +33,6 @@ atexit.register(clean_up_temporary_directory) -try: - set -except NameError: - from sets import Set as set - from numpy.distutils.compat import get_exception from numpy.compat import basestring from numpy.compat import npy_load_module @@ -461,7 +456,7 @@ return False try: len(seq) - except: + except Exception: return False return True @@ -1838,7 +1833,7 @@ close_fds=True) sout = p.stdout m = re.match(r'(?P\d+)', sout.read()) - except: + except Exception: pass os.chdir(cwd) if m: @@ -1875,7 +1870,7 @@ close_fds=True) sout = p.stdout m = re.match(r'(?P\d+)', sout.read()) - except: + except Exception: pass os.chdir(cwd) if m: @@ -2070,7 +2065,6 @@ """ self.py_modules.append((self.name, name, generate_config_py)) - def get_info(self,*names): """Get resources information. 
@@ -2284,9 +2278,21 @@ from distutils.dir_util import mkpath mkpath(os.path.dirname(target)) f = open(target, 'w') - f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0]))) + f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) f.write('# It contains system_info results at the time of building this package.\n') f.write('__all__ = ["get_info","show"]\n\n') + + # For gfortran+msvc combination, extra shared libraries may exist + f.write(""" +import os +import sys + +extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') +if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): + os.environ.setdefault('PATH', '') + os.environ['PATH'] += os.pathsep + extra_dll_dir +""") + for k, i in system_info.saved_results.items(): f.write('%s=%r\n' % (k, i)) f.write(r''' diff -Nru python-numpy-1.13.3/numpy/distutils/system_info.py python-numpy-1.14.5/numpy/distutils/system_info.py --- python-numpy-1.13.3/numpy/distutils/system_info.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/system_info.py 2018-06-12 18:28:52.000000000 +0000 @@ -126,6 +126,7 @@ import re import copy import warnings +import atexit from glob import glob from functools import reduce if sys.version_info[0] < 3: @@ -151,6 +152,7 @@ get_shared_lib_extension) from numpy.distutils.command.config import config as cmd_config from numpy.distutils.compat import get_exception +from numpy.distutils import customized_ccompiler import distutils.ccompiler import tempfile import shutil @@ -210,6 +212,55 @@ default_src_dirs = ['.'] default_x11_lib_dirs = [] default_x11_include_dirs = [] + _include_dirs = [ + 'include', + 'include/suitesparse', + ] + _lib_dirs = [ + 'lib', + ] + + _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] + _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] + def add_system_root(library_root): + """Add a package manager root to the include directories""" + global default_lib_dirs + global 
default_include_dirs + + library_root = os.path.normpath(library_root) + + default_lib_dirs.extend( + os.path.join(library_root, d) for d in _lib_dirs) + default_include_dirs.extend( + os.path.join(library_root, d) for d in _include_dirs) + + if sys.version_info >= (3, 3): + # VCpkg is the de-facto package manager on windows for C/C++ + # libraries. If it is on the PATH, then we append its paths here. + # We also don't re-implement shutil.which for Python 2.7 because + # vcpkg doesn't support MSVC 2008. + vcpkg = shutil.which('vcpkg') + if vcpkg: + vcpkg_dir = os.path.dirname(vcpkg) + if platform.architecture() == '32bit': + specifier = 'x86' + else: + specifier = 'x64' + + vcpkg_installed = os.path.join(vcpkg_dir, 'installed') + for vcpkg_root in [ + os.path.join(vcpkg_installed, specifier + '-windows'), + os.path.join(vcpkg_installed, specifier + '-windows-static'), + ]: + add_system_root(vcpkg_root) + + # Conda is another popular package manager that provides libraries + conda = shutil.which('conda') + if conda: + conda_dir = os.path.dirname(conda) + add_system_root(os.path.join(conda_dir, '..', 'Library')) + add_system_root(os.path.join(conda_dir, 'Library')) + else: default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', '/opt/local/lib', '/sw/lib'], platform_bits) @@ -330,6 +381,7 @@ 'openblas': openblas_info, # use blas_opt instead # openblas with embedded lapack 'openblas_lapack': openblas_lapack_info, # use blas_opt instead + 'openblas_clapack': openblas_clapack_info, # use blas_opt instead 'blis': blis_info, # use blas_opt instead 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead 'blas_mkl': blas_mkl_info, # use blas_opt instead @@ -684,9 +736,13 @@ return self.get_libs(key, '') def library_extensions(self): - static_exts = ['.a'] + c = customized_ccompiler() + static_exts = [] + if c.compiler_type != 'msvc': + # MSVC doesn't understand binutils + static_exts.append('.a') if sys.platform == 'win32': - static_exts.append('.lib') # .lib 
is used by MSVC + static_exts.append('.lib') # .lib is used by MSVC and others if self.search_static_first: exts = static_exts + [so_ext] else: @@ -1480,6 +1536,11 @@ self.set_info(**openblas_info) return + openblas_info = get_info('openblas_clapack') + if openblas_info: + self.set_info(**openblas_info) + return + atlas_info = get_info('atlas_3_10_threads') if not atlas_info: atlas_info = get_info('atlas_3_10') @@ -1687,8 +1748,7 @@ # primitive cblas check by looking for the header and trying to link # cblas or blas res = False - c = distutils.ccompiler.new_compiler() - c.customize('') + c = customized_ccompiler() tmpdir = tempfile.mkdtemp() s = """#include int main(int argc, const char *argv[]) @@ -1739,12 +1799,28 @@ return True def calc_info(self): + c = customized_ccompiler() + lib_dirs = self.get_lib_dirs() openblas_libs = self.get_libs('libraries', self._lib_names) if openblas_libs == self._lib_names: # backward compat with 1.8.0 openblas_libs = self.get_libs('openblas_libs', self._lib_names) + info = self.check_libs(lib_dirs, openblas_libs, []) + + if c.compiler_type == "msvc" and info is None: + from numpy.distutils.fcompiler import new_fcompiler + f = new_fcompiler(c_compiler=c) + if f and f.compiler_type == 'gnu95': + # Try gfortran-compatible library files + info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) + # Skip lapack check, we'd need build_ext to do it + assume_lapack = True + elif info: + assume_lapack = False + info['language'] = 'c' + if info is None: return @@ -1752,13 +1828,42 @@ extra_info = self.calc_extra_info() dict_append(info, **extra_info) - if not self.check_embedded_lapack(info): + if not (assume_lapack or self.check_embedded_lapack(info)): return - info['language'] = 'c' info['define_macros'] = [('HAVE_CBLAS', None)] self.set_info(**info) + def check_msvc_gfortran_libs(self, library_dirs, libraries): + # First, find the full path to each library directory + library_paths = [] + for library in libraries: + for library_dir 
in library_dirs: + # MinGW static ext will be .a + fullpath = os.path.join(library_dir, library + '.a') + if os.path.isfile(fullpath): + library_paths.append(fullpath) + break + else: + return None + + # Generate numpy.distutils virtual static library file + tmpdir = os.path.join(os.getcwd(), 'build', 'openblas') + if not os.path.isdir(tmpdir): + os.makedirs(tmpdir) + + info = {'library_dirs': [tmpdir], + 'libraries': ['openblas'], + 'language': 'f77'} + + fake_lib_file = os.path.join(tmpdir, 'openblas.fobjects') + fake_clib_file = os.path.join(tmpdir, 'openblas.cobjects') + with open(fake_lib_file, 'w') as f: + f.write("\n".join(library_paths)) + with open(fake_clib_file, 'w') as f: + pass + + return info class openblas_lapack_info(openblas_info): section = 'openblas' @@ -1768,10 +1873,10 @@ def check_embedded_lapack(self, info): res = False - c = distutils.ccompiler.new_compiler() - c.customize('') + c = customized_ccompiler() + tmpdir = tempfile.mkdtemp() - s = """void zungqr(); + s = """void zungqr_(); int main(int argc, const char *argv[]) { zungqr_(); @@ -1782,8 +1887,10 @@ # Add the additional "extra" arguments try: extra_args = info['extra_link_args'] - except: + except Exception: extra_args = [] + if sys.version_info < (3, 5) and sys.version_info > (3, 0) and c.compiler_type == "msvc": + extra_args.append("/MANIFEST") try: with open(src, 'wt') as f: f.write(s) @@ -1799,6 +1906,8 @@ shutil.rmtree(tmpdir) return res +class openblas_clapack_info(openblas_lapack_info): + _lib_names = ['openblas', 'lapack'] class blis_info(blas_info): section = 'blis' diff -Nru python-numpy-1.13.3/numpy/distutils/tests/test_exec_command.py python-numpy-1.14.5/numpy/distutils/tests/test_exec_command.py --- python-numpy-1.13.3/numpy/distutils/tests/test_exec_command.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/tests/test_exec_command.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,7 +6,7 @@ from numpy.distutils import exec_command from 
numpy.distutils.exec_command import get_pythonexe -from numpy.testing import TestCase, run_module_suite, tempdir +from numpy.testing import run_module_suite, tempdir, assert_ # In python 3 stdout, stderr are text (unicode compliant) devices, so to # emulate them import StringIO from the io module. @@ -94,94 +94,94 @@ exec_command.exec_command("cd '.'") -class TestExecCommand(TestCase): - def setUp(self): +class TestExecCommand(object): + def setup(self): self.pyexe = get_pythonexe() def check_nt(self, **kws): s, o = exec_command.exec_command('cmd /C echo path=%path%') - self.assertEqual(s, 0) - self.assertNotEqual(o, '') + assert_(s == 0) + assert_(o != '') s, o = exec_command.exec_command( '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) - self.assertEqual(s, 0) - self.assertEqual(o, 'win32') + assert_(s == 0) + assert_(o == 'win32') def check_posix(self, **kws): s, o = exec_command.exec_command("echo Hello", **kws) - self.assertEqual(s, 0) - self.assertEqual(o, 'Hello') + assert_(s == 0) + assert_(o == 'Hello') s, o = exec_command.exec_command('echo $AAA', **kws) - self.assertEqual(s, 0) - self.assertEqual(o, '') + assert_(s == 0) + assert_(o == '') s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) - self.assertEqual(s, 0) - self.assertEqual(o, 'Tere') + assert_(s == 0) + assert_(o == 'Tere') s, o = exec_command.exec_command('echo "$AAA"', **kws) - self.assertEqual(s, 0) - self.assertEqual(o, '') + assert_(s == 0) + assert_(o == '') if 'BBB' not in os.environ: os.environ['BBB'] = 'Hi' s, o = exec_command.exec_command('echo "$BBB"', **kws) - self.assertEqual(s, 0) - self.assertEqual(o, 'Hi') + assert_(s == 0) + assert_(o == 'Hi') s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) - self.assertEqual(s, 0) - self.assertEqual(o, 'Hey') + assert_(s == 0) + assert_(o == 'Hey') s, o = exec_command.exec_command('echo "$BBB"', **kws) - self.assertEqual(s, 0) - self.assertEqual(o, 'Hi') + assert_(s == 0) + assert_(o == 
'Hi') del os.environ['BBB'] s, o = exec_command.exec_command('echo "$BBB"', **kws) - self.assertEqual(s, 0) - self.assertEqual(o, '') + assert_(s == 0) + assert_(o == '') s, o = exec_command.exec_command('this_is_not_a_command', **kws) - self.assertNotEqual(s, 0) - self.assertNotEqual(o, '') + assert_(s != 0) + assert_(o != '') s, o = exec_command.exec_command('echo path=$PATH', **kws) - self.assertEqual(s, 0) - self.assertNotEqual(o, '') + assert_(s == 0) + assert_(o != '') s, o = exec_command.exec_command( '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % self.pyexe, **kws) - self.assertEqual(s, 0) - self.assertEqual(o, 'posix') + assert_(s == 0) + assert_(o == 'posix') def check_basic(self, *kws): s, o = exec_command.exec_command( '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) - self.assertNotEqual(s, 0) - self.assertNotEqual(o, '') + assert_(s != 0) + assert_(o != '') s, o = exec_command.exec_command( '"%s" -c "import sys;sys.stderr.write(\'0\');' 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % self.pyexe, **kws) - self.assertEqual(s, 0) - self.assertEqual(o, '012') + assert_(s == 0) + assert_(o == '012') s, o = exec_command.exec_command( '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) - self.assertEqual(s, 15) - self.assertEqual(o, '') + assert_(s == 15) + assert_(o == '') s, o = exec_command.exec_command( '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) - self.assertEqual(s, 0) - self.assertEqual(o, 'Heipa') + assert_(s == 0) + assert_(o == 'Heipa') def check_execute_in(self, **kws): with tempdir() as tmpdir: @@ -194,13 +194,13 @@ s, o = exec_command.exec_command( '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % (self.pyexe, fn), **kws) - self.assertNotEqual(s, 0) - self.assertNotEqual(o, '') + assert_(s != 0) + assert_(o != '') s, o = exec_command.exec_command( '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) - self.assertEqual(s, 0) - self.assertEqual(o, 'Hello') + 
assert_(s == 0) + assert_(o == 'Hello') def test_basic(self): with redirect_stdout(StringIO()): diff -Nru python-numpy-1.13.3/numpy/distutils/tests/test_fcompiler_gnu.py python-numpy-1.14.5/numpy/distutils/tests/test_fcompiler_gnu.py --- python-numpy-1.13.3/numpy/distutils/tests/test_fcompiler_gnu.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/tests/test_fcompiler_gnu.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import division, absolute_import, print_function -from numpy.testing import TestCase, assert_, run_module_suite +from numpy.testing import assert_, run_module_suite import numpy.distutils.fcompiler @@ -30,7 +30,7 @@ ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') ] -class TestG77Versions(TestCase): +class TestG77Versions(object): def test_g77_version(self): fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') for vs, version in g77_version_strings: @@ -43,7 +43,7 @@ v = fc.version_match(vs) assert_(v is None, (vs, v)) -class TestGFortranVersions(TestCase): +class TestGFortranVersions(object): def test_gfortran_version(self): fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') for vs, version in gfortran_version_strings: diff -Nru python-numpy-1.13.3/numpy/distutils/tests/test_fcompiler_intel.py python-numpy-1.14.5/numpy/distutils/tests/test_fcompiler_intel.py --- python-numpy-1.13.3/numpy/distutils/tests/test_fcompiler_intel.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/tests/test_fcompiler_intel.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,7 +1,7 @@ from __future__ import division, absolute_import, print_function import numpy.distutils.fcompiler -from numpy.testing import TestCase, run_module_suite, assert_ +from numpy.testing import run_module_suite, assert_ intel_32bit_version_strings = [ @@ -16,7 +16,7 @@ "running on Intel(R) 64, Version 11.1", '11.1') ] -class TestIntelFCompilerVersions(TestCase): +class 
TestIntelFCompilerVersions(object): def test_32bit_version(self): fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel') for vs, version in intel_32bit_version_strings: @@ -24,7 +24,7 @@ assert_(v == version) -class TestIntelEM64TFCompilerVersions(TestCase): +class TestIntelEM64TFCompilerVersions(object): def test_64bit_version(self): fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') for vs, version in intel_64bit_version_strings: diff -Nru python-numpy-1.13.3/numpy/distutils/tests/test_fcompiler_nagfor.py python-numpy-1.14.5/numpy/distutils/tests/test_fcompiler_nagfor.py --- python-numpy-1.13.3/numpy/distutils/tests/test_fcompiler_nagfor.py 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/tests/test_fcompiler_nagfor.py 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,29 @@ +from __future__ import division, absolute_import, print_function + +from numpy.testing import assert_, run_module_suite + +import numpy.distutils.fcompiler + +nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' + '6.2(Chiyoda) Build 6200', '6.2'), + ('nagfor', 'NAG Fortran Compiler Release ' + '6.1(Tozai) Build 6136', '6.1'), + ('nagfor', 'NAG Fortran Compiler Release ' + '6.0(Hibiya) Build 1021', '6.0'), + ('nagfor', 'NAG Fortran Compiler Release ' + '5.3.2(971)', '5.3.2'), + ('nag', 'NAGWare Fortran 95 compiler Release 5.1' + '(347,355-367,375,380-383,389,394,399,401-402,407,' + '431,435,437,446,459-460,463,472,494,496,503,508,' + '511,517,529,555,557,565)', '5.1')] + +class TestNagFCompilerVersions(object): + def test_version_match(self): + for comp, vs, version in nag_version_strings: + fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) + v = fc.version_match(vs) + assert_(v == version) + + +if __name__ == '__main__': + run_module_suite() diff -Nru python-numpy-1.13.3/numpy/distutils/tests/test_misc_util.py python-numpy-1.14.5/numpy/distutils/tests/test_misc_util.py --- 
python-numpy-1.13.3/numpy/distutils/tests/test_misc_util.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/tests/test_misc_util.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,12 +6,12 @@ appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info ) from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal + run_module_suite, assert_, assert_equal ) ajoin = lambda *paths: join(*((sep,)+paths)) -class TestAppendpath(TestCase): +class TestAppendpath(object): def test_1(self): assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) @@ -35,7 +35,7 @@ assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) -class TestMinrelpath(TestCase): +class TestMinrelpath(object): def test_1(self): n = lambda path: path.replace('/', sep) @@ -49,7 +49,7 @@ assert_equal(minrelpath(n('.././..')), n('../..')) assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) -class TestGpaths(TestCase): +class TestGpaths(object): def test_gpaths(self): local_path = minrelpath(join(dirname(__file__), '..')) @@ -58,7 +58,7 @@ f = gpaths('system_info.py', local_path) assert_(join(local_path, 'system_info.py') == f[0], repr(f)) -class TestSharedExtension(TestCase): +class TestSharedExtension(object): def test_get_shared_lib_extension(self): import sys diff -Nru python-numpy-1.13.3/numpy/distutils/tests/test_npy_pkg_config.py python-numpy-1.14.5/numpy/distutils/tests/test_npy_pkg_config.py --- python-numpy-1.13.3/numpy/distutils/tests/test_npy_pkg_config.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/tests/test_npy_pkg_config.py 2018-06-12 18:28:52.000000000 +0000 @@ -3,7 +3,7 @@ import os from numpy.distutils.npy_pkg_config import read_config, parse_flags -from numpy.testing import TestCase, run_module_suite, temppath +from numpy.testing import run_module_suite, temppath, assert_ simple = """\ [meta] @@ -36,7 +36,7 @@ simple_variable_d = 
{'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', 'version': '0.1', 'name': 'foo'} -class TestLibraryInfo(TestCase): +class TestLibraryInfo(object): def test_simple(self): with temppath('foo.ini') as path: with open(path, 'w') as f: @@ -44,10 +44,10 @@ pkg = os.path.splitext(path)[0] out = read_config(pkg) - self.assertTrue(out.cflags() == simple_d['cflags']) - self.assertTrue(out.libs() == simple_d['libflags']) - self.assertTrue(out.name == simple_d['name']) - self.assertTrue(out.version == simple_d['version']) + assert_(out.cflags() == simple_d['cflags']) + assert_(out.libs() == simple_d['libflags']) + assert_(out.name == simple_d['name']) + assert_(out.version == simple_d['version']) def test_simple_variable(self): with temppath('foo.ini') as path: @@ -56,34 +56,34 @@ pkg = os.path.splitext(path)[0] out = read_config(pkg) - self.assertTrue(out.cflags() == simple_variable_d['cflags']) - self.assertTrue(out.libs() == simple_variable_d['libflags']) - self.assertTrue(out.name == simple_variable_d['name']) - self.assertTrue(out.version == simple_variable_d['version']) + assert_(out.cflags() == simple_variable_d['cflags']) + assert_(out.libs() == simple_variable_d['libflags']) + assert_(out.name == simple_variable_d['name']) + assert_(out.version == simple_variable_d['version']) out.vars['prefix'] = '/Users/david' - self.assertTrue(out.cflags() == '-I/Users/david/include') + assert_(out.cflags() == '-I/Users/david/include') -class TestParseFlags(TestCase): +class TestParseFlags(object): def test_simple_cflags(self): d = parse_flags("-I/usr/include") - self.assertTrue(d['include_dirs'] == ['/usr/include']) + assert_(d['include_dirs'] == ['/usr/include']) d = parse_flags("-I/usr/include -DFOO") - self.assertTrue(d['include_dirs'] == ['/usr/include']) - self.assertTrue(d['macros'] == ['FOO']) + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) d = parse_flags("-I /usr/include -DFOO") - self.assertTrue(d['include_dirs'] == 
['/usr/include']) - self.assertTrue(d['macros'] == ['FOO']) + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) def test_simple_lflags(self): d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") - self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - self.assertTrue(d['libraries'] == ['foo', 'bar']) + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") - self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib']) - self.assertTrue(d['libraries'] == ['foo', 'bar']) + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) if __name__ == '__main__': diff -Nru python-numpy-1.13.3/numpy/distutils/tests/test_system_info.py python-numpy-1.14.5/numpy/distutils/tests/test_system_info.py --- python-numpy-1.13.3/numpy/distutils/tests/test_system_info.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/distutils/tests/test_system_info.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,9 +6,10 @@ from subprocess import Popen, PIPE from distutils.errors import DistutilsError -from numpy.distutils import ccompiler -from numpy.testing import TestCase, run_module_suite, assert_, assert_equal -from numpy.testing.decorators import skipif +from numpy.distutils import ccompiler, customized_ccompiler +from numpy.testing import ( + run_module_suite, assert_, assert_equal, dec + ) from numpy.distutils.system_info import system_info, ConfigParser from numpy.distutils.system_info import default_lib_dirs, default_include_dirs @@ -20,9 +21,9 @@ 1 - display warning message 2 - raise error """ - cl = {'temp1': TestTemp1, - 'temp2': TestTemp2 - }.get(name.lower(), test_system_info) + cl = {'temp1': Temp1Info, + 'temp2': Temp2Info + }.get(name.lower(), _system_info) return cl() simple_site = """ @@ -59,15 +60,14 @@ def have_compiler(): """ Return True if there appears to be an executable compiler 
""" - compiler = ccompiler.new_compiler() - compiler.customize(None) + compiler = customized_ccompiler() try: cmd = compiler.compiler # Unix compilers except AttributeError: try: if not compiler.initialized: compiler.initialize() # MSVC is different - except DistutilsError: + except (DistutilsError, ValueError): return False cmd = [compiler.cc] try: @@ -83,7 +83,7 @@ HAVE_COMPILER = have_compiler() -class test_system_info(system_info): +class _system_info(system_info): def __init__(self, default_lib_dirs=default_lib_dirs, @@ -110,17 +110,19 @@ return info -class TestTemp1(test_system_info): +class Temp1Info(_system_info): + """For testing purposes""" section = 'temp1' -class TestTemp2(test_system_info): +class Temp2Info(_system_info): + """For testing purposes""" section = 'temp2' -class TestSystemInfoReading(TestCase): +class TestSystemInfoReading(object): - def setUp(self): + def setup(self): """ Create the libraries """ # Create 2 sources and 2 libraries self._dir1 = mkdtemp() @@ -162,15 +164,15 @@ # Do each removal separately try: shutil.rmtree(self._dir1) - except: + except Exception: pass try: shutil.rmtree(self._dir2) - except: + except Exception: pass try: os.remove(self._sitecfg) - except: + except Exception: pass def test_all(self): @@ -199,11 +201,10 @@ extra = tsi.calc_extra_info() assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) - @skipif(not HAVE_COMPILER) + @dec.skipif(not HAVE_COMPILER) def test_compile1(self): # Compile source and link the first source - c = ccompiler.new_compiler() - c.customize(None) + c = customized_ccompiler() previousDir = os.getcwd() try: # Change directory to not screw up directories @@ -215,13 +216,12 @@ finally: os.chdir(previousDir) - @skipif(not HAVE_COMPILER) - @skipif('msvc' in repr(ccompiler.new_compiler())) + @dec.skipif(not HAVE_COMPILER) + @dec.skipif('msvc' in repr(ccompiler.new_compiler())) def test_compile2(self): # Compile source and link the second source tsi = self.c_temp2 - c = 
ccompiler.new_compiler() - c.customize(None) + c = customized_ccompiler() extra_link_args = tsi.calc_extra_info()['extra_link_args'] previousDir = os.getcwd() try: diff -Nru python-numpy-1.13.3/numpy/doc/basics.py python-numpy-1.14.5/numpy/doc/basics.py --- python-numpy-1.13.3/numpy/doc/basics.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/doc/basics.py 2018-06-12 17:31:56.000000000 +0000 @@ -114,10 +114,10 @@ >>> d dtype('int32') - >>> np.issubdtype(d, int) + >>> np.issubdtype(d, np.integer) True - >>> np.issubdtype(d, float) + >>> np.issubdtype(d, np.floating) False diff -Nru python-numpy-1.13.3/numpy/doc/constants.py python-numpy-1.14.5/numpy/doc/constants.py --- python-numpy-1.13.3/numpy/doc/constants.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/doc/constants.py 2018-06-12 18:28:52.000000000 +0000 @@ -133,11 +133,11 @@ 0.0 >>> np.isfinite([np.NZERO]) - array([ True], dtype=bool) + array([ True]) >>> np.isnan([np.NZERO]) - array([False], dtype=bool) + array([False]) >>> np.isinf([np.NZERO]) - array([False], dtype=bool) + array([False]) """) @@ -204,11 +204,11 @@ -0.0 >>> np.isfinite([np.PZERO]) - array([ True], dtype=bool) + array([ True]) >>> np.isnan([np.PZERO]) - array([False], dtype=bool) + array([False]) >>> np.isinf([np.PZERO]) - array([False], dtype=bool) + array([False]) """) diff -Nru python-numpy-1.13.3/numpy/doc/creation.py python-numpy-1.14.5/numpy/doc/creation.py --- python-numpy-1.13.3/numpy/doc/creation.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/doc/creation.py 2018-06-12 17:35:36.000000000 +0000 @@ -58,7 +58,7 @@ >>> np.arange(10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - >>> np.arange(2, 10, dtype=np.float) + >>> np.arange(2, 10, dtype=float) array([ 2., 3., 4., 5., 6., 7., 8., 9.]) >>> np.arange(2, 3, 0.1) array([ 2. 
, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9]) @@ -103,8 +103,8 @@ check the last section as well) :: - HDF5: PyTables - FITS: PyFITS + HDF5: h5py + FITS: Astropy Examples of formats that cannot be read directly but for which it is not hard to convert are those formats supported by libraries like PIL (able to read and diff -Nru python-numpy-1.13.3/numpy/doc/glossary.py python-numpy-1.14.5/numpy/doc/glossary.py --- python-numpy-1.13.3/numpy/doc/glossary.py 2017-09-24 22:47:22.000000000 +0000 +++ python-numpy-1.14.5/numpy/doc/glossary.py 2018-06-12 18:28:52.000000000 +0000 @@ -11,7 +11,7 @@ vertically downwards across rows (axis 0), and the second running horizontally across columns (axis 1). - Many operation can take place along one of these axes. For example, + Many operations can take place along one of these axes. For example, we can sum each row of an array, in which case we operate along columns, or axis 1:: @@ -233,7 +233,7 @@ >>> mask = (x > 2) >>> mask - array([False, False, False, True, True], dtype=bool) + array([False, False, False, True, True]) >>> x[mask] = -1 >>> x diff -Nru python-numpy-1.13.3/numpy/doc/indexing.py python-numpy-1.14.5/numpy/doc/indexing.py --- python-numpy-1.13.3/numpy/doc/indexing.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/doc/indexing.py 2018-06-12 17:31:56.000000000 +0000 @@ -240,7 +240,7 @@ For example: :: >>> b[:,5] # use a 1-D boolean whose first dim agrees with the first dim of y - array([False, False, False, True, True], dtype=bool) + array([False, False, False, True, True]) >>> y[b[:,5]] array([[21, 22, 23, 24, 25, 26, 27], [28, 29, 30, 31, 32, 33, 34]]) @@ -422,7 +422,7 @@ [37, 40, 43], [46, 49, 52]]) -For this reason it is possible to use the output from the np.where() +For this reason it is possible to use the output from the np.nonzero() function directly as an index since it always returns a tuple of index arrays. 
diff -Nru python-numpy-1.13.3/numpy/doc/misc.py python-numpy-1.14.5/numpy/doc/misc.py --- python-numpy-1.13.3/numpy/doc/misc.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/doc/misc.py 2018-06-12 18:28:52.000000000 +0000 @@ -14,7 +14,8 @@ Note: cannot use equality to test NaNs. E.g.: :: >>> myarr = np.array([1., 0., np.nan, 3.]) - >>> np.where(myarr == np.nan) + >>> np.nonzero(myarr == np.nan) + (array([], dtype=int64),) >>> np.nan == np.nan # is always False! Use special numpy functions instead. False >>> myarr[myarr == np.nan] = 0. # doesn't work diff -Nru python-numpy-1.13.3/numpy/doc/structured_arrays.py python-numpy-1.14.5/numpy/doc/structured_arrays.py --- python-numpy-1.13.3/numpy/doc/structured_arrays.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/doc/structured_arrays.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,231 +6,546 @@ Introduction ============ -NumPy provides powerful capabilities to create arrays of structured datatype. -These arrays permit one to manipulate the data by named fields. A simple -example will show what is meant.: :: +Structured arrays are ndarrays whose datatype is a composition of simpler +datatypes organized as a sequence of named :term:`fields `. For example, +:: - >>> x = np.array([(1,2.,'Hello'), (2,3.,"World")], - ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')]) + >>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)], + ... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')]) >>> x - array([(1, 2.0, 'Hello'), (2, 3.0, 'World')], - dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')]) + array([('Rex', 9, 81.0), ('Fido', 3, 27.0)], + dtype=[('name', 'S10'), ('age', '>> x[1] - (2,3.,"World") + ('Fido', 3, 27.0) -Conveniently, one can access any field of the array by indexing using the -string that names that field. 
:: +You can access and modify individual fields of a structured array by indexing +with the field name:: - >>> y = x['bar'] - >>> y - array([ 2., 3.], dtype=float32) - >>> y[:] = 2*y - >>> y - array([ 4., 6.], dtype=float32) + >>> x['age'] + array([9, 3], dtype=int32) + >>> x['age'] = 5 >>> x - array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], - dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')]) + array([('Rex', 5, 81.0), ('Fido', 5, 27.0)], + dtype=[('name', 'S10'), ('age', '` reference page, and in +summary they are: + +1. A list of tuples, one tuple per field + + Each tuple has the form ``(fieldname, datatype, shape)`` where shape is + optional. ``fieldname`` is a string (or tuple if titles are used, see + :ref:`Field Titles ` below), ``datatype`` may be any object + convertible to a datatype, and ``shape`` is a tuple of integers specifying + subarray shape. + + >>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2,2))]) + dtype=[('x', '>> np.dtype([('x', 'f4'),('', 'i4'),('z', 'i8')]) + dtype([('x', '` may be used in a string and separated by + commas. The itemsize and byte offsets of the fields are determined + automatically, and the field names are given the default names ``f0``, + ``f1``, etc. :: + + >>> np.dtype('i8,f4,S3') + dtype([('f0', '>> np.dtype('3int8, float32, (2,3)float64') + dtype([('f0', 'i1', 3), ('f1', '>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4','f4']}) + dtype([('col1', '>> np.dtype({'names': ['col1', 'col2'], + ... 'formats': ['i4','f4'], + ... 'offsets': [0, 4], + ... 'itemsize': 12}) + dtype({'names':['col1','col2'], 'formats':['` below. + +4. A dictionary of field names + + The use of this form of specification is discouraged, but documented here + because older numpy code may use it. 
The keys of the dictionary are the + field names and the values are tuples specifying type and offset:: + + >>> np.dtype=({'col1': ('i1',0), 'col2': ('f4',1)}) + dtype([(('col1'), 'i1'), (('col2'), '>f4')]) + + This form is discouraged because Python dictionaries do not preserve order + in Python versions before Python 3.6, and the order of the fields in a + structured dtype has meaning. :ref:`Field Titles ` may be + specified by using a 3-tuple, see below. + +Manipulating and Displaying Structured Datatypes +------------------------------------------------ + +The list of field names of a structured datatype can be found in the ``names`` +attribute of the dtype object:: + + >>> d = np.dtype([('x', 'i8'), ('y', 'f4')]) + >>> d.names + ('x', 'y') + +The field names may be modified by assigning to the ``names`` attribute using a +sequence of strings of the same length. + +The dtype object also has a dictionary-like attribute, ``fields``, whose keys +are the field names (and :ref:`Field Titles `, see below) and whose +values are tuples containing the dtype and byte offset of each field. :: + + >>> d.fields + mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)}) + +Both the ``names`` and ``fields`` attributes will equal ``None`` for +unstructured arrays. + +The string representation of a structured datatype is shown in the "list of +tuples" form if possible, otherwise numpy falls back to using the more general +dictionary form. + +.. _offsets-and-alignment: + +Automatic Byte Offsets and Alignment +------------------------------------ + +Numpy uses one of two methods to automatically determine the field byte offsets +and the overall itemsize of a structured datatype, depending on whether +``align=True`` was specified as a keyword argument to :func:`numpy.dtype`. + +By default (``align=False``), numpy will pack the fields together such that +each field starts at the byte offset the previous field ended, and the fields +are contiguous in memory. 
:: + + >>> def print_offsets(d): + ... print("offsets:", [d.fields[name][1] for name in d.names]) + ... print("itemsize:", d.itemsize) + >>> print_offsets(np.dtype('u1,u1,i4,u1,i8,u2')) + offsets: [0, 1, 2, 6, 7, 15] + itemsize: 17 + +If ``align=True`` is set, numpy will pad the structure in the same way many C +compilers would pad a C-struct. Aligned structures can give a performance +improvement in some cases, at the cost of increased datatype size. Padding +bytes are inserted between fields such that each field's byte offset will be a +multiple of that field's alignment, which is usually equal to the field's size +in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The +structure will also have trailing padding added so that its itemsize is a +multiple of the largest field's alignment. :: + + >>> print_offsets(np.dtype('u1,u1,i4,u1,i8,u2', align=True)) + offsets: [0, 1, 4, 8, 16, 24] + itemsize: 32 + +Note that although almost all modern C compilers pad in this way by default, +padding in C structs is C-implementation-dependent so this memory layout is not +guaranteed to exactly match that of a corresponding struct in a C program. Some +work may be needed, either on the numpy side or the C side, to obtain exact +correspondence. + +If offsets were specified using the optional ``offsets`` key in the +dictionary-based dtype specification, setting ``align=True`` will check that +each field's offset is a multiple of its size and that the itemsize is a +multiple of the largest field size, and raise an exception if not. + +If the offsets of the fields and itemsize of a structured array satisfy the +alignment conditions, the array will have the ``ALIGNED`` :ref:`flag +` set. + +A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an +aligned dtype or array to a packed one and vice versa. It takes either a dtype +or structured ndarray as an argument, and returns a copy with fields re-packed, +with or without padding bytes. 
+ +.. _titles: + +Field Titles +------------ + +In addition to field names, fields may also have an associated :term:`title`, +an alternate name, which is sometimes used as an additional description or +alias for the field. The title may be used to index an array, just like a +field name. + +To add titles when using the list-of-tuples form of dtype specification, the +field name may be be specified as a tuple of two strings instead of a single +string, which will be the field's title and field name respectively. For +example:: + + >>> np.dtype([(('my title', 'name'), 'f4')]) + +When using the first form of dictionary-based specification, the titles may be +supplied as an extra ``'titles'`` key as described above. When using the second +(discouraged) dictionary-based specification, the title can be supplied by +providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual +2-element tuple:: + + >>> np.dtype({'name': ('i4', 0, 'my title')}) + +The ``dtype.fields`` dictionary will contain :term:`titles` as keys, if any +titles are used. This means effectively that a field with a title will be +represented twice in the fields dictionary. The tuple values for these fields +will also have a third element, the field title. Because of this, and because +the ``names`` attribute preserves the field order while the ``fields`` +attribute may not, it is recommended to iterate through the fields of a dtype +using the ``names`` attribute of the dtype, which will not list titles, as +in:: + + >>> for name in d.names: + ... print(d.fields[name][:2]) + +Union types +----------- + +Structured datatypes are implemented in numpy to have base type +:class:`numpy.void` by default, but it is possible to interpret other numpy +types as structured types using the ``(base_dtype, dtype)`` form of dtype +specification described in +:ref:`Data Type Objects `. Here, ``base_dtype`` is +the desired underlying dtype, and fields and flags will be copied from +``dtype``. 
This dtype is similar to a 'union' in C. + +Indexing and Assignment to Structured arrays +============================================= + +Assigning data to a Structured Array +------------------------------------ + +There are a number of ways to assign values to a structured array: Using python +tuples, using scalar values, or using other structured arrays. + +Assignment from Python Native Types (Tuples) +``````````````````````````````````````````` + +The simplest way to assign values to a structured array is using python tuples. +Each assigned value should be a tuple of length equal to the number of fields +in the array, and not a list or array as these will trigger numpy's +broadcasting rules. The tuple's elements are assigned to the successive fields +of the array, from left to right:: - >>> x[1] = (-1,-1.,"Master") + >>> x = np.array([(1,2,3),(4,5,6)], dtype='i8,f4,f8') + >>> x[1] = (7,8,9) >>> x - array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')], - dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')]) - >>> y - array([ 4., -1.], dtype=float32) - -Defining Structured Arrays -========================== - -One defines a structured array through the dtype object. There are -**several** alternative ways to define the fields of a record. Some of -these variants provide backward compatibility with Numeric, numarray, or -another module, and should not be used except for such purposes. These -will be so noted. One specifies record structure in -one of four alternative ways, using an argument (as supplied to a dtype -function keyword or a dtype object constructor itself). This -argument must be one of the following: 1) string, 2) tuple, 3) list, or -4) dictionary. Each of these is briefly described below. - -1) String argument. -In this case, the constructor expects a comma-separated list of type -specifiers, optionally with extra shape information. The fields are -given the default names 'f0', 'f1', 'f2' and so on. 
-The type specifiers can take 4 different forms: :: - - a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a - (representing bytes, ints, unsigned ints, floats, complex and - fixed length strings of specified byte lengths) - b) int8,...,uint8,...,float16, float32, float64, complex64, complex128 - (this time with bit sizes) - c) older Numeric/numarray type specifications (e.g. Float32). - Don't use these in new code! - d) Single character type specifiers (e.g H for unsigned short ints). - Avoid using these unless you must. Details can be found in the - NumPy book - -These different styles can be mixed within the same string (but why would you -want to do that?). Furthermore, each type specifier can be prefixed -with a repetition number, or a shape. In these cases an array -element is created, i.e., an array within a record. That array -is still referred to as a single field. An example: :: + array([(1, 2., 3.), (7, 8., 9.)], + dtype=[('f0', '>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64') - >>> x - array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), - ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), - ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])], - dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))]) - -By using strings to define the record structure, it precludes being -able to name the fields in the original definition. The names can -be changed as shown later, however. - -2) Tuple argument: The only relevant tuple case that applies to record -structures is when a structure is mapped to an existing data type. This -is done by pairing in a tuple, the existing data type with a matching -dtype definition (using any of the variants being described here). 
As -an example (using a definition using a list, so see 3) for further -details): :: +Assignment from Scalars +``````````````````````` - >>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')])) - >>> x - array([0, 0, 0]) - >>> x['r'] - array([0, 0, 0], dtype=uint8) - -In this case, an array is produced that looks and acts like a simple int32 array, -but also has definitions for fields that use only one byte of the int32 (a bit -like Fortran equivalencing). - -3) List argument: In this case the record structure is defined with a list of -tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field -('' is permitted), 2) the type of the field, and 3) the shape (optional). -For example:: +A scalar assigned to a structured element will be assigned to all fields. This +happens when a scalar is assigned to a structured array, or when an +unstructured array is assigned to a structured array:: - >>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))]) + >>> x = np.zeros(2, dtype='i8,f4,?,S1') + >>> x[:] = 3 >>> x - array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]), - (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]), - (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])], - dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))]) - -4) Dictionary argument: two different forms are permitted. The first consists -of a dictionary with two required keys ('names' and 'formats'), each having an -equal sized list of values. The format list contains any type/shape specifier -allowed in other contexts. The names must be strings. There are two optional -keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to -the required two where offsets contain integer offsets for each field, and -titles are objects containing metadata for each field (these do not have -to be strings), where the value of None is permitted. 
As an example: :: + array([(3, 3.0, True, b'3'), (3, 3.0, True, b'3')], + dtype=[('f0', '>> x[:] = np.arange(2) + >>> x + array([(0, 0.0, False, b'0'), (1, 1.0, True, b'1')], + dtype=[('f0', '>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']}) + >>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')]) + >>> onefield = np.zeros(2, dtype=[('A', 'i4')]) + >>> nostruct = np.zeros(2, dtype='i4') + >>> nostruct[:] = twofield + ValueError: Can't cast from structure to non-structure, except if the structure only has a single field. + >>> nostruct[:] = onefield + >>> nostruct + array([0, 0], dtype=int32) + +Assignment from other Structured Arrays +``````````````````````````````````````` + +Assignment between two structured arrays occurs as if the source elements had +been converted to tuples and then assigned to the destination elements. That +is, the first field of the source array is assigned to the first field of the +destination array, and the second field likewise, and so on, regardless of +field names. Structured arrays with a different number of fields cannot be +assigned to each other. Bytes of the destination structure which are not +included in any of the fields are unaffected. 
:: + + >>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')]) + >>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')]) + >>> b[:] = a + >>> b + array([(0.0, b'0.0', b''), (0.0, b'0.0', b''), (0.0, b'0.0', b'')], + dtype=[('x', '>> x = np.array([(1,2),(3,4)], dtype=[('foo', 'i8'), ('bar', 'f4')]) + >>> x['foo'] + array([1, 3]) + >>> x['foo'] = 10 >>> x - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[('col1', '>i4'), ('col2', '>f4')]) + array([(10, 2.), (10, 4.)], + dtype=[('foo', '>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')}) + >>> y = x['bar'] + >>> y[:] = 10 >>> x - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')]) + array([(10, 5.), (10, 5.)], + dtype=[('foo', '>> y.dtype, y.shape, y.strides + (dtype('float32'), (2,), (12,)) - >>> x.dtype.names - ('col1', 'col2') - >>> x.dtype.names = ('x', 'y') - >>> x - array([(0, 0.0), (0, 0.0), (0, 0.0)], - dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')]) - >>> x.dtype.names = ('x', 'y', 'z') # wrong number of names - : must replace all names at once with a sequence of length 2 +Accessing Multiple Fields +``````````````````````````` + +One can index and assign to a structured array with a multi-field index, where +the index is a list of field names. -Accessing field titles -==================================== +.. warning:: + The behavior of multi-field indexes will change from Numpy 1.14 to Numpy + 1.15. -The field titles provide a standard place to put associated info for fields. -They do not have to be strings. 
:: +In Numpy 1.15, the result of indexing with a multi-field index will be a view +into the original array, as follows:: - >>> x.dtype.fields['x'][2] - 'title 1' + >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')]) + >>> a[['a', 'c']] + array([(0, 0.), (0, 0.), (0, 0.)], + dtype={'names':['a','c'], 'formats':['>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))], - dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))]) +.. warning:: + The new behavior in Numpy 1.15 leads to extra "padding" bytes at the + location of unindexed fields. You will need to update any code which depends + on the data having a "packed" layout. For instance code such as:: -Notice that `x` is created with a list of tuples. :: + >>> a[['a','c']].view('i8') # will fail in Numpy 1.15 + ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype - >>> x[['x','y']] - array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)], - dtype=[('x', '>> x[['x','value']] - array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]), - (1.0, [[2.0, 6.0], [2.0, 6.0]])], - dtype=[('x', '>> x[['y','x']] - array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)], - dtype=[('y', '>> from numpy.lib.recfunctions import repack_fields + >>> repack_fields(a[['a','c']]).view('i8') # supported 1.14 and 1.15 + array([0, 0, 0]) -Filling structured arrays -========================= +Assigning to an array with a multi-field index will behave the same in Numpy +1.14 and Numpy 1.15. In both versions the assignment will modify the original +array:: -Structured arrays can be filled by field or row by row. :: + >>> a[['a', 'c']] = (2, 3) + >>> a + array([(2, 0, 3.0), (2, 0, 3.0), (2, 0, 3.0)], + dtype=[('a', '>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')]) - >>> arr['var1'] = np.arange(5) +This obeys the structured array assignment rules described above. 
For example, +this means that one can swap the values of two fields using appropriate +multi-field indexes:: -If you fill it in row by row, it takes a take a tuple -(but not a list or array!):: + >>> a[['a', 'c']] = a[['c', 'a']] - >>> arr[0] = (10,20) - >>> arr - array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)], - dtype=[('var1', '>> x = np.array([(1, 2., 3.)], dtype='i,f,f') + >>> scalar = x[0] + >>> scalar + (1, 2., 3.) + >>> type(scalar) + numpy.void + +Unlike other numpy scalars, structured scalars are mutable and act like views +into the original array, such that modifying the scalar will modify the +original array. Structured scalars also support access and assignment by field +name:: + + >>> x = np.array([(1,2),(3,4)], dtype=[('foo', 'i8'), ('bar', 'f4')]) + >>> s = x[0] + >>> s['bar'] = 100 + >>> x + array([(1, 100.), (3, 4.)], + dtype=[('foo', '>> scalar = np.array([(1, 2., 3.)], dtype='i,f,f')[0] + >>> scalar[0] + 1 + >>> scalar[1] = 4 + +Thus, tuples might be thought of as the native Python equivalent to numpy's +structured types, much like native python integers are the equivalent to +numpy's integer types. Structured scalars may be converted to a tuple by +calling :func:`ndarray.item`:: + + >>> scalar.item(), type(scalar.item()) + ((1, 2.0, 3.0), tuple) + +Viewing Structured Arrays Containing Objects +-------------------------------------------- + +In order to prevent clobbering object pointers in fields of +:class:`numpy.object` type, numpy currently does not allow views of structured +arrays containing objects. + +Structure Comparison +-------------------- + +If the dtypes of two void structured arrays are equal, testing the equality of +the arrays will result in a boolean array with the dimensions of the original +arrays, with elements set to ``True`` where all fields of the corresponding +structures are equal. 
Structured dtypes are equal if the field names, +dtypes and titles are the same, ignoring endianness, and the fields are in +the same order:: + + >>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')]) + >>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')]) + >>> a == b + array([False, False]) + +Currently, if the dtypes of two void structured arrays are not equivalent the +comparison fails, returning the scalar value ``False``. This behavior is +deprecated as of numpy 1.10 and will raise an error or perform elementwise +comparison in the future. + +The ``<`` and ``>`` operators always return ``False`` when comparing void +structured arrays, and arithmetic and bitwise operations are not supported. Record Arrays ============= -For convenience, numpy provides "record arrays" which allow one to access -fields of structured arrays by attribute rather than by index. Record arrays -are structured arrays wrapped using a subclass of ndarray, -:class:`numpy.recarray`, which allows field access by attribute on the array -object, and record arrays also use a special datatype, :class:`numpy.record`, -which allows field access by attribute on the individual elements of the array. +As an optional convenience numpy provides an ndarray subclass, +:class:`numpy.recarray`, and associated helper functions in the +:mod:`numpy.rec` submodule, that allows access to fields of structured arrays +by attribute instead of only by index. Record arrays also use a special +datatype, :class:`numpy.record`, that allows field access by attribute on the +structured scalars obtained from the array. -The simplest way to create a record array is with :func:`numpy.rec.array`: :: +The simplest way to create a record array is with :func:`numpy.rec.array`:: - >>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")], + >>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")], ... 
dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')]) >>> recordarr.bar array([ 2., 3.], dtype=float32) >>> recordarr[1:2] - rec.array([(2, 3.0, 'World')], + rec.array([(2, 3.0, 'World')], dtype=[('foo', '>> recordarr[1:2].foo array([2], dtype=int32) @@ -239,27 +554,28 @@ >>> recordarr[1].baz 'World' -numpy.rec.array can convert a wide variety of arguments into record arrays, -including normal structured arrays: :: +:func:`numpy.rec.array` can convert a wide variety of arguments into record +arrays, including structured arrays:: - >>> arr = array([(1,2.,'Hello'),(2,3.,"World")], + >>> arr = array([(1,2.,'Hello'),(2,3.,"World")], ... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')]) >>> recordarr = np.rec.array(arr) -The numpy.rec module provides a number of other convenience functions for +The :mod:`numpy.rec` module provides a number of other convenience functions for creating record arrays, see :ref:`record array creation routines `. A record array representation of a structured array can be obtained using the -appropriate :ref:`view`: :: +appropriate :ref:`view`:: - >>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")], + >>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")], ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')]) - >>> recordarr = arr.view(dtype=dtype((np.record, arr.dtype)), + >>> recordarr = arr.view(dtype=dtype((np.record, arr.dtype)), ... type=np.recarray) -For convenience, viewing an ndarray as type `np.recarray` will automatically -convert to `np.record` datatype, so the dtype can be left out of the view: :: +For convenience, viewing an ndarray as type :class:`np.recarray` will +automatically convert to :class:`np.record` datatype, so the dtype can be left +out of the view:: >>> recordarr = arr.view(np.recarray) >>> recordarr.dtype @@ -267,14 +583,14 @@ To get back to a plain ndarray both the dtype and type must be reset. 
The following view does so, taking into account the unusual case that the -recordarr was not a structured type: :: +recordarr was not a structured type:: >>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray) Record array fields accessed by index or by attribute are returned as a record array if the field has a structured type but as a plain ndarray otherwise. :: - >>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))], + >>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))], ... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])]) >>> type(recordarr.foo) @@ -283,8 +599,7 @@ Note that if a field has the same name as an ndarray attribute, the ndarray attribute takes precedence. Such fields will be inaccessible by attribute but -may still be accessed by index. - +will still be accessible by index. """ from __future__ import division, absolute_import, print_function diff -Nru python-numpy-1.13.3/numpy/f2py/auxfuncs.py python-numpy-1.14.5/numpy/f2py/auxfuncs.py --- python-numpy-1.13.3/numpy/f2py/auxfuncs.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/f2py/auxfuncs.py 2018-06-12 17:31:56.000000000 +0000 @@ -552,7 +552,7 @@ pass -class throw_error: +class throw_error(object): def __init__(self, mess): self.mess = mess diff -Nru python-numpy-1.13.3/numpy/f2py/capi_maps.py python-numpy-1.14.5/numpy/f2py/capi_maps.py --- python-numpy-1.13.3/numpy/f2py/capi_maps.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/f2py/capi_maps.py 2018-06-12 18:28:52.000000000 +0000 @@ -328,7 +328,7 @@ ret['size'] = '*'.join(dim) try: ret['size'] = repr(eval(ret['size'])) - except: + except Exception: pass ret['dims'] = ','.join(dim) ret['rank'] = repr(len(dim)) @@ -485,7 +485,7 @@ else: v = eval(v, {}, {}) ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) - except: + except Exception: raise ValueError( 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' 
% (init, a)) if isarray(var): diff -Nru python-numpy-1.13.3/numpy/f2py/cb_rules.py python-numpy-1.14.5/numpy/f2py/cb_rules.py --- python-numpy-1.13.3/numpy/f2py/cb_rules.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/f2py/cb_rules.py 2018-06-12 17:31:56.000000000 +0000 @@ -44,6 +44,7 @@ \tPyTupleObject *capi_arglist = #name#_args_capi; \tPyObject *capi_return = NULL; \tPyObject *capi_tmp = NULL; +\tPyObject *capi_arglist_list = NULL; \tint capi_j,capi_i = 0; \tint capi_longjmp_ok = 1; #decl# @@ -85,13 +86,31 @@ \t\tgoto capi_fail; \t} #setdims# +#ifdef PYPY_VERSION +#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) +\tcapi_arglist_list = PySequence_List(capi_arglist); +\tif (capi_arglist_list == NULL) goto capi_fail; +#else +#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) +#endif #pyobjfrom# +#undef CAPI_ARGLIST_SETITEM +#ifdef PYPY_VERSION +\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); +#else \tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +#endif \tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); #ifdef F2PY_REPORT_ATEXIT f2py_cb_start_call_clock(); #endif +#ifdef PYPY_VERSION +\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist_list); +\tPy_DECREF(capi_arglist_list); +\tcapi_arglist_list = NULL; +#else \tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist); +#endif #ifdef F2PY_REPORT_ATEXIT f2py_cb_stop_call_clock(); #endif @@ -119,6 +138,7 @@ capi_fail: \tfprintf(stderr,\"Call-back #name# failed.\\n\"); \tPy_XDECREF(capi_return); +\tPy_XDECREF(capi_arglist_list); \tif (capi_longjmp_ok) \t\tlongjmp(#name#_jmpbuf,-1); capi_return_pt: @@ -318,11 +338,11 @@ }, { 'pyobjfrom': [{isintent_in: """\ \tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname_i#))) +\t\tif 
(CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#))) \t\t\tgoto capi_fail;"""}, {isintent_inout: """\ \tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) +\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) \t\t\tgoto capi_fail;"""}], 'need': [{isintent_in: 'pyobj_from_#ctype#1'}, {isintent_inout: 'pyarr_from_p_#ctype#1'}, @@ -343,12 +363,12 @@ 'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'}, {isintent_in: """\ \tif (#name#_nofargs>capi_i) -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) +\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) \t\t\tgoto capi_fail;"""}, {isintent_inout: """\ \tif (#name#_nofargs>capi_i) { \t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len}; -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) +\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) \t\t\tgoto capi_fail; \t}"""}], 'need': [{isintent_in: 'pyobj_from_#ctype#1size'}, @@ -367,17 +387,21 @@ 'pyobjfrom': [{debugcapi: '\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'}, {isintent_c: """\ \tif (#name#_nofargs>capi_i) { -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_ARRAY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ +\t\tint itemsize_ = #atype# == NPY_STRING ? 1 : 0; +\t\t/*XXX: Hmm, what will destroy this array??? 
*/ +\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_CARRAY,NULL); """, l_not(isintent_c): """\ \tif (#name#_nofargs>capi_i) { -\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_ARRAY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */ +\t\tint itemsize_ = #atype# == NPY_STRING ? 1 : 0; +\t\t/*XXX: Hmm, what will destroy this array??? */ +\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_FARRAY,NULL); """, }, """ \t\tif (tmp_arr==NULL) \t\t\tgoto capi_fail; -\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr)) +\t\tif (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr)) \t\t\tgoto capi_fail; }"""], '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), diff -Nru python-numpy-1.13.3/numpy/f2py/cfuncs.py python-numpy-1.14.5/numpy/f2py/cfuncs.py --- python-numpy-1.13.3/numpy/f2py/cfuncs.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/f2py/cfuncs.py 2018-06-12 18:28:52.000000000 +0000 @@ -99,8 +99,8 @@ #ifdef DEBUGCFUNCS #define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); #define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ -\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ -\tfprintf(stderr,\"\\n\"); + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); #else #define CFUNCSMESS(mess) #define CFUNCSMESSPY(mess,obj) @@ -219,18 +219,18 @@ """ cppmacros['SWAP'] = """\ #define SWAP(a,b,t) {\\ -\tt *c;\\ -\tc = a;\\ -\ta = b;\\ -\tb = c;} + t *c;\\ + c = a;\\ + a = b;\\ + b = c;} """ # cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & # NPY_ARRAY_C_CONTIGUOUS)' cppmacros['PRINTPYOBJERR'] = """\ #define PRINTPYOBJERR(obj)\\ 
-\tfprintf(stderr,\"#modulename#.error is related to \");\\ -\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ -\tfprintf(stderr,\"\\n\"); + fprintf(stderr,\"#modulename#.error is related to \");\\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); """ cppmacros['MINMAX'] = """\ #ifndef max @@ -331,7 +331,7 @@ /* New SciPy */ #define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; #define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; -#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_ ## ctype ## 1(*v),PyArray_DATA(arr)); break; +#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; #define TRYPYARRAYTEMPLATE(ctype,typecode) \\ PyArrayObject *arr = NULL;\\ @@ -357,7 +357,7 @@ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ - case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_ ## ctype ## 1(*v),PyArray_DATA(arr), arr); break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ default: return -2;\\ };\\ return 1 @@ -365,7 +365,7 @@ needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\ -#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),PyArray_DATA(arr), arr); break; +#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; #define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ PyArrayObject *arr = NULL;\\ if (!obj) return -2;\\ @@ -394,66 +394,66 @@ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; 
break;\\ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;break;\\ - case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),PyArray_DATA(arr), arr); break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ default: return -2;\\ };\\ return -1; """ # cppmacros['NUMFROMARROBJ']="""\ # define NUMFROMARROBJ(typenum,ctype) \\ -# \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -# \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -# \tif (arr) {\\ -# \t\tif (PyArray_TYPE(arr)==NPY_OBJECT) {\\ -# \t\t\tif (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ -# \t\t\tgoto capi_fail;\\ -# \t\t} else {\\ -# \t\t\t(PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ -# \t\t}\\ -# \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -# \t\treturn 1;\\ -# \t} +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } # """ # XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ # cppmacros['CNUMFROMARROBJ']="""\ # define CNUMFROMARROBJ(typenum,ctype) \\ -# \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ -# \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ -# \tif (arr) {\\ -# \t\tif (PyArray_TYPE(arr)==NPY_OBJECT) {\\ -# \t\t\tif (!ctype ## 
_from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ -# \t\t\tgoto capi_fail;\\ -# \t\t} else {\\ -# \t\t\t(PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ -# \t\t}\\ -# \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ -# \t\treturn 1;\\ -# \t} +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } # """ needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] cppmacros['GETSTRFROMPYTUPLE'] = """\ #define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ -\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ -\t\tif (rv_cb_str == NULL)\\ -\t\t\tgoto capi_fail;\\ -\t\tif (PyString_Check(rv_cb_str)) {\\ -\t\t\tstr[len-1]='\\0';\\ -\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ -\t\t} else {\\ -\t\t\tPRINTPYOBJERR(rv_cb_str);\\ -\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\ -\t\t\tgoto capi_fail;\\ -\t\t}\\ -\t} + PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ + if (rv_cb_str == NULL)\\ + goto capi_fail;\\ + if (PyString_Check(rv_cb_str)) {\\ + str[len-1]='\\0';\\ + STRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\ + } else {\\ + PRINTPYOBJERR(rv_cb_str);\\ + PyErr_SetString(#modulename#_error,\"string object expected\");\\ + goto capi_fail;\\ + }\\ + } """ cppmacros['GETSCALARFROMPYTUPLE'] = """\ #define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ -\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ -\t\tif (!(ctype ## 
_from_pyobj((var),capi_tmp,mess)))\\ -\t\t\tgoto capi_fail;\\ -\t} + if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ + if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ + goto capi_fail;\\ + } """ cppmacros['FAILNULL'] = """\\ @@ -471,12 +471,12 @@ """ cppmacros['STRINGMALLOC'] = """\ #define STRINGMALLOC(str,len)\\ -\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ -\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ -\t\tgoto capi_fail;\\ -\t} else {\\ -\t\t(str)[len] = '\\0';\\ -\t} + if ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\ + PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ + goto capi_fail;\\ + } else {\\ + (str)[len] = '\\0';\\ + } """ cppmacros['STRINGFREE'] = """\ #define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) @@ -504,39 +504,39 @@ """ cppmacros['CHECKGENERIC'] = """\ #define CHECKGENERIC(check,tcheck,name) \\ -\tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ cppmacros['CHECKARRAY'] = """\ #define CHECKARRAY(check,tcheck,name) \\ -\tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ cppmacros['CHECKSTRING'] = """\ #define CHECKSTRING(check,tcheck,name,show,var)\\ -\tif (!(check)) {\\ -\t\tchar errstring[256];\\ -\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ -\t\tPyErr_SetString(#modulename#_error, errstring);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ + 
PyErr_SetString(#modulename#_error, errstring);\\ + /*goto capi_fail;*/\\ + } else """ cppmacros['CHECKSCALAR'] = """\ #define CHECKSCALAR(check,tcheck,name,show,var)\\ -\tif (!(check)) {\\ -\t\tchar errstring[256];\\ -\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ -\t\tPyErr_SetString(#modulename#_error,errstring);\\ -\t\t/*goto capi_fail;*/\\ -\t} else """ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ + PyErr_SetString(#modulename#_error,errstring);\\ + /*goto capi_fail;*/\\ + } else """ # cppmacros['CHECKDIMS']="""\ # define CHECKDIMS(dims,rank) \\ -# \tfor (int i=0;i<(rank);i++)\\ -# \t\tif (dims[i]<0) {\\ -# \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ -# \t\t\tgoto capi_fail;\\ -# \t\t} +# for (int i=0;i<(rank);i++)\\ +# if (dims[i]<0) {\\ +# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ +# goto capi_fail;\\ +# } # """ cppmacros[ 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' @@ -549,17 +549,17 @@ cfuncs['calcarrindex'] = """\ static int calcarrindex(int *i,PyArrayObject *arr) { -\tint k,ii = i[0]; -\tfor (k=1; k < PyArray_NDIM(arr); k++) -\t\tii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ -\treturn ii; + int k,ii = i[0]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ + return ii; }""" cfuncs['calcarrindextr'] = """\ static int calcarrindextr(int *i,PyArrayObject *arr) { -\tint k,ii = i[PyArray_NDIM(arr)-1]; -\tfor (k=1; k < PyArray_NDIM(arr); k++) -\t\tii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ -\treturn ii; + int k,ii = i[PyArray_NDIM(arr)-1]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 
1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ + return ii; }""" cfuncs['forcomb'] = """\ static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; @@ -604,543 +604,543 @@ needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] cfuncs['try_pyarr_from_string'] = """\ static int try_pyarr_from_string(PyObject *obj,const string str) { -\tPyArrayObject *arr = NULL; -\tif (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL))) -\t\t{ STRINGCOPYN(PyArray_DATA(arr),str,PyArray_NBYTES(arr)); } -\treturn 1; + PyArrayObject *arr = NULL; + if (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL))) + { STRINGCOPYN(PyArray_DATA(arr),str,PyArray_NBYTES(arr)); } + return 1; capi_fail: -\tPRINTPYOBJERR(obj); -\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\"); -\treturn 0; + PRINTPYOBJERR(obj); + PyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\"); + return 0; } """ needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN'] cfuncs['string_from_pyobj'] = """\ static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) { -\tPyArrayObject *arr = NULL; -\tPyObject *tmp = NULL; + PyArrayObject *arr = NULL; + PyObject *tmp = NULL; #ifdef DEBUGCFUNCS fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj); #endif -\tif (obj == Py_None) { -\t\tif (*len == -1) -\t\t\t*len = strlen(inistr); /* Will this cause problems? 
*/ -\t\tSTRINGMALLOC(*str,*len); -\t\tSTRINGCOPYN(*str,inistr,*len+1); -\t\treturn 1; -\t} -\tif (PyArray_Check(obj)) { -\t\tif ((arr = (PyArrayObject *)obj) == NULL) -\t\t\tgoto capi_fail; -\t\tif (!ISCONTIGUOUS(arr)) { -\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\"); -\t\t\tgoto capi_fail; -\t\t} -\t\tif (*len == -1) -\t\t\t*len = (PyArray_ITEMSIZE(arr))*PyArray_SIZE(arr); -\t\tSTRINGMALLOC(*str,*len); -\t\tSTRINGCOPYN(*str,PyArray_DATA(arr),*len+1); -\t\treturn 1; -\t} -\tif (PyString_Check(obj)) { -\t\ttmp = obj; -\t\tPy_INCREF(tmp); -\t} + if (obj == Py_None) { + if (*len == -1) + *len = strlen(inistr); /* Will this cause problems? */ + STRINGMALLOC(*str,*len); + STRINGCOPYN(*str,inistr,*len+1); + return 1; + } + if (PyArray_Check(obj)) { + if ((arr = (PyArrayObject *)obj) == NULL) + goto capi_fail; + if (!ISCONTIGUOUS(arr)) { + PyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\"); + goto capi_fail; + } + if (*len == -1) + *len = (PyArray_ITEMSIZE(arr))*PyArray_SIZE(arr); + STRINGMALLOC(*str,*len); + STRINGCOPYN(*str,PyArray_DATA(arr),*len+1); + return 1; + } + if (PyString_Check(obj)) { + tmp = obj; + Py_INCREF(tmp); + } #if PY_VERSION_HEX >= 0x03000000 -\telse if (PyUnicode_Check(obj)) { -\t\ttmp = PyUnicode_AsASCIIString(obj); -\t} -\telse { -\t\tPyObject *tmp2; -\t\ttmp2 = PyObject_Str(obj); -\t\tif (tmp2) { -\t\t\ttmp = PyUnicode_AsASCIIString(tmp2); -\t\t\tPy_DECREF(tmp2); -\t\t} -\t\telse { -\t\t\ttmp = NULL; -\t\t} -\t} -#else -\telse { -\t\ttmp = PyObject_Str(obj); -\t} -#endif -\tif (tmp == NULL) goto capi_fail; -\tif (*len == -1) -\t\t*len = PyString_GET_SIZE(tmp); -\tSTRINGMALLOC(*str,*len); -\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1); -\tPy_DECREF(tmp); -\treturn 1; + else if (PyUnicode_Check(obj)) { + tmp = PyUnicode_AsASCIIString(obj); + } + else { + PyObject *tmp2; + tmp2 = PyObject_Str(obj); + if (tmp2) { + tmp = PyUnicode_AsASCIIString(tmp2); + Py_DECREF(tmp2); + } + else { + tmp = NULL; + 
} + } +#else + else { + tmp = PyObject_Str(obj); + } +#endif + if (tmp == NULL) goto capi_fail; + if (*len == -1) + *len = PyString_GET_SIZE(tmp); + STRINGMALLOC(*str,*len); + STRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1); + Py_DECREF(tmp); + return 1; capi_fail: -\tPy_XDECREF(tmp); -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; + Py_XDECREF(tmp); + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; } """ needs['char_from_pyobj'] = ['int_from_pyobj'] cfuncs['char_from_pyobj'] = """\ static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (char)i; -\t\treturn 1; -\t} -\treturn 0; + int i=0; + if (int_from_pyobj(&i,obj,errmess)) { + *v = (char)i; + return 1; + } + return 0; } """ needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] cfuncs['signed_char_from_pyobj'] = """\ static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (signed_char)i; -\t\treturn 1; -\t} -\treturn 0; + int i=0; + if (int_from_pyobj(&i,obj,errmess)) { + *v = (signed_char)i; + return 1; + } + return 0; } """ needs['short_from_pyobj'] = ['int_from_pyobj'] cfuncs['short_from_pyobj'] = """\ static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) { -\tint i=0; -\tif (int_from_pyobj(&i,obj,errmess)) { -\t\t*v = (short)i; -\t\treturn 1; -\t} -\treturn 0; + int i=0; + if (int_from_pyobj(&i,obj,errmess)) { + *v = (short)i; + return 1; + } + return 0; } """ cfuncs['int_from_pyobj'] = """\ static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyInt_Check(obj)) { -\t\t*v = (int)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Int(obj); -\tif (tmp) { -\t\t*v = 
PyInt_AS_LONG(tmp); -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; + PyObject* tmp = NULL; + if (PyInt_Check(obj)) { + *v = (int)PyInt_AS_LONG(obj); + return 1; + } + tmp = PyNumber_Int(obj); + if (tmp) { + *v = PyInt_AS_LONG(tmp); + Py_DECREF(tmp); + return 1; + } + if (PyComplex_Check(obj)) + tmp = PyObject_GetAttrString(obj,\"real\"); + else if (PyString_Check(obj) || PyUnicode_Check(obj)) + /*pass*/; + else if (PySequence_Check(obj)) + tmp = PySequence_GetItem(obj,0); + if (tmp) { + PyErr_Clear(); + if (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; } """ cfuncs['long_from_pyobj'] = """\ static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyInt_Check(obj)) { -\t\t*v = PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Int(obj); -\tif (tmp) { -\t\t*v = PyInt_AS_LONG(tmp); -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; 
-\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; + PyObject* tmp = NULL; + if (PyInt_Check(obj)) { + *v = PyInt_AS_LONG(obj); + return 1; + } + tmp = PyNumber_Int(obj); + if (tmp) { + *v = PyInt_AS_LONG(tmp); + Py_DECREF(tmp); + return 1; + } + if (PyComplex_Check(obj)) + tmp = PyObject_GetAttrString(obj,\"real\"); + else if (PyString_Check(obj) || PyUnicode_Check(obj)) + /*pass*/; + else if (PySequence_Check(obj)) + tmp = PySequence_GetItem(obj,0); + if (tmp) { + PyErr_Clear(); + if (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; } """ needs['long_long_from_pyobj'] = ['long_long'] cfuncs['long_long_from_pyobj'] = """\ static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyLong_Check(obj)) { -\t\t*v = PyLong_AsLongLong(obj); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PyInt_Check(obj)) { -\t\t*v = (long_long)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\ttmp = PyNumber_Long(obj); -\tif (tmp) { -\t\t*v = PyLong_AsLongLong(tmp); -\t\tPy_DECREF(tmp); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; + PyObject* tmp = NULL; + if (PyLong_Check(obj)) { + *v = PyLong_AsLongLong(obj); + return (!PyErr_Occurred()); + } + if (PyInt_Check(obj)) { + *v = (long_long)PyInt_AS_LONG(obj); + return 1; + } + tmp = PyNumber_Long(obj); + if (tmp) { + *v = PyLong_AsLongLong(tmp); + 
Py_DECREF(tmp); + return (!PyErr_Occurred()); + } + if (PyComplex_Check(obj)) + tmp = PyObject_GetAttrString(obj,\"real\"); + else if (PyString_Check(obj) || PyUnicode_Check(obj)) + /*pass*/; + else if (PySequence_Check(obj)) + tmp = PySequence_GetItem(obj,0); + if (tmp) { + PyErr_Clear(); + if (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; } """ needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] cfuncs['long_double_from_pyobj'] = """\ static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) { -\tdouble d=0; -\tif (PyArray_CheckScalar(obj)){ -\t\tif PyArray_IsScalar(obj, LongDouble) { -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t\treturn 1; -\t\t} -\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) { -\t\t\t(*v) = *((npy_longdouble *)PyArray_DATA(obj)); -\t\t\treturn 1; -\t\t} -\t} -\tif (double_from_pyobj(&d,obj,errmess)) { -\t\t*v = (long_double)d; -\t\treturn 1; -\t} -\treturn 0; + double d=0; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, LongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(obj)); + return 1; + } + } + if (double_from_pyobj(&d,obj,errmess)) { + *v = (long_double)d; + return 1; + } + return 0; } """ cfuncs['double_from_pyobj'] = """\ static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) { -\tPyObject* tmp = NULL; -\tif (PyFloat_Check(obj)) { + PyObject* tmp = NULL; + if (PyFloat_Check(obj)) { #ifdef __sgi -\t\t*v = PyFloat_AsDouble(obj); + *v = PyFloat_AsDouble(obj); #else -\t\t*v = PyFloat_AS_DOUBLE(obj); + *v = PyFloat_AS_DOUBLE(obj); #endif -\t\treturn 1; -\t} -\ttmp = PyNumber_Float(obj); -\tif (tmp) { + return 1; + } + tmp = PyNumber_Float(obj); + if (tmp) 
{ #ifdef __sgi -\t\t*v = PyFloat_AsDouble(tmp); + *v = PyFloat_AsDouble(tmp); #else -\t\t*v = PyFloat_AS_DOUBLE(tmp); + *v = PyFloat_AS_DOUBLE(tmp); #endif -\t\tPy_DECREF(tmp); -\t\treturn 1; -\t} -\tif (PyComplex_Check(obj)) -\t\ttmp = PyObject_GetAttrString(obj,\"real\"); -\telse if (PyString_Check(obj) || PyUnicode_Check(obj)) -\t\t/*pass*/; -\telse if (PySequence_Check(obj)) -\t\ttmp = PySequence_GetItem(obj,0); -\tif (tmp) { -\t\tPyErr_Clear(); -\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} -\t\tPy_DECREF(tmp); -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) err = #modulename#_error; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; + Py_DECREF(tmp); + return 1; + } + if (PyComplex_Check(obj)) + tmp = PyObject_GetAttrString(obj,\"real\"); + else if (PyString_Check(obj) || PyUnicode_Check(obj)) + /*pass*/; + else if (PySequence_Check(obj)) + tmp = PySequence_GetItem(obj,0); + if (tmp) { + PyErr_Clear(); + if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; } """ needs['float_from_pyobj'] = ['double_from_pyobj'] cfuncs['float_from_pyobj'] = """\ static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) { -\tdouble d=0.0; -\tif (double_from_pyobj(&d,obj,errmess)) { -\t\t*v = (float)d; -\t\treturn 1; -\t} -\treturn 0; + double d=0.0; + if (double_from_pyobj(&d,obj,errmess)) { + *v = (float)d; + return 1; + } + return 0; } """ needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', 'complex_double_from_pyobj'] cfuncs['complex_long_double_from_pyobj'] = """\ static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) { -\tcomplex_double cd={0.0,0.0}; -\tif (PyArray_CheckScalar(obj)){ -\t\tif PyArray_IsScalar(obj, CLongDouble) { -\t\t\tPyArray_ScalarAsCtype(obj, v); 
-\t\t\treturn 1; -\t\t} -\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { -\t\t\t(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; -\t\t\t(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; -\t\t\treturn 1; -\t\t} -\t} -\tif (complex_double_from_pyobj(&cd,obj,errmess)) { -\t\t(*v).r = (long_double)cd.r; -\t\t(*v).i = (long_double)cd.i; -\t\treturn 1; -\t} -\treturn 0; + complex_double cd={0.0,0.0}; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, CLongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { + (*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real; + (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag; + return 1; + } + } + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (long_double)cd.r; + (*v).i = (long_double)cd.i; + return 1; + } + return 0; } """ needs['complex_double_from_pyobj'] = ['complex_double'] cfuncs['complex_double_from_pyobj'] = """\ static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) { -\tPy_complex c; -\tif (PyComplex_Check(obj)) { -\t\tc=PyComplex_AsCComplex(obj); -\t\t(*v).r=c.real, (*v).i=c.imag; -\t\treturn 1; -\t} -\tif (PyArray_IsScalar(obj, ComplexFloating)) { -\t\tif (PyArray_IsScalar(obj, CFloat)) { -\t\t\tnpy_cfloat new; -\t\t\tPyArray_ScalarAsCtype(obj, &new); -\t\t\t(*v).r = (double)new.real; -\t\t\t(*v).i = (double)new.imag; -\t\t} -\t\telse if (PyArray_IsScalar(obj, CLongDouble)) { -\t\t\tnpy_clongdouble new; -\t\t\tPyArray_ScalarAsCtype(obj, &new); -\t\t\t(*v).r = (double)new.real; -\t\t\t(*v).i = (double)new.imag; -\t\t} -\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */ -\t\t\tPyArray_ScalarAsCtype(obj, v); -\t\t} -\t\treturn 1; -\t} -\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ -\t\tPyObject *arr; -\t\tif (PyArray_Check(obj)) { -\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); -\t\t} -\t\telse { -\t\t\tarr 
= PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); -\t\t} -\t\tif (arr==NULL) return 0; -\t\t(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; -\t\t(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; -\t\treturn 1; -\t} -\t/* Python does not provide PyNumber_Complex function :-( */ -\t(*v).i=0.0; -\tif (PyFloat_Check(obj)) { + Py_complex c; + if (PyComplex_Check(obj)) { + c=PyComplex_AsCComplex(obj); + (*v).r=c.real, (*v).i=c.imag; + return 1; + } + if (PyArray_IsScalar(obj, ComplexFloating)) { + if (PyArray_IsScalar(obj, CFloat)) { + npy_cfloat new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)new.real; + (*v).i = (double)new.imag; + } + else if (PyArray_IsScalar(obj, CLongDouble)) { + npy_clongdouble new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)new.real; + (*v).i = (double)new.imag; + } + else { /* if (PyArray_IsScalar(obj, CDouble)) */ + PyArray_ScalarAsCtype(obj, v); + } + return 1; + } + if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ + PyObject *arr; + if (PyArray_Check(obj)) { + arr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); + } + else { + arr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); + } + if (arr==NULL) return 0; + (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real; + (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag; + return 1; + } + /* Python does not provide PyNumber_Complex function :-( */ + (*v).i=0.0; + if (PyFloat_Check(obj)) { #ifdef __sgi -\t\t(*v).r = PyFloat_AsDouble(obj); + (*v).r = PyFloat_AsDouble(obj); #else -\t\t(*v).r = PyFloat_AS_DOUBLE(obj); + (*v).r = PyFloat_AS_DOUBLE(obj); #endif -\t\treturn 1; -\t} -\tif (PyInt_Check(obj)) { -\t\t(*v).r = (double)PyInt_AS_LONG(obj); -\t\treturn 1; -\t} -\tif (PyLong_Check(obj)) { -\t\t(*v).r = PyLong_AsDouble(obj); -\t\treturn (!PyErr_Occurred()); -\t} -\tif (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) { -\t\tPyObject *tmp = PySequence_GetItem(obj,0); -\t\tif (tmp) { -\t\t\tif 
(complex_double_from_pyobj(v,tmp,errmess)) { -\t\t\t\tPy_DECREF(tmp); -\t\t\t\treturn 1; -\t\t\t} -\t\t\tPy_DECREF(tmp); -\t\t} -\t} -\t{ -\t\tPyObject* err = PyErr_Occurred(); -\t\tif (err==NULL) -\t\t\terr = PyExc_TypeError; -\t\tPyErr_SetString(err,errmess); -\t} -\treturn 0; + return 1; + } + if (PyInt_Check(obj)) { + (*v).r = (double)PyInt_AS_LONG(obj); + return 1; + } + if (PyLong_Check(obj)) { + (*v).r = PyLong_AsDouble(obj); + return (!PyErr_Occurred()); + } + if (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) { + PyObject *tmp = PySequence_GetItem(obj,0); + if (tmp) { + if (complex_double_from_pyobj(v,tmp,errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) + err = PyExc_TypeError; + PyErr_SetString(err,errmess); + } + return 0; } """ needs['complex_float_from_pyobj'] = [ 'complex_float', 'complex_double_from_pyobj'] cfuncs['complex_float_from_pyobj'] = """\ static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) { -\tcomplex_double cd={0.0,0.0}; -\tif (complex_double_from_pyobj(&cd,obj,errmess)) { -\t\t(*v).r = (float)cd.r; -\t\t(*v).i = (float)cd.i; -\t\treturn 1; -\t} -\treturn 0; + complex_double cd={0.0,0.0}; + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (float)cd.r; + (*v).i = (float)cd.i; + return 1; + } + return 0; } """ needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] cfuncs[ - 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n' + 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] cfuncs[ - 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' + 
'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] cfuncs[ - 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' + 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] cfuncs[ - 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n' + 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] cfuncs[ - 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n' + 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] cfuncs[ - 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n' + 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' needs['try_pyarr_from_long_long'] = [ 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] cfuncs[ - 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' + 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' needs['try_pyarr_from_float'] = ['pyobj_from_float1', 
'TRYPYARRAYTEMPLATE'] cfuncs[ - 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n' + 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] cfuncs[ - 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n' + 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' needs['try_pyarr_from_complex_float'] = [ 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] cfuncs[ - 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' + 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' needs['try_pyarr_from_complex_double'] = [ 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] cfuncs[ - 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] cfuncs['create_cb_arglist'] = """\ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) { -\tPyObject *tmp = NULL; -\tPyObject *tmp_fun = NULL; -\tint tot,opt,ext,siz,i,di=0; -\tCFUNCSMESS(\"create_cb_arglist\\n\"); -\ttot=opt=ext=siz=0; -\t/* Get the total number of arguments */ -\tif 
(PyFunction_Check(fun)) -\t\ttmp_fun = fun; -\telse { -\t\tdi = 1; -\t\tif (PyObject_HasAttrString(fun,\"im_func\")) { -\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\"); -\t\t} -\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) { -\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\"); -\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\")) -\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); -\t\t\telse { -\t\t\t\ttmp_fun = fun; /* built-in function */ -\t\t\t\ttot = maxnofargs; -\t\t\t\tif (xa != NULL) -\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa); -\t\t\t} -\t\t\tPy_XDECREF(tmp); -\t\t} -\t\telse if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { -\t\t\ttot = maxnofargs; -\t\t\tif (xa != NULL) -\t\t\t\ttot += PyTuple_Size((PyObject *)xa); -\t\t\ttmp_fun = fun; -\t\t} -\t\telse if (F2PyCapsule_Check(fun)) { -\t\t\ttot = maxnofargs; -\t\t\tif (xa != NULL) -\t\t\t\text = PyTuple_Size((PyObject *)xa); -\t\t\tif(ext>0) { -\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); -\t\t\t\tgoto capi_fail; -\t\t\t} -\t\t\ttmp_fun = fun; -\t\t} -\t} + PyObject *tmp = NULL; + PyObject *tmp_fun = NULL; + int tot,opt,ext,siz,i,di=0; + CFUNCSMESS(\"create_cb_arglist\\n\"); + tot=opt=ext=siz=0; + /* Get the total number of arguments */ + if (PyFunction_Check(fun)) + tmp_fun = fun; + else { + di = 1; + if (PyObject_HasAttrString(fun,\"im_func\")) { + tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); + } + else if (PyObject_HasAttrString(fun,\"__call__\")) { + tmp = PyObject_GetAttrString(fun,\"__call__\"); + if (PyObject_HasAttrString(tmp,\"im_func\")) + tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); + else { + tmp_fun = fun; /* built-in function */ + tot = maxnofargs; + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + } + Py_XDECREF(tmp); + } + else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { + tot = maxnofargs; + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + tmp_fun = fun; + } + else if 
(F2PyCapsule_Check(fun)) { + tot = maxnofargs; + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + if(ext>0) { + fprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\"); + goto capi_fail; + } + tmp_fun = fun; + } + } if (tmp_fun==NULL) { fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name)); goto capi_fail; } #if PY_VERSION_HEX >= 0x03000000 -\tif (PyObject_HasAttrString(tmp_fun,\"__code__\")) { -\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) + if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { + if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) #else -\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) { -\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) + if (PyObject_HasAttrString(tmp_fun,\"func_code\")) { + if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) #endif -\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di; -\t\tPy_XDECREF(tmp); -\t} -\t/* Get the number of optional arguments */ + tot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di; + Py_XDECREF(tmp); + } + /* Get the number of optional arguments */ #if PY_VERSION_HEX >= 0x03000000 -\tif (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { -\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) + if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { + if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) #else -\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) { -\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) + if (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) { + if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\"))) #endif 
-\t\t\topt = PyTuple_Size(tmp); -\t\tPy_XDECREF(tmp); -\t} -\t/* Get the number of extra arguments */ -\tif (xa != NULL) -\t\text = PyTuple_Size((PyObject *)xa); -\t/* Calculate the size of call-backs argument list */ -\tsiz = MIN(maxnofargs+ext,tot); -\t*nofargs = MAX(0,siz-ext); + opt = PyTuple_Size(tmp); + Py_XDECREF(tmp); + } + /* Get the number of extra arguments */ + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + /* Calculate the size of call-backs argument list */ + siz = MIN(maxnofargs+ext,tot); + *nofargs = MAX(0,siz-ext); #ifdef DEBUGCFUNCS -\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); + fprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs); #endif -\tif (siz 0 and line: - if line[0] != '!' and line.strip(): - n -= 1 - if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': - result = 1 - break + with open(file, 'r') as f: line = f.readline() - f.close() + n = 15 # the number of non-comment lines to scan for hints + if _has_f_header(line): + n = 0 + elif _has_f90_header(line): + n = 0 + result = 1 + while n > 0 and line: + if line[0] != '!' 
and line.strip(): + n -= 1 + if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': + result = 1 + break + line = f.readline() return result @@ -1036,13 +1035,13 @@ try: del groupcache[groupcounter]['vars'][name][ groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] - except: + except Exception: pass if block in ['function', 'subroutine']: # set global attributes try: groupcache[groupcounter]['vars'][name] = appenddecl( groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) - except: + except Exception: pass if case == 'callfun': # return type if result and result in groupcache[groupcounter]['vars']: @@ -1052,7 +1051,7 @@ # if groupcounter>1: # name is interfaced try: groupcache[groupcounter - 2]['interfaced'].append(name) - except: + except Exception: pass if block == 'function': t = typespattern[0].match(m.group('before') + ' ' + name) @@ -1174,7 +1173,7 @@ for e in markoutercomma(ll).split('@,@'): try: k, initexpr = [x.strip() for x in e.split('=')] - except: + except Exception: outmess( 'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll)) continue @@ -1251,7 +1250,7 @@ if '-' in r: try: begc, endc = [x.strip() for x in r.split('-')] - except: + except Exception: outmess( 'analyzeline: expected "-" instead of "%s" in range list of implicit statement\n' % r) continue @@ -1790,7 +1789,7 @@ try: filepositiontext = 'In: %s:%s\n' % (block['from'], block['name']) - except: + except Exception: pass @@ -2108,7 +2107,7 @@ try: c = int(myeval(e, {}, {})) return 0, c, None - except: + except Exception: pass if getlincoef_re_1.match(e): return 1, 0, e @@ -2150,7 +2149,7 @@ c2 = myeval(ee, {}, {}) if (a * 0.5 + b == c and a * 1.5 + b == c2): return a, b, x - except: + except Exception: pass break return None, None, None @@ -2162,11 +2161,11 @@ edl = [] try: edl.append(myeval(dl[0], {}, {})) - except: + except Exception: edl.append(dl[0]) try: edl.append(myeval(dl[1], {}, 
{})) - except: + except Exception: edl.append(dl[1]) if isinstance(edl[0], int): p1 = 1 - edl[0] @@ -2186,7 +2185,7 @@ d = '%s-(%s)+1' % (dl[1], dl[0]) try: return repr(myeval(d, {}, {})), None, None - except: + except Exception: pass d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args) if None not in [d1[0], d2[0]]: @@ -2579,7 +2578,7 @@ l = vars[n]['charselector']['len'] try: l = str(eval(l, {}, params)) - except: + except Exception: pass vars[n]['charselector']['len'] = l @@ -2588,7 +2587,7 @@ l = vars[n]['kindselector']['kind'] try: l = str(eval(l, {}, params)) - except: + except Exception: pass vars[n]['kindselector']['kind'] = l @@ -2819,7 +2818,7 @@ try: kindselect['kind'] = eval( kindselect['kind'], {}, params) - except: + except Exception: pass vars[n]['kindselector'] = kindselect if charselect: @@ -3230,7 +3229,7 @@ try: v = eval(v) v = '(%s,%s)' % (v.real, v.imag) - except: + except Exception: pass vardef = '%s :: %s=%s' % (vardef, a, v) else: @@ -3335,8 +3334,7 @@ if pyffilename: outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) pyf = crack2fortran(postlist) - f = open(pyffilename, 'w') - f.write(pyf) - f.close() + with open(pyffilename, 'w') as f: + f.write(pyf) if showblocklist: show(postlist) diff -Nru python-numpy-1.13.3/numpy/f2py/f2py_testing.py python-numpy-1.14.5/numpy/f2py/f2py_testing.py --- python-numpy-1.13.3/numpy/f2py/f2py_testing.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/f2py/f2py_testing.py 2018-06-12 17:31:56.000000000 +0000 @@ -3,7 +3,7 @@ import sys import re -from numpy.testing.utils import jiffies, memusage +from numpy.testing import jiffies, memusage def cmdline(): diff -Nru python-numpy-1.13.3/numpy/f2py/__init__.py python-numpy-1.14.5/numpy/f2py/__init__.py --- python-numpy-1.13.3/numpy/f2py/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/f2py/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -69,6 +69,6 @@ f.close() return status -from 
numpy.testing.nosetester import _numpy_tester +from numpy.testing import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench diff -Nru python-numpy-1.13.3/numpy/f2py/rules.py python-numpy-1.14.5/numpy/f2py/rules.py --- python-numpy-1.13.3/numpy/f2py/rules.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/f2py/rules.py 2018-06-12 17:31:56.000000000 +0000 @@ -107,16 +107,14 @@ #################### Rules for C/API module ################# +generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time())) module_rules = { 'modulebody': """\ /* File: #modulename#module.c * This file is auto-generated with f2py (version:#f2py_version#). * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition, * written by Pearu Peterson . - * See http://cens.ioc.ee/projects/f2py2e/ - * Generation date: """ + time.asctime(time.localtime(time.time())) + """ - * $R""" + """evision:$ - * $D""" + """ate:$ + * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """ * Do not edit this file directly unless you know what you are doing!!! 
*/ diff -Nru python-numpy-1.13.3/numpy/f2py/src/fortranobject.c python-numpy-1.14.5/numpy/f2py/src/fortranobject.c --- python-numpy-1.13.3/numpy/f2py/src/fortranobject.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/f2py/src/fortranobject.c 2018-06-12 18:28:52.000000000 +0000 @@ -590,21 +590,21 @@ * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $ */ +static int check_and_fix_dimensions(const PyArrayObject* arr, + const int rank, + npy_intp *dims); + static int -count_nonpos(const int rank, - const npy_intp *dims) { +count_negative_dimensions(const int rank, + const npy_intp *dims) { int i=0,r=0; while (i 0) { int i; strcpy(mess, "failed to create intent(cache|hide)|optional array" "-- must have defined dimensions but got ("); @@ -719,8 +719,8 @@ /* intent(cache) */ if (PyArray_ISONESEGMENT(arr) && PyArray_ITEMSIZE(arr)>=elsize) { - if (check_and_fix_dimensions(arr,rank,dims)) { - return NULL; /*XXX: set exception */ + if (check_and_fix_dimensions(arr, rank, dims)) { + return NULL; } if (intent & F2PY_INTENT_OUT) Py_INCREF(arr); @@ -741,8 +741,8 @@ /* here we have always intent(in) or intent(inout) or intent(inplace) */ - if (check_and_fix_dimensions(arr,rank,dims)) { - return NULL; /*XXX: set exception */ + if (check_and_fix_dimensions(arr, rank, dims)) { + return NULL; } /* printf("intent alignement=%d\n", F2PY_GET_ALIGNMENT(intent)); @@ -842,8 +842,9 @@ | NPY_ARRAY_FORCECAST, NULL); if (arr==NULL) return NULL; - if (check_and_fix_dimensions(arr,rank,dims)) - return NULL; /*XXX: set exception */ + if (check_and_fix_dimensions(arr, rank, dims)) { + return NULL; + } return arr; } @@ -854,11 +855,16 @@ /*****************************************/ static -int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *dims) { +int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp *dims) +{ /* - This function fills in blanks (that are -1\'s) in dims list using + This function fills in blanks (that 
are -1's) in dims list using the dimensions from arr. It also checks that non-blank dims will match with the corresponding values in arr dimensions. + + Returns 0 if the function is successful. + + If an error condition is detected, an exception is set and 1 is returned. */ const npy_intp arr_size = (PyArray_NDIM(arr))?PyArray_Size((PyObject *)arr):1; #ifdef DEBUG_COPY_ND_ARRAY @@ -876,9 +882,10 @@ d = PyArray_DIM(arr,i); if (dims[i] >= 0) { if (d>1 && dims[i]!=d) { - fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT - " but got %" NPY_INTP_FMT "\n", - i,dims[i], d); + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n", + i, dims[i], d); return 1; } if (!dims[i]) dims[i] = 1; @@ -889,9 +896,10 @@ } for(i=PyArray_NDIM(arr);i1) { - fprintf(stderr,"%d-th dimension must be %" NPY_INTP_FMT - " but got 0 (not defined).\n", - i,dims[i]); + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be %" NPY_INTP_FMT + " but got 0 (not defined).\n", + i, dims[i]); return 1; } else if (free_axe<0) free_axe = i; @@ -902,9 +910,11 @@ new_size *= dims[free_axe]; } if (new_size != arr_size) { - fprintf(stderr,"unexpected array size: new_size=%" NPY_INTP_FMT - ", got array with arr_size=%" NPY_INTP_FMT " (maybe too many free" - " indices)\n", new_size,arr_size); + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT + " (maybe too many free indices)\n", + new_size, arr_size); return 1; } } else if (rank==PyArray_NDIM(arr)) { @@ -915,9 +925,10 @@ d = PyArray_DIM(arr,i); if (dims[i]>=0) { if (d > 1 && d!=dims[i]) { - fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT - " but got %" NPY_INTP_FMT "\n", - i,dims[i],d); + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n", + i, dims[i], d); return 1; } if (!dims[i]) dims[i] = 1; @@ -925,8 +936,10 @@ new_size 
*= dims[i]; } if (new_size != arr_size) { - fprintf(stderr,"unexpected array size: new_size=%" NPY_INTP_FMT - ", got array with arr_size=%" NPY_INTP_FMT "\n", new_size,arr_size); + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT "\n", + new_size, arr_size); return 1; } } else { /* [[1,2]] -> [[1],[2]] */ @@ -938,8 +951,10 @@ if (PyArray_DIM(arr,i)>1) ++effrank; if (dims[rank-1]>=0) if (effrank>rank) { - fprintf(stderr,"too many axes: %d (effrank=%d), expected rank=%d\n", - PyArray_NDIM(arr),effrank,rank); + PyErr_Format(PyExc_ValueError, + "too many axes: %d (effrank=%d), " + "expected rank=%d\n", + PyArray_NDIM(arr), effrank, rank); return 1; } @@ -949,9 +964,11 @@ else d = PyArray_DIM(arr,j++); if (dims[i]>=0) { if (d>1 && d!=dims[i]) { - fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT - " but got %" NPY_INTP_FMT " (real index=%d)\n", - i,dims[i],d,j-1); + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT + " (real index=%d)\n", + i, dims[i], d, j-1); return 1; } if (!dims[i]) dims[i] = 1; @@ -967,13 +984,28 @@ } for (i=0,size=1;i= 3: @@ -19,13 +21,13 @@ return np.sum(x*np.exp(phase), axis=1) -class TestFFTShift(TestCase): +class TestFFTShift(object): def test_fft_n(self): - self.assertRaises(ValueError, np.fft.fft, [1, 2, 3], 0) + assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0) -class TestFFT1D(TestCase): +class TestFFT1D(object): def test_fft(self): x = random(30) + 1j*random(30) @@ -145,7 +147,7 @@ assert_array_almost_equal(x_norm, np.linalg.norm(tmp)) -class TestFFTThreadSafe(TestCase): +class TestFFTThreadSafe(object): threads = 16 input_shape = (800, 200) diff -Nru python-numpy-1.13.3/numpy/fft/tests/test_helper.py python-numpy-1.14.5/numpy/fft/tests/test_helper.py --- python-numpy-1.13.3/numpy/fft/tests/test_helper.py 2017-09-29 17:31:46.000000000 +0000 +++ 
python-numpy-1.14.5/numpy/fft/tests/test_helper.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,13 +6,15 @@ from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal +from numpy.testing import ( + run_module_suite, assert_array_almost_equal, assert_equal, + ) from numpy import fft from numpy import pi from numpy.fft.helper import _FFTCache -class TestFFTShift(TestCase): +class TestFFTShift(object): def test_definition(self): x = [0, 1, 2, 3, 4, -4, -3, -2, -1] @@ -40,7 +42,7 @@ fft.ifftshift(shifted, axes=(0,))) -class TestFFTFreq(TestCase): +class TestFFTFreq(object): def test_definition(self): x = [0, 1, 2, 3, 4, -4, -3, -2, -1] @@ -51,7 +53,7 @@ assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) -class TestRFFTFreq(TestCase): +class TestRFFTFreq(object): def test_definition(self): x = [0, 1, 2, 3, 4] @@ -62,7 +64,7 @@ assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) -class TestIRFFTN(TestCase): +class TestIRFFTN(object): def test_not_last_axis_success(self): ar, ai = np.random.random((2, 16, 8, 32)) @@ -74,7 +76,7 @@ fft.irfftn(a, axes=axes) -class TestFFTCache(TestCase): +class TestFFTCache(object): def test_basic_behaviour(self): c = _FFTCache(max_size_in_mb=1, max_item_count=4) @@ -90,7 +92,7 @@ np.zeros(2, dtype=np.float32)) # Nothing should be left. - self.assertEqual(len(c._dict), 0) + assert_equal(len(c._dict), 0) # Now put everything in twice so it can be retrieved once and each will # still have one item left. @@ -101,7 +103,7 @@ np.ones(2, dtype=np.float32)) assert_array_almost_equal(c.pop_twiddle_factors(2), np.zeros(2, dtype=np.float32)) - self.assertEqual(len(c._dict), 2) + assert_equal(len(c._dict), 2) def test_automatic_pruning(self): # That's around 2600 single precision samples. 
@@ -109,27 +111,27 @@ c.put_twiddle_factors(1, np.ones(200, dtype=np.float32)) c.put_twiddle_factors(2, np.ones(200, dtype=np.float32)) - self.assertEqual(list(c._dict.keys()), [1, 2]) + assert_equal(list(c._dict.keys()), [1, 2]) # This is larger than the limit but should still be kept. c.put_twiddle_factors(3, np.ones(3000, dtype=np.float32)) - self.assertEqual(list(c._dict.keys()), [1, 2, 3]) + assert_equal(list(c._dict.keys()), [1, 2, 3]) # Add one more. c.put_twiddle_factors(4, np.ones(3000, dtype=np.float32)) # The other three should no longer exist. - self.assertEqual(list(c._dict.keys()), [4]) + assert_equal(list(c._dict.keys()), [4]) # Now test the max item count pruning. c = _FFTCache(max_size_in_mb=0.01, max_item_count=2) c.put_twiddle_factors(2, np.empty(2)) c.put_twiddle_factors(1, np.empty(2)) # Can still be accessed. - self.assertEqual(list(c._dict.keys()), [2, 1]) + assert_equal(list(c._dict.keys()), [2, 1]) c.put_twiddle_factors(3, np.empty(2)) # 1 and 3 can still be accessed - c[2] has been touched least recently # and is thus evicted. - self.assertEqual(list(c._dict.keys()), [1, 3]) + assert_equal(list(c._dict.keys()), [1, 3]) # One last test. We will add a single large item that is slightly # bigger then the cache size. Some small items can still be added. @@ -138,18 +140,18 @@ c.put_twiddle_factors(2, np.ones(2, dtype=np.float32)) c.put_twiddle_factors(3, np.ones(2, dtype=np.float32)) c.put_twiddle_factors(4, np.ones(2, dtype=np.float32)) - self.assertEqual(list(c._dict.keys()), [1, 2, 3, 4]) + assert_equal(list(c._dict.keys()), [1, 2, 3, 4]) # One more big item. This time it is 6 smaller ones but they are # counted as one big item. for _ in range(6): c.put_twiddle_factors(5, np.ones(500, dtype=np.float32)) # '1' no longer in the cache. Rest still in the cache. - self.assertEqual(list(c._dict.keys()), [2, 3, 4, 5]) + assert_equal(list(c._dict.keys()), [2, 3, 4, 5]) # Another big item - should now be the only item in the cache. 
c.put_twiddle_factors(6, np.ones(4000, dtype=np.float32)) - self.assertEqual(list(c._dict.keys()), [6]) + assert_equal(list(c._dict.keys()), [6]) if __name__ == "__main__": diff -Nru python-numpy-1.13.3/numpy/_globals.py python-numpy-1.14.5/numpy/_globals.py --- python-numpy-1.13.3/numpy/_globals.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/_globals.py 2018-06-12 18:28:52.000000000 +0000 @@ -53,7 +53,7 @@ pass -class _NoValue: +class _NoValue(object): """Special keyword value. This class may be used as the default value assigned to a deprecated diff -Nru python-numpy-1.13.3/numpy/_import_tools.py python-numpy-1.14.5/numpy/_import_tools.py --- python-numpy-1.13.3/numpy/_import_tools.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/_import_tools.py 2018-06-12 17:31:56.000000000 +0000 @@ -303,8 +303,7 @@ lines.append(line) line = tab line += ' ' + word - else: - lines.append(line) + lines.append(line) return '\n'.join(lines) def get_pkgdocs(self): diff -Nru python-numpy-1.13.3/numpy/__init__.py python-numpy-1.14.5/numpy/__init__.py --- python-numpy-1.13.3/numpy/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -148,9 +148,9 @@ # We don't actually use this ourselves anymore, but I'm not 100% sure that # no-one else in the world is using it (though I hope not) - from .testing import Tester - test = testing.nosetester._numpy_tester().test - bench = testing.nosetester._numpy_tester().bench + from .testing import Tester, _numpy_tester + test = _numpy_tester().test + bench = _numpy_tester().bench # Allow distributors to run custom init code from . import _distributor_init @@ -197,3 +197,27 @@ # but do not use them, we define them here for backward compatibility. oldnumeric = 'removed' numarray = 'removed' + + def _sanity_check(): + """ + Quick sanity checks for common bugs caused by environment. 
+ There are some cases (e.g., the wrong BLAS ABI) that cause wrong + results under specific runtime conditions that are not necessarily + achieved during test suite runs, and it is useful to catch those early. + + See https://github.com/numpy/numpy/issues/8577 and other + similar bug reports. + + """ + try: + x = ones(2, dtype=float32) + if not abs(x.dot(x) - 2.0) < 1e-5: + raise AssertionError() + except AssertionError: + msg = ("The current Numpy installation ({!r}) fails to " + "pass simple sanity checks. This can be caused for example " + "by incorrect BLAS library being linked in.") + raise RuntimeError(msg.format(__file__)) + + _sanity_check() + del _sanity_check diff -Nru python-numpy-1.13.3/numpy/lib/arraypad.py python-numpy-1.14.5/numpy/lib/arraypad.py --- python-numpy-1.13.3/numpy/lib/arraypad.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/arraypad.py 2018-06-12 18:28:52.000000000 +0000 @@ -1208,7 +1208,7 @@ length to the vector argument with padded values replaced. It has the following signature:: - padding_func(vector, iaxis_pad_width, iaxis, **kwargs) + padding_func(vector, iaxis_pad_width, iaxis, kwargs) where @@ -1222,32 +1222,32 @@ the end of vector. iaxis : int The axis currently being calculated. - kwargs : misc + kwargs : dict Any keyword arguments the function requires. 
Examples -------- >>> a = [1, 2, 3, 4, 5] - >>> np.lib.pad(a, (2,3), 'constant', constant_values=(4, 6)) + >>> np.pad(a, (2,3), 'constant', constant_values=(4, 6)) array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6]) - >>> np.lib.pad(a, (2, 3), 'edge') + >>> np.pad(a, (2, 3), 'edge') array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5]) - >>> np.lib.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) + >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) - >>> np.lib.pad(a, (2,), 'maximum') + >>> np.pad(a, (2,), 'maximum') array([5, 5, 1, 2, 3, 4, 5, 5, 5]) - >>> np.lib.pad(a, (2,), 'mean') + >>> np.pad(a, (2,), 'mean') array([3, 3, 1, 2, 3, 4, 5, 3, 3]) - >>> np.lib.pad(a, (2,), 'median') + >>> np.pad(a, (2,), 'median') array([3, 3, 1, 2, 3, 4, 5, 3, 3]) >>> a = [[1, 2], [3, 4]] - >>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum') + >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') array([[1, 1, 1, 2, 1, 1, 1], [1, 1, 1, 2, 1, 1, 1], [1, 1, 1, 2, 1, 1, 1], @@ -1257,36 +1257,42 @@ [1, 1, 1, 2, 1, 1, 1]]) >>> a = [1, 2, 3, 4, 5] - >>> np.lib.pad(a, (2, 3), 'reflect') + >>> np.pad(a, (2, 3), 'reflect') array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) - >>> np.lib.pad(a, (2, 3), 'reflect', reflect_type='odd') + >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) - >>> np.lib.pad(a, (2, 3), 'symmetric') + >>> np.pad(a, (2, 3), 'symmetric') array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) - >>> np.lib.pad(a, (2, 3), 'symmetric', reflect_type='odd') + >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) - >>> np.lib.pad(a, (2, 3), 'wrap') + >>> np.pad(a, (2, 3), 'wrap') array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) - >>> def padwithtens(vector, pad_width, iaxis, kwargs): - ... vector[:pad_width[0]] = 10 - ... vector[-pad_width[1]:] = 10 + >>> def pad_with(vector, pad_width, iaxis, kwargs): + ... pad_value = kwargs.get('padder', 10) + ... vector[:pad_width[0]] = pad_value + ... vector[-pad_width[1]:] = pad_value ... 
return vector - >>> a = np.arange(6) >>> a = a.reshape((2, 3)) - - >>> np.lib.pad(a, 2, padwithtens) + >>> np.pad(a, 2, pad_with) array([[10, 10, 10, 10, 10, 10, 10], [10, 10, 10, 10, 10, 10, 10], [10, 10, 0, 1, 2, 10, 10], [10, 10, 3, 4, 5, 10, 10], [10, 10, 10, 10, 10, 10, 10], [10, 10, 10, 10, 10, 10, 10]]) + >>> np.pad(a, 2, pad_with, padder=100) + array([[100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 0, 1, 2, 100, 100], + [100, 100, 3, 4, 5, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100]]) """ if not np.asarray(pad_width).dtype.kind == 'i': raise TypeError('`pad_width` must be of integral type.') diff -Nru python-numpy-1.13.3/numpy/lib/arraysetops.py python-numpy-1.14.5/numpy/lib/arraysetops.py --- python-numpy-1.13.3/numpy/lib/arraysetops.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/arraysetops.py 2018-06-12 18:28:52.000000000 +0000 @@ -135,16 +135,18 @@ return_counts : bool, optional If True, also return the number of times each unique item appears in `ar`. + .. versionadded:: 1.9.0 - axis : int or None, optional - The axis to operate on. If None, `ar` will be flattened beforehand. - Otherwise, duplicate items will be removed along the provided axis, - with all the other axes belonging to the each of the unique elements. - Object arrays or structured arrays that contain objects are not - supported if the `axis` kwarg is used. - .. versionadded:: 1.13.0 + axis : int or None, optional + The axis to operate on. If None, `ar` will be flattened. If an integer, + the subarrays indexed by the given axis will be flattened and treated + as the elements of a 1-D array with the dimension of the given axis, + see the notes for more details. Object arrays or structured arrays + that contain objects are not supported if the `axis` kwarg is used. The + default is None. + .. 
versionadded:: 1.13.0 Returns ------- @@ -166,6 +168,17 @@ numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. + Notes + ----- + When an axis is specified the subarrays indexed by the axis are sorted. + This is done by making the specified axis the first dimension of the array + and then flattening the subarrays in C order. The flattened subarrays are + then viewed as a structured type with each element given a label, with the + effect that we end up with a 1-D array of structured types that can be + treated in the same way as any other 1-D array. The result is that the + flattened subarrays are sorted in lexicographic order starting with the + first element. + Examples -------- >>> np.unique([1, 1, 2, 2, 3, 3]) @@ -217,14 +230,7 @@ ar = ar.reshape(orig_shape[0], -1) ar = np.ascontiguousarray(ar) - if ar.dtype.char in (np.typecodes['AllInteger'] + - np.typecodes['Datetime'] + 'S'): - # Optimization: Creating a view of your data with a np.void data type of - # size the number of bytes in a full row. Handles any type where items - # have a unique binary representation, i.e. 0 is only 0, not +0 and -0. 
- dtype = np.dtype((np.void, ar.dtype.itemsize * ar.shape[1])) - else: - dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])] + dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])] try: consolidated = ar.view(dtype) @@ -263,9 +269,9 @@ else: ret = (ar,) if return_index: - ret += (np.empty(0, np.bool),) + ret += (np.empty(0, np.intp),) if return_inverse: - ret += (np.empty(0, np.bool),) + ret += (np.empty(0, np.intp),) if return_counts: ret += (np.empty(0, np.intp),) return ret @@ -375,11 +381,8 @@ return aux aux.sort() -# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) -# flag2 = ediff1d( flag ) == 0 - flag2 = flag[1:] == flag[:-1] - return aux[flag2] + return aux[flag[1:] & flag[:-1]] def in1d(ar1, ar2, assume_unique=False, invert=False): @@ -438,12 +441,12 @@ >>> states = [0, 2] >>> mask = np.in1d(test, states) >>> mask - array([ True, False, True, False, True], dtype=bool) + array([ True, False, True, False, True]) >>> test[mask] array([0, 2, 0]) >>> mask = np.in1d(test, states, invert=True) >>> mask - array([False, True, False, True, False], dtype=bool) + array([False, True, False, True, False]) >>> test[mask] array([1, 5]) """ @@ -451,14 +454,20 @@ ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() - # This code is significantly faster when the condition is satisfied. - if len(ar2) < 10 * len(ar1) ** 0.145: + # Check if one of the arrays may contain arbitrary objects + contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject + + # This code is run when + # a) the first condition is true, making the code significantly faster + # b) the second condition is true (i.e. 
`ar1` or `ar2` may contain + # arbitrary objects), since then sorting is not guaranteed to work + if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: - mask = np.ones(len(ar1), dtype=np.bool) + mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: - mask = np.zeros(len(ar1), dtype=np.bool) + mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask @@ -549,13 +558,13 @@ >>> mask = np.isin(element, test_elements) >>> mask array([[ False, True], - [ True, False]], dtype=bool) + [ True, False]]) >>> element[mask] array([2, 4]) >>> mask = np.isin(element, test_elements, invert=True) >>> mask array([[ True, False], - [ False, True]], dtype=bool) + [ False, True]]) >>> element[mask] array([0, 6]) @@ -565,13 +574,13 @@ >>> test_set = {1, 2, 4, 8} >>> np.isin(element, test_set) array([[ False, False], - [ False, False]], dtype=bool) + [ False, False]]) Casting the set to a list gives the expected result: >>> np.isin(element, list(test_set)) array([[ False, True], - [ True, False]], dtype=bool) + [ True, False]]) """ element = np.asarray(element) return in1d(element, test_elements, assume_unique=assume_unique, @@ -611,7 +620,7 @@ >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) array([1, 2, 3, 4, 6]) """ - return unique(np.concatenate((ar1, ar2))) + return unique(np.concatenate((ar1, ar2), axis=None)) def setdiff1d(ar1, ar2, assume_unique=False): """ diff -Nru python-numpy-1.13.3/numpy/lib/_datasource.py python-numpy-1.14.5/numpy/lib/_datasource.py --- python-numpy-1.13.3/numpy/lib/_datasource.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/_datasource.py 2018-06-12 17:31:56.000000000 +0000 @@ -15,7 +15,7 @@ - URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' DataSource files can also be compressed or uncompressed. Currently only -gzip and bz2 are supported. +gzip, bz2 and xz are supported. 
Example:: @@ -38,13 +38,99 @@ import os import sys import shutil +import io _open = open +def _check_mode(mode, encoding, newline): + """Check mode and that encoding and newline are compatible. + + Parameters + ---------- + mode : str + File open mode. + encoding : str + File encoding. + newline : str + Newline for text files. + + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + +def _python2_bz2open(fn, mode, encoding, newline): + """Wrapper to open bz2 in text mode. + + Parameters + ---------- + fn : str + File name + mode : {'r', 'w'} + File mode. Note that bz2 Text files are not supported. + encoding : str + Ignored, text bz2 files not supported in Python2. + newline : str + Ignored, text bz2 files not supported in Python2. + """ + import bz2 + + _check_mode(mode, encoding, newline) + + if "t" in mode: + # BZ2File is missing necessary functions for TextIOWrapper + raise ValueError("bz2 text files not supported in python2") + else: + return bz2.BZ2File(fn, mode) + +def _python2_gzipopen(fn, mode, encoding, newline): + """ Wrapper to open gzip in text mode. + + Parameters + ---------- + fn : str, bytes, file + File path or opened file. + mode : str + File mode. The actual files are opened as binary, but will decoded + using the specified `encoding` and `newline`. + encoding : str + Encoding to be used when reading/writing as text. + newline : str + Newline to be used when reading/writing as text. 
+ + """ + import gzip + # gzip is lacking read1 needed for TextIOWrapper + class GzipWrap(gzip.GzipFile): + def read1(self, n): + return self.read(n) + + _check_mode(mode, encoding, newline) + + gz_mode = mode.replace("t", "") + + if isinstance(fn, (str, bytes)): + binary_file = GzipWrap(fn, gz_mode) + elif hasattr(fn, "read") or hasattr(fn, "write"): + binary_file = GzipWrap(None, gz_mode, fileobj=fn) + else: + raise TypeError("filename must be a str or bytes object, or a file") + + if "t" in mode: + return io.TextIOWrapper(binary_file, encoding, newline=newline) + else: + return binary_file + # Using a class instead of a module-level dictionary # to reduce the initial 'import numpy' overhead by -# deferring the import of bz2 and gzip until needed +# deferring the import of lzma, bz2 and gzip until needed # TODO: .zip support, .tar support? class _FileOpeners(object): @@ -55,7 +141,7 @@ supported file format. Attribute lookup is implemented in such a way that an instance of `_FileOpeners` itself can be indexed with the keys of that dictionary. Currently uncompressed files as well as files - compressed with ``gzip`` or ``bz2`` compression are supported. + compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. 
Notes ----- @@ -65,7 +151,7 @@ Examples -------- >>> np.lib._datasource._file_openers.keys() - [None, '.bz2', '.gz'] + [None, '.bz2', '.gz', '.xz', '.lzma'] >>> np.lib._datasource._file_openers['.gz'] is gzip.open True @@ -73,21 +159,39 @@ def __init__(self): self._loaded = False - self._file_openers = {None: open} + self._file_openers = {None: io.open} def _load(self): if self._loaded: return + try: import bz2 - self._file_openers[".bz2"] = bz2.BZ2File + if sys.version_info[0] >= 3: + self._file_openers[".bz2"] = bz2.open + else: + self._file_openers[".bz2"] = _python2_bz2open except ImportError: pass + try: import gzip - self._file_openers[".gz"] = gzip.open + if sys.version_info[0] >= 3: + self._file_openers[".gz"] = gzip.open + else: + self._file_openers[".gz"] = _python2_gzipopen except ImportError: pass + + try: + import lzma + self._file_openers[".xz"] = lzma.open + self._file_openers[".lzma"] = lzma.open + except (ImportError, AttributeError): + # There are incompatible backports of lzma that do not have the + # lzma.open attribute, so catch that as well as ImportError. + pass + self._loaded = True def keys(self): @@ -102,7 +206,7 @@ ------- keys : list The keys are None for uncompressed files and the file extension - strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression + strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression methods. """ @@ -115,7 +219,7 @@ _file_openers = _FileOpeners() -def open(path, mode='r', destpath=os.curdir): +def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): """ Open `path` with `mode` and return the file object. @@ -134,6 +238,11 @@ Path to the directory where the source file gets downloaded to for use. If `destpath` is None, a temporary directory will be created. The default path is the current directory. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `io.open` uses. 
+ newline : {None, str}, optional + Newline to use when reading text file. Returns ------- @@ -148,7 +257,7 @@ """ ds = DataSource(destpath) - return ds.open(path, mode) + return ds.open(path, mode, encoding=encoding, newline=newline) class DataSource (object): @@ -458,7 +567,7 @@ return False return False - def open(self, path, mode='r'): + def open(self, path, mode='r', encoding=None, newline=None): """ Open and return file-like object. @@ -473,6 +582,11 @@ Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to append. Available modes depend on the type of object specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `io.open` uses. + newline : {None, str}, optional + Newline to use when reading text file. Returns ------- @@ -496,7 +610,8 @@ _fname, ext = self._splitzipext(found) if ext == 'bz2': mode.replace("+", "") - return _file_openers[ext](found, mode=mode) + return _file_openers[ext](found, mode=mode, + encoding=encoding, newline=newline) else: raise IOError("%s not found." % path) @@ -619,7 +734,7 @@ """ return DataSource.exists(self, self._fullpath(path)) - def open(self, path, mode='r'): + def open(self, path, mode='r', encoding=None, newline=None): """ Open and return file-like object prepending Repository base URL. @@ -636,6 +751,11 @@ Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to append. Available modes depend on the type of object specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `io.open` uses. + newline : {None, str}, optional + Newline to use when reading text file. Returns ------- @@ -643,7 +763,8 @@ File object. 
""" - return DataSource.open(self, self._fullpath(path), mode) + return DataSource.open(self, self._fullpath(path), mode, + encoding=encoding, newline=newline) def listdir(self): """ diff -Nru python-numpy-1.13.3/numpy/lib/financial.py python-numpy-1.14.5/numpy/lib/financial.py --- python-numpy-1.13.3/numpy/lib/financial.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/financial.py 2018-06-12 17:31:56.000000000 +0000 @@ -7,9 +7,13 @@ broadcasting and being able to be called with scalars or arrays (or other sequences). +Functions support the :class:`decimal.Decimal` type unless +otherwise stated. """ from __future__ import division, absolute_import, print_function +from decimal import Decimal + import numpy as np __all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate', @@ -32,7 +36,6 @@ except (KeyError, TypeError): return [_when_to_num[x] for x in when] - def fv(rate, nper, pmt, pv, when='end'): """ Compute the future value. @@ -117,10 +120,8 @@ when = _convert_when(when) (rate, nper, pmt, pv, when) = map(np.asarray, [rate, nper, pmt, pv, when]) temp = (1+rate)**nper - miter = np.broadcast(rate, nper, pmt, pv, when) - zer = np.zeros(miter.shape) - fact = np.where(rate == zer, nper + zer, - (1 + rate*when)*(temp - 1)/rate + zer) + fact = np.where(rate == 0, nper, + (1 + rate*when)*(temp - 1)/rate) return -(pv*temp + pmt*fact) def pmt(rate, nper, pv, fv=0, when='end'): @@ -209,17 +210,18 @@ when = _convert_when(when) (rate, nper, pv, fv, when) = map(np.array, [rate, nper, pv, fv, when]) temp = (1 + rate)**nper - mask = (rate == 0.0) - masked_rate = np.where(mask, 1.0, rate) - z = np.zeros(np.broadcast(masked_rate, nper, pv, fv, when).shape) - fact = np.where(mask != z, nper + z, - (1 + masked_rate*when)*(temp - 1)/masked_rate + z) + mask = (rate == 0) + masked_rate = np.where(mask, 1, rate) + fact = np.where(mask != 0, nper, + (1 + masked_rate*when)*(temp - 1)/masked_rate) return -(fv + pv*temp) / fact def nper(rate, pmt, pv, fv=0, 
when='end'): """ Compute the number of periodic payments. + :class:`decimal.Decimal` type is not supported. + Parameters ---------- rate : array_like @@ -271,20 +273,18 @@ use_zero_rate = False with np.errstate(divide="raise"): try: - z = pmt*(1.0+rate*when)/rate + z = pmt*(1+rate*when)/rate except FloatingPointError: use_zero_rate = True if use_zero_rate: - return (-fv + pv) / (pmt + 0.0) + return (-fv + pv) / pmt else: - A = -(fv + pv)/(pmt+0.0) - B = np.log((-fv+z) / (pv+z))/np.log(1.0+rate) - miter = np.broadcast(rate, pmt, pv, fv, when) - zer = np.zeros(miter.shape) - return np.where(rate == zer, A + zer, B + zer) + 0.0 + A = -(fv + pv)/(pmt+0) + B = np.log((-fv+z) / (pv+z))/np.log(1+rate) + return np.where(rate == 0, A, B) -def ipmt(rate, per, nper, pv, fv=0.0, when='end'): +def ipmt(rate, per, nper, pv, fv=0, when='end'): """ Compute the interest portion of a payment. @@ -374,7 +374,7 @@ ipmt = _rbl(rate, per, total_pmt, pv, when)*rate try: ipmt = np.where(when == 1, ipmt/(1 + rate), ipmt) - ipmt = np.where(np.logical_and(when == 1, per == 1), 0.0, ipmt) + ipmt = np.where(np.logical_and(when == 1, per == 1), 0, ipmt) except IndexError: pass return ipmt @@ -388,7 +388,7 @@ """ return fv(rate, (per - 1), pmt, pv, when) -def ppmt(rate, per, nper, pv, fv=0.0, when='end'): +def ppmt(rate, per, nper, pv, fv=0, when='end'): """ Compute the payment against loan principal. @@ -416,7 +416,7 @@ total = pmt(rate, nper, pv, fv, when) return total - ipmt(rate, per, nper, pv, fv, when) -def pv(rate, nper, pmt, fv=0.0, when='end'): +def pv(rate, nper, pmt, fv=0, when='end'): """ Compute the present value. 
@@ -505,9 +505,7 @@ when = _convert_when(when) (rate, nper, pmt, fv, when) = map(np.asarray, [rate, nper, pmt, fv, when]) temp = (1+rate)**nper - miter = np.broadcast(rate, nper, pmt, fv, when) - zer = np.zeros(miter.shape) - fact = np.where(rate == zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer) + fact = np.where(rate == 0, nper, (1+rate*when)*(temp-1)/rate) return -(fv + pmt*fact)/temp # Computed with Sage @@ -529,7 +527,7 @@ # where # g(r) is the formula # g'(r) is the derivative with respect to r. -def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100): +def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100): """ Compute the rate of interest per period. @@ -545,10 +543,10 @@ Future value when : {{'begin', 1}, {'end', 0}}, {string, int}, optional When payments are due ('begin' (1) or 'end' (0)) - guess : float, optional - Starting guess for solving the rate of interest - tol : float, optional - Required tolerance for the solution + guess : Number, optional + Starting guess for solving the rate of interest, default 0.1 + tol : Number, optional + Required tolerance for the solution, default 1e-6 maxiter : int, optional Maximum iterations in finding the solution @@ -573,15 +571,26 @@ """ when = _convert_when(when) + default_type = Decimal if isinstance(pmt, Decimal) else float + + # Handle casting defaults to Decimal if/when pmt is a Decimal and + # guess and/or tol are not given default values + if guess is None: + guess = default_type('0.1') + + if tol is None: + tol = default_type('1e-6') + (nper, pmt, pv, fv, when) = map(np.asarray, [nper, pmt, pv, fv, when]) + rn = guess - iter = 0 + iterator = 0 close = False - while (iter < maxiter) and not close: + while (iterator < maxiter) and not close: rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when) diff = abs(rnp1-rn) close = np.all(diff < tol) - iter += 1 + iterator += 1 rn = rnp1 if not close: # Return nan's in array of the same shape as rn @@ -597,6 +606,8 @@ that gives a 
net present value of 0.0; for a more complete explanation, see Notes below. + :class:`decimal.Decimal` type is not supported. + Parameters ---------- values : array_like, shape(N,) @@ -650,6 +661,11 @@ (Compare with the Example given for numpy.lib.financial.npv) """ + # `np.roots` call is why this function does not support Decimal type. + # + # Ultimately Decimal support needs to be added to np.roots, which has + # greater implications on the entire linear algebra module and how it does + # eigenvalue computations. res = np.roots(values[::-1]) mask = (res.imag == 0) & (res.real > 0) if not mask.any(): @@ -657,7 +673,7 @@ res = res[mask].real # NPV(rate) = 0 can have more than one solution so we return # only the solution closest to zero. - rate = 1.0/res - 1 + rate = 1/res - 1 rate = rate.item(np.argmin(np.abs(rate))) return rate @@ -727,12 +743,19 @@ Modified internal rate of return """ - values = np.asarray(values, dtype=np.double) + values = np.asarray(values) n = values.size + + # Without this explicit cast the 1/(n - 1) computation below + # becomes a float, which causes TypeError when using Decimal + # values. + if isinstance(finance_rate, Decimal): + n = Decimal(n) + pos = values > 0 neg = values < 0 if not (pos.any() and neg.any()): return np.nan numer = np.abs(npv(reinvest_rate, values*pos)) denom = np.abs(npv(finance_rate, values*neg)) - return (numer/denom)**(1.0/(n - 1))*(1 + reinvest_rate) - 1 + return (numer/denom)**(1/(n - 1))*(1 + reinvest_rate) - 1 diff -Nru python-numpy-1.13.3/numpy/lib/format.py python-numpy-1.14.5/numpy/lib/format.py --- python-numpy-1.13.3/numpy/lib/format.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/format.py 2018-06-12 18:28:52.000000000 +0000 @@ -100,9 +100,9 @@ The next HEADER_LEN bytes form the header data describing the array's format. It is an ASCII string which contains a Python literal expression of a dictionary. 
It is terminated by a newline (``\\n``) and padded with -spaces (``\\x20``) to make the total length of -``magic string + 4 + HEADER_LEN`` be evenly divisible by 16 for alignment -purposes. +spaces (``\\x20``) to make the total of +``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible +by 64 for alignment purposes. The dictionary contains three keys: @@ -163,6 +163,7 @@ MAGIC_PREFIX = b'\x93NUMPY' MAGIC_LEN = len(MAGIC_PREFIX) + 2 +ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes # difference between version 1.0 and 2.0 is a 4 byte (I) header length @@ -304,27 +305,33 @@ header.append("'%s': %s, " % (key, repr(value))) header.append("}") header = "".join(header) - # Pad the header with spaces and a final newline such that the magic - # string, the header-length short and the header are aligned on a - # 16-byte boundary. Hopefully, some system, possibly memory-mapping, - # can take advantage of our premature optimization. 
- current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline - topad = 16 - (current_header_len % 16) - header = header + ' '*topad + '\n' header = asbytes(_filter_header(header)) - hlen = len(header) - if hlen < 256*256 and version in (None, (1, 0)): + hlen = len(header) + 1 # 1 for newline + padlen_v1 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(' mx: + first_edge, last_edge = [mi + 0.0 for mi in range] + if first_edge > last_edge: raise ValueError( 'max must be larger than min in range parameter.') - if not np.all(np.isfinite([mn, mx])): + if not np.all(np.isfinite([first_edge, last_edge])): raise ValueError( 'range parameter must be finite.') - if mn == mx: - mn -= 0.5 - mx += 0.5 + if first_edge == last_edge: + first_edge -= 0.5 + last_edge += 0.5 + + # density overrides the normed keyword + if density is not None: + normed = False + + # parse the overloaded bins argument + n_equal_bins = None + bin_edges = None if isinstance(bins, basestring): + bin_name = bins # if `bins` is a string for an automatic method, # this will replace it with the number of bins calculated - if bins not in _hist_bin_selectors: - raise ValueError("{0} not a valid estimator for bins".format(bins)) + if bin_name not in _hist_bin_selectors: + raise ValueError( + "{!r} is not a valid estimator for `bins`".format(bin_name)) if weights is not None: raise TypeError("Automated estimation of the number of " "bins is not supported for weighted data") @@ -684,22 +694,47 @@ b = a # Update the reference if the range needs truncation if range is not None: - keep = (a >= mn) - keep &= (a <= mx) + keep = (a >= first_edge) + keep &= (a <= last_edge) if not np.logical_and.reduce(keep): b = a[keep] if b.size == 0: - bins = 1 + n_equal_bins = 1 else: # Do not call selectors on empty arrays - width = _hist_bin_selectors[bins](b) + width = _hist_bin_selectors[bin_name](b) if width: - bins = int(np.ceil((mx - mn) / width)) + n_equal_bins = int(np.ceil((last_edge - first_edge) / width)) else: 
# Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. - bins = 1 + n_equal_bins = 1 + + elif np.ndim(bins) == 0: + try: + n_equal_bins = operator.index(bins) + except TypeError: + raise TypeError( + '`bins` must be an integer, a string, or an array') + if n_equal_bins < 1: + raise ValueError('`bins` must be positive, when an integer') + + elif np.ndim(bins) == 1: + bin_edges = np.asarray(bins) + if np.any(bin_edges[:-1] > bin_edges[1:]): + raise ValueError( + '`bins` must increase monotonically, when an array') + + else: + raise ValueError('`bins` must be 1d, when an array') + + del bins + + # compute the bins if only the count was specified + if n_equal_bins is not None: + bin_edges = linspace( + first_edge, last_edge, n_equal_bins + 1, endpoint=True) # Histogram is an integer or a float array depending on the weights. if weights is None: @@ -711,27 +746,24 @@ # computing histograms, to minimize memory usage. BLOCK = 65536 - if not iterable(bins): - if np.isscalar(bins) and bins < 1: - raise ValueError( - '`bins` should be a positive integer.') - # At this point, if the weights are not integer, floating point, or - # complex, we have to use the slow algorithm. - if weights is not None and not (np.can_cast(weights.dtype, np.double) or - np.can_cast(weights.dtype, np.complex)): - bins = linspace(mn, mx, bins + 1, endpoint=True) + # The fast path uses bincount, but that only works for certain types + # of weight + simple_weights = ( + weights is None or + np.can_cast(weights.dtype, np.double) or + np.can_cast(weights.dtype, complex) + ) - if not iterable(bins): + if n_equal_bins is not None and simple_weights: + # Fast algorithm for equal bins # We now convert values of a to bin indices, under the assumption of # equal bin widths (which is valid here). 
# Initialize empty histogram - n = np.zeros(bins, ntype) - # Pre-compute histogram scaling factor - norm = bins / (mx - mn) + n = np.zeros(n_equal_bins, ntype) - # Compute the bin edges for potential correction. - bin_edges = linspace(mn, mx, bins + 1, endpoint=True) + # Pre-compute histogram scaling factor + norm = n_equal_bins / (last_edge - first_edge) # We iterate over blocks here for two reasons: the first is that for # large arrays, it is actually faster (for example for a 10^8 array it @@ -745,20 +777,20 @@ tmp_w = weights[i:i + BLOCK] # Only include values in the right range - keep = (tmp_a >= mn) - keep &= (tmp_a <= mx) + keep = (tmp_a >= first_edge) + keep &= (tmp_a <= last_edge) if not np.logical_and.reduce(keep): tmp_a = tmp_a[keep] if tmp_w is not None: tmp_w = tmp_w[keep] tmp_a_data = tmp_a.astype(float) - tmp_a = tmp_a_data - mn + tmp_a = tmp_a_data - first_edge tmp_a *= norm - # Compute the bin indices, and for values that lie exactly on mx we - # need to subtract one + # Compute the bin indices, and for values that lie exactly on + # last_edge we need to subtract one indices = tmp_a.astype(np.intp) - indices[indices == bins] -= 1 + indices[indices == n_equal_bins] -= 1 # The index computation is not guaranteed to give exactly # consistent results within ~1 ULP of the bin edges. @@ -766,35 +798,26 @@ indices[decrement] -= 1 # The last bin includes the right edge. The other bins do not. increment = ((tmp_a_data >= bin_edges[indices + 1]) - & (indices != bins - 1)) + & (indices != n_equal_bins - 1)) indices[increment] += 1 # We now compute the histogram using bincount if ntype.kind == 'c': n.real += np.bincount(indices, weights=tmp_w.real, - minlength=bins) + minlength=n_equal_bins) n.imag += np.bincount(indices, weights=tmp_w.imag, - minlength=bins) + minlength=n_equal_bins) else: n += np.bincount(indices, weights=tmp_w, - minlength=bins).astype(ntype) - - # Rename the bin edges for return. 
- bins = bin_edges + minlength=n_equal_bins).astype(ntype) else: - bins = asarray(bins) - if np.any(bins[:-1] > bins[1:]): - raise ValueError( - 'bins must increase monotonically.') - - # Initialize empty histogram - n = np.zeros(bins.shape, ntype) - + # Compute via cumulative histogram + cum_n = np.zeros(bin_edges.shape, ntype) if weights is None: for i in arange(0, len(a), BLOCK): sa = sort(a[i:i+BLOCK]) - n += np.r_[sa.searchsorted(bins[:-1], 'left'), - sa.searchsorted(bins[-1], 'right')] + cum_n += np.r_[sa.searchsorted(bin_edges[:-1], 'left'), + sa.searchsorted(bin_edges[-1], 'right')] else: zero = array(0, dtype=ntype) for i in arange(0, len(a), BLOCK): @@ -803,27 +826,22 @@ sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] sw = tmp_w[sorting_index] - cw = np.concatenate(([zero, ], sw.cumsum())) - bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), - sa.searchsorted(bins[-1], 'right')] - n += cw[bin_index] - - - n = np.diff(n) - - if density is not None: - if density: - db = array(np.diff(bins), float) - return n/db/n.sum(), bins - else: - return n, bins - else: + cw = np.concatenate(([zero], sw.cumsum())) + bin_index = np.r_[sa.searchsorted(bin_edges[:-1], 'left'), + sa.searchsorted(bin_edges[-1], 'right')] + cum_n += cw[bin_index] + + n = np.diff(cum_n) + + if density: + db = array(np.diff(bin_edges), float) + return n/db/n.sum(), bin_edges + elif normed: # deprecated, buggy behavior. Remove for NumPy 2.0.0 - if normed: - db = array(np.diff(bins), float) - return n/(n*db).sum(), bins - else: - return n, bins + db = array(np.diff(bin_edges), float) + return n/(n*db).sum(), bin_edges + else: + return n, bin_edges def histogramdd(sample, bins=10, range=None, normed=False, weights=None): @@ -974,7 +992,7 @@ on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal)) # Shift these points one bin to the left. 
- Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1 + Ncount[i][nonzero(on_edge & not_smaller_than_edge)[0]] -= 1 # Flattened histogram matrix (1D) # Reshape is used so that overlarge arrays @@ -1236,12 +1254,12 @@ The length of `condlist` must correspond to that of `funclist`. If one extra function is given, i.e. if - ``len(funclist) - len(condlist) == 1``, then that extra function + ``len(funclist) == len(condlist) + 1``, then that extra function is the default value, used wherever all conditions are false. funclist : list of callables, f(x,*args,**kw), or scalars Each function is evaluated over `x` wherever its corresponding - condition is True. It should take an array as input and give an array - or a scalar value as output. If, instead of a callable, + condition is True. It should take a 1d array as input and give an 1d + array or a scalar value as output. If, instead of a callable, a scalar is provided then a constant function (``lambda x: scalar``) is assumed. args : tuple, optional @@ -1305,33 +1323,24 @@ """ x = asanyarray(x) n2 = len(funclist) - if (isscalar(condlist) or not (isinstance(condlist[0], list) or - isinstance(condlist[0], ndarray))): - if not isscalar(condlist) and x.size == 1 and x.ndim == 0: - condlist = [[c] for c in condlist] - else: - condlist = [condlist] + + # undocumented: single condition is promoted to a list of one condition + if isscalar(condlist) or ( + not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): + condlist = [condlist] + condlist = array(condlist, dtype=bool) n = len(condlist) - # This is a hack to work around problems with NumPy's - # handling of 0-d arrays and boolean indexing with - # numpy.bool_ scalars - zerod = False - if x.ndim == 0: - x = x[None] - zerod = True + if n == n2 - 1: # compute the "otherwise" condition. 
- totlist = np.logical_or.reduce(condlist, axis=0) - # Only able to stack vertically if the array is 1d or less - if x.ndim <= 1: - condlist = np.vstack([condlist, ~totlist]) - else: - condlist = [asarray(c, dtype=bool) for c in condlist] - totlist = condlist[0] - for k in range(1, n): - totlist |= condlist[k] - condlist.append(~totlist) + condelse = ~np.any(condlist, axis=0, keepdims=True) + condlist = np.concatenate([condlist, condelse], axis=0) n += 1 + elif n != n2: + raise ValueError( + "with {} condition(s), either {} or {} functions are expected" + .format(n, n, n+1) + ) y = zeros(x.shape, x.dtype) for k in range(n): @@ -1342,8 +1351,7 @@ vals = x[condlist[k]] if vals.size > 0: y[condlist[k]] = item(vals, *args, **kw) - if zerod: - y = y.squeeze() + return y @@ -1550,7 +1558,7 @@ Examples -------- - >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) + >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) >>> np.gradient(f) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(f, 2) @@ -1566,7 +1574,7 @@ Or a non uniform one: - >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float) + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) >>> np.gradient(f, x) array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5]) @@ -1574,7 +1582,7 @@ axis. In this example the first array stands for the gradient in rows and the second one in columns direction: - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] @@ -1584,7 +1592,7 @@ >>> dx = 2. >>> y = [1., 1.5, 3.5] - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y) + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) [array([[ 1. , 1. , -0.5], [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ], [ 2. 
, 1.7, 0.5]])] @@ -1601,7 +1609,7 @@ The `axis` keyword can be used to specify a subset of axes of which the gradient is calculated - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0) + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) @@ -1728,35 +1736,30 @@ slice3 = [slice(None)]*N slice4 = [slice(None)]*N - otype = f.dtype.char - if otype not in ['f', 'd', 'F', 'D', 'm', 'M']: - otype = 'd' - - # Difference of datetime64 elements results in timedelta64 - if otype == 'M': - # Need to use the full dtype name because it contains unit information - otype = f.dtype.name.replace('datetime', 'timedelta') - elif otype == 'm': - # Needs to keep the specific units, can't be a general unit - otype = f.dtype - - # Convert datetime64 data into ints. Make dummy variable `y` - # that is a view of ints if the data is datetime64, otherwise - # just set y equal to the array `f`. - if f.dtype.char in ["M", "m"]: - y = f.view('int64') + otype = f.dtype + if otype.type is np.datetime64: + # the timedelta dtype with the same unit information + otype = np.dtype(otype.name.replace('datetime', 'timedelta')) + # view as timedelta to allow addition + f = f.view(otype) + elif otype.type is np.timedelta64: + pass + elif np.issubdtype(otype, np.inexact): + pass else: - y = f + # all other types convert to floating point + otype = np.double - for i, axis in enumerate(axes): - if y.shape[axis] < edge_order + 1: + for axis, ax_dx in zip(axes, dx): + if f.shape[axis] < edge_order + 1: raise ValueError( "Shape of array too small to calculate a numerical gradient, " "at least (edge_order + 1) elements are required.") # result allocation - out = np.empty_like(y, dtype=otype) + out = np.empty_like(f, dtype=otype) - uniform_spacing = np.ndim(dx[i]) == 0 + # spacing for the current axis + uniform_spacing = np.ndim(ax_dx) == 0 # Numerical differentiation: 2nd order interior slice1[axis] = slice(1, -1) @@ -1765,10 
+1768,10 @@ slice4[axis] = slice(2, None) if uniform_spacing: - out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i]) + out[slice1] = (f[slice4] - f[slice2]) / (2. * ax_dx) else: - dx1 = dx[i][0:-1] - dx2 = dx[i][1:] + dx1 = ax_dx[0:-1] + dx2 = ax_dx[1:] a = -(dx2)/(dx1 * (dx1 + dx2)) b = (dx2 - dx1) / (dx1 * dx2) c = dx1 / (dx2 * (dx1 + dx2)) @@ -1784,16 +1787,16 @@ slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 - dx_0 = dx[i] if uniform_spacing else dx[i][0] - # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0]) - out[slice1] = (y[slice2] - y[slice3]) / dx_0 + dx_0 = ax_dx if uniform_spacing else ax_dx[0] + # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) + out[slice1] = (f[slice2] - f[slice3]) / dx_0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 - dx_n = dx[i] if uniform_spacing else dx[i][-1] - # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2]) - out[slice1] = (y[slice2] - y[slice3]) / dx_n + dx_n = ax_dx if uniform_spacing else ax_dx[-1] + # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) + out[slice1] = (f[slice2] - f[slice3]) / dx_n # Numerical differentiation: 2nd order edges else: @@ -1802,34 +1805,34 @@ slice3[axis] = 1 slice4[axis] = 2 if uniform_spacing: - a = -1.5 / dx[i] - b = 2. / dx[i] - c = -0.5 / dx[i] + a = -1.5 / ax_dx + b = 2. / ax_dx + c = -0.5 / ax_dx else: - dx1 = dx[i][0] - dx2 = dx[i][1] + dx1 = ax_dx[0] + dx2 = ax_dx[1] a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) b = (dx1 + dx2) / (dx1 * dx2) c = - dx1 / (dx2 * (dx1 + dx2)) - # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2] - out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] + # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] + out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] slice1[axis] = -1 slice2[axis] = -3 slice3[axis] = -2 slice4[axis] = -1 if uniform_spacing: - a = 0.5 / dx[i] - b = -2. / dx[i] - c = 1.5 / dx[i] + a = 0.5 / ax_dx + b = -2. 
/ ax_dx + c = 1.5 / ax_dx else: - dx1 = dx[i][-2] - dx2 = dx[i][-1] + dx1 = ax_dx[-2] + dx2 = ax_dx[-1] a = (dx2) / (dx1 * (dx1 + dx2)) b = - (dx2 + dx1) / (dx1 * dx2) c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] - out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4] + out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4] outvals.append(out) @@ -1847,7 +1850,7 @@ def diff(a, n=1, axis=-1): """ - Calculate the n-th discrete difference along given axis. + Calculate the n-th discrete difference along the given axis. The first difference is given by ``out[n] = a[n+1] - a[n]`` along the given axis, higher differences are calculated by using `diff` @@ -1858,16 +1861,21 @@ a : array_like Input array n : int, optional - The number of times values are differenced. + The number of times values are differenced. If zero, the input + is returned as-is. axis : int, optional - The axis along which the difference is taken, default is the last axis. + The axis along which the difference is taken, default is the + last axis. Returns ------- diff : ndarray The n-th differences. The shape of the output is the same as `a` except along `axis` where the dimension is smaller by `n`. The - type of the output is the same as that of the input. + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. See Also -------- @@ -1875,13 +1883,13 @@ Notes ----- - For boolean arrays, the preservation of type means that the result - will contain `False` when consecutive elements are the same and - `True` when they differ. - - For unsigned integer arrays, the results will also be unsigned. 
This should - not be surprising, as the result is consistent with calculating the - difference directly: + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: >>> u8_arr = np.array([1, 0], dtype=np.uint8) >>> np.diff(u8_arr) @@ -1889,8 +1897,8 @@ >>> u8_arr[1,...] - u8_arr[0,...] array(255, np.uint8) - If this is not desirable, then the array should be cast to a larger integer - type first: + If this is not desirable, then the array should be cast to a larger + integer type first: >>> i16_arr = u8_arr.astype(np.int16) >>> np.diff(i16_arr) @@ -1911,24 +1919,33 @@ >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) + >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + >>> np.diff(x) + array([1, 1], dtype='timedelta64[D]') + """ if n == 0: return a if n < 0: raise ValueError( "order must be non-negative but got " + repr(n)) + a = asanyarray(a) nd = a.ndim - slice1 = [slice(None)]*nd - slice2 = [slice(None)]*nd + axis = normalize_axis_index(axis, nd) + + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd slice1[axis] = slice(1, None) slice2[axis] = slice(None, -1) slice1 = tuple(slice1) slice2 = tuple(slice2) - if n > 1: - return diff(a[slice1]-a[slice2], n-1, axis=axis) - else: - return a[slice1]-a[slice2] + + op = not_equal if a.dtype == np.bool_ else subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) + + return a def interp(x, xp, fp, left=None, right=None, period=None): @@ -2074,6 +2091,7 @@ else: return interp_func(x, xp, fp, left, right).item() + def angle(z, deg=0): """ Return the angle of the complex argument. 
@@ -2096,8 +2114,6 @@ arctan2 absolute - - Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians @@ -2317,7 +2333,7 @@ >>> condition array([[ True, False, False, True], [False, False, True, False], - [False, True, False, False]], dtype=bool) + [False, True, False, False]]) >>> np.extract(condition, arr) array([0, 3, 6, 9]) @@ -2607,7 +2623,7 @@ >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) - >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) + >>> vfunc = np.vectorize(myfunc, otypes=[float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) @@ -2987,7 +3003,7 @@ >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] - >>> X = np.vstack((x,y)) + >>> X = np.stack((x, y), axis=0) >>> print(np.cov(X)) [[ 11.71 -4.286 ] [ -4.286 2.14413333]] @@ -3025,7 +3041,7 @@ y = array(y, copy=False, ndmin=2, dtype=dtype) if not rowvar and y.shape[0] != 1: y = y.T - X = np.vstack((X, y)) + X = np.concatenate((X, y), axis=0) if ddof is None: if bias == 0: @@ -3036,7 +3052,7 @@ # Get the product of frequencies and weights w = None if fweights is not None: - fweights = np.asarray(fweights, dtype=np.float) + fweights = np.asarray(fweights, dtype=float) if not np.all(fweights == np.around(fweights)): raise TypeError( "fweights must be integer") @@ -3051,7 +3067,7 @@ "fweights cannot be negative") w = fweights if aweights is not None: - aweights = np.asarray(aweights, dtype=np.float) + aweights = np.asarray(aweights, dtype=float) if aweights.ndim > 1: raise RuntimeError( "cannot handle multidimensional aweights") @@ -4010,8 +4026,9 @@ # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 + keepdim = tuple(keepdim) else: - keepdim = [1] * a.ndim + keepdim = (1,) * a.ndim r = func(a, **kwargs) return r, keepdim @@ -4273,10 +4290,7 @@ overwrite_input=overwrite_input, interpolation=interpolation) if keepdims: - if q.ndim == 0: - return r.reshape(k) - else: - return r.reshape([len(q)] + k) + return r.reshape(q.shape + k) else: return r @@ -4345,7 +4359,7 @@ 
ap.partition(indices, axis=axis) # ensure axis with qth is first - ap = np.rollaxis(ap, axis, 0) + ap = np.moveaxis(ap, axis, 0) axis = 0 # Check if the array contains any nan's @@ -4378,9 +4392,9 @@ ap.partition(concatenate((indices_below, indices_above)), axis=axis) # ensure axis with qth is first - ap = np.rollaxis(ap, axis, 0) - weights_below = np.rollaxis(weights_below, axis, 0) - weights_above = np.rollaxis(weights_above, axis, 0) + ap = np.moveaxis(ap, axis, 0) + weights_below = np.moveaxis(weights_below, axis, 0) + weights_above = np.moveaxis(weights_above, axis, 0) axis = 0 # Check if the array contains any nan's @@ -4392,8 +4406,8 @@ x2 = take(ap, indices_above, axis=axis) * weights_above # ensure axis with qth is first - x1 = np.rollaxis(x1, axis, 0) - x2 = np.rollaxis(x2, axis, 0) + x1 = np.moveaxis(x1, axis, 0) + x2 = np.moveaxis(x2, axis, 0) if zerod: x1 = x1.squeeze(0) @@ -4546,7 +4560,7 @@ elif isinstance(doc, list): for val in doc: add_docstring(getattr(new, val[0]), val[1].strip()) - except: + except Exception: pass @@ -5049,7 +5063,7 @@ # broadcasting is very different here, since a[:,0,:] = ... behaves # very different from a[:,[0],:] = ...! This changes values so that # it works likes the second case. 
(here a[:,0:1,:]) - values = np.rollaxis(values, 0, (axis % values.ndim) + 1) + values = np.moveaxis(values, 0, axis) numnew = values.shape[axis] newshape[axis] += numnew new = empty(newshape, arr.dtype, arrorder) diff -Nru python-numpy-1.13.3/numpy/lib/index_tricks.py python-numpy-1.14.5/numpy/lib/index_tricks.py --- python-numpy-1.13.3/numpy/lib/index_tricks.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/index_tricks.py 2018-06-12 18:28:52.000000000 +0000 @@ -299,7 +299,7 @@ if len(vec) == 3: trans1d = int(vec[2]) continue - except: + except Exception: raise ValueError("unknown special directive") try: axis = int(item) @@ -842,7 +842,7 @@ And use it to set the diagonal of an array of zeros to 1: - >>> a = np.zeros((2, 2, 2), dtype=np.int) + >>> a = np.zeros((2, 2, 2), dtype=int) >>> a[d3] = 1 >>> a array([[[1, 0], diff -Nru python-numpy-1.13.3/numpy/lib/__init__.py python-numpy-1.14.5/numpy/lib/__init__.py --- python-numpy-1.13.3/numpy/lib/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -44,6 +44,6 @@ __all__ += financial.__all__ __all__ += nanfunctions.__all__ -from numpy.testing.nosetester import _numpy_tester +from numpy.testing import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench diff -Nru python-numpy-1.13.3/numpy/lib/_iotools.py python-numpy-1.14.5/numpy/lib/_iotools.py --- python-numpy-1.13.3/numpy/lib/_iotools.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/_iotools.py 2018-06-12 17:31:56.000000000 +0000 @@ -8,7 +8,7 @@ import sys import numpy as np import numpy.core.numeric as nx -from numpy.compat import asbytes, bytes, asbytes_nested, basestring +from numpy.compat import asbytes, asunicode, bytes, asbytes_nested, basestring if sys.version_info[0] >= 3: from builtins import bool, int, float, complex, object, str @@ -17,15 +17,30 @@ from __builtin__ import bool, int, float, complex, object, 
unicode, str -if sys.version_info[0] >= 3: - def _bytes_to_complex(s): - return complex(s.decode('ascii')) +def _decode_line(line, encoding=None): + """Decode bytes from binary input streams. - def _bytes_to_name(s): - return s.decode('ascii') -else: - _bytes_to_complex = complex - _bytes_to_name = str + Defaults to decoding from 'latin1'. That differs from the behavior of + np.compat.asunicode that decodes from 'ascii'. + + Parameters + ---------- + line : str or bytes + Line to be decoded. + + Returns + ------- + decoded_line : unicode + Unicode in Python 2, a str (unicode) in Python 3. + + """ + if type(line) is bytes: + if encoding is None: + line = line.decode('latin1') + else: + line = line.decode(encoding) + + return line def _is_string_like(obj): @@ -189,12 +204,14 @@ return lambda input: [_.strip() for _ in method(input)] # - def __init__(self, delimiter=None, comments=b'#', autostrip=True): + def __init__(self, delimiter=None, comments='#', autostrip=True, encoding=None): + delimiter = _decode_line(delimiter) + comments = _decode_line(comments) + self.comments = comments + # Delimiter is a character - if isinstance(delimiter, unicode): - delimiter = delimiter.encode('ascii') - if (delimiter is None) or _is_bytes_like(delimiter): + if (delimiter is None) or isinstance(delimiter, basestring): delimiter = delimiter or None _handyman = self._delimited_splitter # Delimiter is a list of field widths @@ -213,12 +230,14 @@ self._handyman = self.autostrip(_handyman) else: self._handyman = _handyman + self.encoding = encoding # def _delimited_splitter(self, line): + """Chop off comments, strip, and split at delimiter. 
""" if self.comments is not None: line = line.split(self.comments)[0] - line = line.strip(b" \r\n") + line = line.strip(" \r\n") if not line: return [] return line.split(self.delimiter) @@ -227,7 +246,7 @@ def _fixedwidth_splitter(self, line): if self.comments is not None: line = line.split(self.comments)[0] - line = line.strip(b"\r\n") + line = line.strip("\r\n") if not line: return [] fixed = self.delimiter @@ -245,7 +264,7 @@ # def __call__(self, line): - return self._handyman(line) + return self._handyman(_decode_line(line, self.encoding)) class NameValidator(object): @@ -434,9 +453,9 @@ """ value = value.upper() - if value == b'TRUE': + if value == 'TRUE': return True - elif value == b'FALSE': + elif value == 'FALSE': return False else: raise ValueError("Invalid boolean") @@ -510,8 +529,10 @@ Value to return by default, that is, when the string to be converted is flagged as missing. If not given, `StringConverter` tries to supply a reasonable default value. - missing_values : sequence of str, optional - Sequence of strings indicating a missing value. + missing_values : {None, sequence of str}, optional + ``None`` or sequence of strings indicating a missing value. If ``None`` + then missing values are indicated by empty entries. The default is + ``None``. locked : bool, optional Whether the StringConverter should be locked to prevent automatic upgrade or not. Default is False. 
@@ -527,9 +548,10 @@ _mapper.append((nx.int64, int, -1)) _mapper.extend([(nx.floating, float, nx.nan), - (complex, _bytes_to_complex, nx.nan + 0j), + (nx.complexfloating, complex, nx.nan + 0j), (nx.longdouble, nx.longdouble, nx.nan), - (nx.string_, bytes, b'???')]) + (nx.unicode_, asunicode, '???'), + (nx.string_, asbytes, '???')]) (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper) @@ -601,11 +623,6 @@ def __init__(self, dtype_or_func=None, default=None, missing_values=None, locked=False): - # Convert unicode (for Py3) - if isinstance(missing_values, unicode): - missing_values = asbytes(missing_values) - elif isinstance(missing_values, (list, tuple)): - missing_values = asbytes_nested(missing_values) # Defines a lock for upgrade self._locked = bool(locked) # No input dtype: minimal initialization @@ -631,7 +648,7 @@ # None if default is None: try: - default = self.func(b'0') + default = self.func('0') except ValueError: default = None dtype = self._getdtype(default) @@ -676,11 +693,11 @@ self.func = lambda x: int(float(x)) # Store the list of strings corresponding to missing values. if missing_values is None: - self.missing_values = set([b'']) + self.missing_values = set(['']) else: - if isinstance(missing_values, bytes): - missing_values = missing_values.split(b",") - self.missing_values = set(list(missing_values) + [b'']) + if isinstance(missing_values, basestring): + missing_values = missing_values.split(",") + self.missing_values = set(list(missing_values) + ['']) # self._callingfunction = self._strict_call self.type = self._dtypeortype(dtype) @@ -801,7 +818,7 @@ self.iterupgrade(value) def update(self, func, default=None, testing_value=None, - missing_values=b'', locked=False): + missing_values='', locked=False): """ Set StringConverter attributes directly. @@ -817,8 +834,9 @@ A string representing a standard input value of the converter. This string is used to help defining a reasonable default value. 
- missing_values : sequence of str, optional - Sequence of strings indicating a missing value. + missing_values : {sequence of str, None}, optional + Sequence of strings indicating a missing value. If ``None``, then + the existing `missing_values` are cleared. The default is `''`. locked : bool, optional Whether the StringConverter should be locked to prevent automatic upgrade or not. Default is False. @@ -832,25 +850,29 @@ """ self.func = func self._locked = locked + # Don't reset the default to None if we can avoid it if default is not None: self.default = default self.type = self._dtypeortype(self._getdtype(default)) else: try: - tester = func(testing_value or b'1') + tester = func(testing_value or '1') except (TypeError, ValueError): tester = None self.type = self._dtypeortype(self._getdtype(tester)) - # Add the missing values to the existing set - if missing_values is not None: - if _is_bytes_like(missing_values): - self.missing_values.add(missing_values) - elif hasattr(missing_values, '__iter__'): - for val in missing_values: - self.missing_values.add(val) + + # Add the missing values to the existing set or clear it. + if missing_values is None: + # Clear all missing values even though the ctor initializes it to + # set(['']) when the argument is None. 
+ self.missing_values = set() else: - self.missing_values = [] + if not np.iterable(missing_values): + missing_values = [missing_values] + if not all(isinstance(v, basestring) for v in missing_values): + raise TypeError("missing_values must be strings or unicode") + self.missing_values.update(missing_values) def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): diff -Nru python-numpy-1.13.3/numpy/lib/nanfunctions.py python-numpy-1.14.5/numpy/lib/nanfunctions.py --- python-numpy-1.13.3/numpy/lib/nanfunctions.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/nanfunctions.py 2018-06-12 18:28:52.000000000 +0000 @@ -106,6 +106,46 @@ return a +def _remove_nan_1d(arr1d, overwrite_input=False): + """ + Equivalent to arr1d[~arr1d.isnan()], but in a different order + + Presumably faster as it incurs fewer copies + + Parameters + ---------- + arr1d : ndarray + Array to remove nans from + overwrite_input : bool + True if `arr1d` can be modified in place + + Returns + ------- + res : ndarray + Array with nan elements removed + overwrite_input : bool + True if `res` can be modified in place, given the constraint on the + input + """ + + c = np.isnan(arr1d) + s = np.nonzero(c)[0] + if s.size == arr1d.size: + warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=4) + return arr1d[:0], True + elif s.size == 0: + return arr1d, overwrite_input + else: + if not overwrite_input: + arr1d = arr1d.copy() + # select non-nans at end of array + enonan = arr1d[-s.size:][~c[-s.size:]] + # fill nans in beginning of array with non-nans of end + arr1d[s[:enonan.size]] = enonan + + return arr1d[:-s.size], True + + def _divide_by_count(a, b, out=None): """ Compute a/b ignoring invalid results. 
If `a` is an array the division @@ -239,7 +279,7 @@ # which do not implement isnan (gh-9009), or fmin correctly (gh-8975) res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) if np.isnan(res).any(): - warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2) + warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2) else: # Slow, but safe for subclasses of ndarray a, mask = _replace_nan(a, +np.inf) @@ -554,7 +594,7 @@ Parameters ---------- a : array_like - Array containing numbers whose sum is desired. If `a` is not an + Array containing numbers whose product is desired. If `a` is not an array, a conversion is attempted. axis : int, optional Axis along which the product is computed. The default is to compute @@ -836,24 +876,12 @@ Private function for rank 1 arrays. Compute the median ignoring NaNs. See nanmedian for parameter usage """ - c = np.isnan(arr1d) - s = np.where(c)[0] - if s.size == arr1d.size: - warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3) + arr1d, overwrite_input = _remove_nan_1d(arr1d, + overwrite_input=overwrite_input) + if arr1d.size == 0: return np.nan - elif s.size == 0: - return np.median(arr1d, overwrite_input=overwrite_input) - else: - if overwrite_input: - x = arr1d - else: - x = arr1d.copy() - # select non-nans at end of array - enonan = arr1d[-s.size:][~c[-s.size:]] - # fill nans in beginning of array with non-nans of end - x[s[:enonan.size]] = enonan - # slice nans away - return np.median(x[:-s.size], overwrite_input=True) + + return np.median(arr1d, overwrite_input=overwrite_input) def _nanmedian(a, axis=None, out=None, overwrite_input=False): @@ -1123,10 +1151,7 @@ overwrite_input=overwrite_input, interpolation=interpolation) if keepdims and keepdims is not np._NoValue: - if q.ndim == 0: - return r.reshape(k) - else: - return r.reshape([len(q)] + k) + return r.reshape(q.shape + k) else: return r @@ -1149,7 +1174,7 @@ # Move that axis to the beginning to match percentile's # 
convention. if q.ndim != 0: - result = np.rollaxis(result, axis) + result = np.moveaxis(result, axis, 0) if out is not None: out[...] = result @@ -1158,34 +1183,16 @@ def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'): """ - Private function for rank 1 arrays. Compute percentile ignoring - NaNs. - + Private function for rank 1 arrays. Compute percentile ignoring NaNs. See nanpercentile for parameter usage """ - c = np.isnan(arr1d) - s = np.where(c)[0] - if s.size == arr1d.size: - warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3) - if q.ndim == 0: - return np.nan - else: - return np.nan * np.ones((len(q),)) - elif s.size == 0: - return np.percentile(arr1d, q, overwrite_input=overwrite_input, - interpolation=interpolation) - else: - if overwrite_input: - x = arr1d - else: - x = arr1d.copy() - # select non-nans at end of array - enonan = arr1d[-s.size:][~c[-s.size:]] - # fill nans in beginning of array with non-nans of end - x[s[:enonan.size]] = enonan - # slice nans away - return np.percentile(x[:-s.size], q, overwrite_input=True, - interpolation=interpolation) + arr1d, overwrite_input = _remove_nan_1d(arr1d, + overwrite_input=overwrite_input) + if arr1d.size == 0: + return np.full(q.shape, np.nan)[()] # convert to scalar + + return np.percentile(arr1d, q, overwrite_input=overwrite_input, + interpolation=interpolation) def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): diff -Nru python-numpy-1.13.3/numpy/lib/npyio.py python-numpy-1.14.5/numpy/lib/npyio.py --- python-numpy-1.13.3/numpy/lib/npyio.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/npyio.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,5 +1,6 @@ from __future__ import division, absolute_import, print_function +import io import sys import os import re @@ -15,11 +16,12 @@ from ._iotools import ( LineSplitter, NameValidator, StringConverter, ConverterError, ConverterLockError, ConversionWarning, _is_string_like, - 
has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name + has_nested_fields, flatten_dtype, easy_dtype, _decode_line ) from numpy.compat import ( - asbytes, asstr, asbytes_nested, bytes, basestring, unicode, is_pathlib_path + asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode, + is_pathlib_path ) if sys.version_info[0] >= 3: @@ -294,7 +296,7 @@ used in Python 3. encoding : str, optional What encoding to use when reading Python 2 strings. Only useful when - loading Python 2 generated pickled files on Python 3, which includes + loading Python 2 generated pickled files in Python 3, which includes npy/npz files containing object arrays. Values other than 'latin1', 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical data. Default: 'ASCII' @@ -424,7 +426,7 @@ "non-pickled data") try: return pickle.load(fid, **pickle_kwargs) - except: + except Exception: raise IOError( "Failed to interpret file %s as a pickle" % repr(file)) finally: @@ -443,6 +445,8 @@ then the filename is unchanged. If file is a string or Path, a ``.npy`` extension will be appended to the file name if it does not already have one. + arr : array_like + Array data to be saved. allow_pickle : bool, optional Allow saving object arrays using Python pickles. Reasons for disallowing pickles include security (loading pickled data can execute arbitrary @@ -456,8 +460,6 @@ pickled in a Python 2 compatible way. If `fix_imports` is True, pickle will try to map the new Python 3 names to the old module names used in Python 2, so that the pickle data stream is readable with Python 2. - arr : array_like - Array data to be saved. See Also -------- @@ -661,8 +663,6 @@ # Import is postponed to here since zipfile depends on gzip, an optional # component of the so-called standard library. 
import zipfile - # Import deferred for startup time improvement - import tempfile if isinstance(file, basestring): if not file.endswith('.npz'): @@ -686,31 +686,44 @@ zipf = zipfile_factory(file, mode="w", compression=compression) - # Stage arrays in a temporary file on disk, before writing to zip. - - # Since target file might be big enough to exceed capacity of a global - # temporary directory, create temp file side-by-side with the target file. - file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp') - fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy') - os.close(fd) - try: + if sys.version_info >= (3, 6): + # Since Python 3.6 it is possible to write directly to a ZIP file. for key, val in namedict.items(): fname = key + '.npy' - fid = open(tmpfile, 'wb') - try: - format.write_array(fid, np.asanyarray(val), + val = np.asanyarray(val) + force_zip64 = val.nbytes >= 2**30 + with zipf.open(fname, 'w', force_zip64=force_zip64) as fid: + format.write_array(fid, val, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) - fid.close() - fid = None - zipf.write(tmpfile, arcname=fname) - except IOError as exc: - raise IOError("Failed to write to %s: %s" % (tmpfile, exc)) - finally: - if fid: + else: + # Stage arrays in a temporary file on disk, before writing to zip. + + # Import deferred for startup time improvement + import tempfile + # Since target file might be big enough to exceed capacity of a global + # temporary directory, create temp file side-by-side with the target file. 
+ file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp') + fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy') + os.close(fd) + try: + for key, val in namedict.items(): + fname = key + '.npy' + fid = open(tmpfile, 'wb') + try: + format.write_array(fid, np.asanyarray(val), + allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs) fid.close() - finally: - os.remove(tmpfile) + fid = None + zipf.write(tmpfile, arcname=fname) + except IOError as exc: + raise IOError("Failed to write to %s: %s" % (tmpfile, exc)) + finally: + if fid: + fid.close() + finally: + os.remove(tmpfile) zipf.close() @@ -720,8 +733,8 @@ def floatconv(x): x.lower() - if b'0x' in x: - return float.fromhex(asstr(x)) + if '0x' in x: + return float.fromhex(x) return float(x) typ = dtype.type @@ -737,17 +750,21 @@ return np.longdouble elif issubclass(typ, np.floating): return floatconv - elif issubclass(typ, np.complex): + elif issubclass(typ, complex): return lambda x: complex(asstr(x)) elif issubclass(typ, np.bytes_): return asbytes + elif issubclass(typ, np.unicode_): + return asunicode else: return asstr +# amount of lines loadtxt reads in one chunk, can be overriden for testing +_loadtxt_chunksize = 50000 def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, - ndmin=0): + ndmin=0, encoding='bytes'): """ Load data from a text file. @@ -765,13 +782,13 @@ each row will be interpreted as an element of the array. In this case, the number of columns used must match the number of fields in the data-type. - comments : str or sequence, optional + comments : str or sequence of str, optional The characters or list of characters used to indicate the start of a - comment; - default: '#'. + comment. For backwards compatibility, byte strings will be decoded as + 'latin1'. The default is '#'. delimiter : str, optional - The string used to separate values. 
By default, this is any - whitespace. + The string used to separate values. For backwards compatibility, byte + strings will be decoded as 'latin1'. The default is whitespace. converters : dict, optional A dictionary mapping column number to a function that will convert that column to a float. E.g., if column 0 is a date string: @@ -780,18 +797,15 @@ ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. skiprows : int, optional Skip the first `skiprows` lines; default: 0. - usecols : int or sequence, optional Which columns to read, with 0 being the first. For example, usecols = (1,4,5) will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. - .. versionadded:: 1.11.0 - - Also when a single column has to be read it is possible to use - an integer instead of a tuple. E.g ``usecols = 3`` reads the - fourth column the same way as `usecols = (3,)`` would. - + .. versionchanged:: 1.11.0 + When a single column has to be read it is possible to use + an integer instead of a tuple. E.g ``usecols = 3`` reads the + fourth column the same way as `usecols = (3,)`` would. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)``. When used with a structured @@ -802,6 +816,15 @@ Legal values: 0 (default), 1 or 2. .. versionadded:: 1.6.0 + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + The special value 'bytes' enables backward compatibility workarounds + that ensures you receive byte arrays as results if possible and passes + latin1 encoded strings to converters. Override this value to receive + unicode arrays and pass strings as input to converters. If set to None + the system default is used. The default value is 'bytes'. + + .. 
versionadded:: 1.14.0 Returns ------- @@ -850,16 +873,22 @@ # Type conversions for Py3 convenience if comments is not None: if isinstance(comments, (basestring, bytes)): - comments = [asbytes(comments)] - else: - comments = [asbytes(comment) for comment in comments] - + comments = [comments] + comments = [_decode_line(x) for x in comments] # Compile regex for comments beforehand comments = (re.escape(comment) for comment in comments) - regex_comments = re.compile(b'|'.join(comments)) - user_converters = converters + regex_comments = re.compile('|'.join(comments)) + if delimiter is not None: - delimiter = asbytes(delimiter) + delimiter = _decode_line(delimiter) + + user_converters = converters + + if encoding == 'bytes': + encoding = None + byte_converters = True + else: + byte_converters = False if usecols is not None: # Allow usecols to be a single int or a sequence of ints @@ -885,22 +914,24 @@ if is_pathlib_path(fname): fname = str(fname) if _is_string_like(fname): + fh = np.lib._datasource.open(fname, 'rt', encoding=encoding) + fencoding = getattr(fh, 'encoding', 'latin1') + fh = iter(fh) fown = True - if fname.endswith('.gz'): - import gzip - fh = iter(gzip.GzipFile(fname)) - elif fname.endswith('.bz2'): - import bz2 - fh = iter(bz2.BZ2File(fname)) - elif sys.version_info[0] == 2: - fh = iter(open(fname, 'U')) - else: - fh = iter(open(fname)) else: fh = iter(fname) + fencoding = getattr(fname, 'encoding', 'latin1') except TypeError: raise ValueError('fname must be a string, file handle, or generator') - X = [] + + # input may be a python2 io stream + if encoding is not None: + fencoding = encoding + # we must assume local encoding + # TOOD emit portability warning? + elif fencoding is None: + import locale + fencoding = locale.getpreferredencoding() # not to be confused with the flatten_dtype we import... def flatten_dtype_internal(dt): @@ -949,21 +980,53 @@ return tuple(ret) def split_line(line): - """Chop off comments, strip, and split at delimiter. 
+ """Chop off comments, strip, and split at delimiter. """ + line = _decode_line(line, encoding=encoding) - Note that although the file is opened as text, this function - returns bytes. - - """ - line = asbytes(line) if comments is not None: - line = regex_comments.split(asbytes(line), maxsplit=1)[0] - line = line.strip(b'\r\n') + line = regex_comments.split(line, maxsplit=1)[0] + line = line.strip('\r\n') if line: return line.split(delimiter) else: return [] + def read_data(chunk_size): + """Parse each line, including the first. + + The file read, `fh`, is a global defined above. + + Parameters + ---------- + chunk_size : int + At most `chunk_size` lines are read at a time, with iteration + until all lines are read. + + """ + X = [] + for i, line in enumerate(itertools.chain([first_line], fh)): + vals = split_line(line) + if len(vals) == 0: + continue + if usecols: + vals = [vals[j] for j in usecols] + if len(vals) != N: + line_num = i + skiprows + 1 + raise ValueError("Wrong number of columns at line %d" + % line_num) + + # Convert each value according to its column and store + items = [conv(val) for (conv, val) in zip(converters, vals)] + + # Then pack it according to the dtype's nesting + items = pack_items(items, packing) + X.append(items) + if len(X) > chunk_size: + yield X + X = [] + if X: + yield X + try: # Make sure we're dealing with a proper dtype dtype = np.dtype(dtype) @@ -1006,30 +1069,47 @@ except ValueError: # Unused converter specified continue - converters[i] = conv + if byte_converters: + # converters may use decode to workaround numpy's old behaviour, + # so encode the string again before passing to the user converter + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + import functools + converters[i] = functools.partial(tobytes_first, conv=conv) + else: + converters[i] = conv - # Parse each line, including the first - for i, line in enumerate(itertools.chain([first_line], fh)): - vals = 
split_line(line) - if len(vals) == 0: - continue - if usecols: - vals = [vals[i] for i in usecols] - if len(vals) != N: - line_num = i + skiprows + 1 - raise ValueError("Wrong number of columns at line %d" - % line_num) + converters = [conv if conv is not bytes else + lambda x: x.encode(fencoding) for conv in converters] - # Convert each value according to its column and store - items = [conv(val) for (conv, val) in zip(converters, vals)] - # Then pack it according to the dtype's nesting - items = pack_items(items, packing) - X.append(items) + # read data in chunks and fill it into an array via resize + # over-allocating and shrinking the array later may be faster but is + # probably not relevant compared to the cost of actually reading and + # converting the data + X = None + for x in read_data(_loadtxt_chunksize): + if X is None: + X = np.array(x, dtype) + else: + nshape = list(X.shape) + pos = nshape[0] + nshape[0] += len(x) + X.resize(nshape) + X[pos:, ...] = x finally: if fown: fh.close() + # recursive closures have a cyclic reference to themselves, which + # requires gc to collect (gh-10620). To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + flatten_dtype_internal = None + pack_items = None + + if X is None: + X = np.array([], dtype) - X = np.array(X, dtype) # Multicolumn data are returned with shape (1, N, M), i.e. # (1, 1, M) for a single row - remove the singleton dimension there if X.ndim == 3 and X.shape[:2] == (1, 1): @@ -1061,7 +1141,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', - footer='', comments='# '): + footer='', comments='# ', encoding=None): """ Save an array to a text file. @@ -1071,7 +1151,7 @@ If the filename ends in ``.gz``, the file is automatically saved in compressed gzip format. `loadtxt` understands gzipped files transparently. - X : array_like + X : 1D or 2D array_like Data to be saved to a text file. 
fmt : str or sequence of strs, optional A single format (%10.5f), a sequence of formats, or a @@ -1079,12 +1159,12 @@ case `delimiter` is ignored. For complex `X`, the legal options for `fmt` are: a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted - like `' (%s+%sj)' % (fmt, fmt)` + like `' (%s+%sj)' % (fmt, fmt)` b) a full string specifying every real and imaginary part, e.g. - `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns + `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns c) a list of specifiers, one per column - in this case, the real - and imaginary part must have separate specifiers, - e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns + and imaginary part must have separate specifiers, + e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns delimiter : str, optional String or character separating columns. newline : str, optional @@ -1105,6 +1185,13 @@ ``numpy.loadtxt``. .. versionadded:: 1.7.0 + encoding : {None, str}, optional + Encoding used to encode the outputfile. Does not apply to output + streams. If the encoding is something other than 'bytes' or 'latin1' + you will not be able to load the file in NumPy versions < 1.14. Default + is 'latin1'. + + .. versionadded:: 1.14.0 See Also @@ -1179,21 +1266,53 @@ fmt = asstr(fmt) delimiter = asstr(delimiter) + class WriteWrap(object): + """Convert to unicode in py2 or to bytes on bytestream inputs. 
+ + """ + def __init__(self, fh, encoding): + self.fh = fh + self.encoding = encoding + self.do_write = self.first_write + + def close(self): + self.fh.close() + + def write(self, v): + self.do_write(v) + + def write_bytes(self, v): + if isinstance(v, bytes): + self.fh.write(v) + else: + self.fh.write(v.encode(self.encoding)) + + def write_normal(self, v): + self.fh.write(asunicode(v)) + + def first_write(self, v): + try: + self.write_normal(v) + self.write = self.write_normal + except TypeError: + # input is probably a bytestream + self.write_bytes(v) + self.write = self.write_bytes + own_fh = False if is_pathlib_path(fname): fname = str(fname) if _is_string_like(fname): + # datasource doesn't support creating a new file ... + open(fname, 'wt').close() + fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) own_fh = True - if fname.endswith('.gz'): - import gzip - fh = gzip.open(fname, 'wb') - else: - if sys.version_info[0] >= 3: - fh = open(fname, 'wb') - else: - fh = open(fname, 'w') + # need to convert str to unicode for text io output + if sys.version_info[0] == 2: + fh = WriteWrap(fh, encoding or 'latin1') elif hasattr(fname, 'write'): - fh = fname + # wrap to handle byte output streams + fh = WriteWrap(fname, encoding or 'latin1') else: raise ValueError('fname must be a string or file handle') @@ -1201,7 +1320,10 @@ X = np.asarray(X) # Handle 1-dimensional arrays - if X.ndim == 1: + if X.ndim == 0 or X.ndim > 2: + raise ValueError( + "Expected 1D or 2D array, got %dD array instead" % X.ndim) + elif X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: X = np.atleast_2d(X).T @@ -1240,31 +1362,33 @@ if len(header) > 0: header = header.replace('\n', '\n' + comments) - fh.write(asbytes(comments + header + newline)) + fh.write(comments + header + newline) if iscomplex_X: for row in X: row2 = [] for number in row: row2.append(number.real) row2.append(number.imag) - fh.write(asbytes(format % tuple(row2) + newline)) + fh.write(format % 
tuple(row2) + newline) else: for row in X: try: - fh.write(asbytes(format % tuple(row) + newline)) + v = format % tuple(row) + newline except TypeError: raise TypeError("Mismatch between array dtype ('%s') and " "format specifier ('%s')" % (str(X.dtype), format)) + fh.write(v) + if len(footer) > 0: footer = footer.replace('\n', '\n' + comments) - fh.write(asbytes(comments + footer + newline)) + fh.write(comments + footer + newline) finally: if own_fh: fh.close() -def fromregex(file, regexp, dtype): +def fromregex(file, regexp, dtype, encoding=None): """ Construct an array from a text file, using regular expression parsing. @@ -1281,6 +1405,10 @@ Groups in the regular expression correspond to fields in the dtype. dtype : dtype or list of dtypes Dtype for the structured array. + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + + .. versionadded:: 1.14.0 Returns ------- @@ -1321,16 +1449,22 @@ """ own_fh = False if not hasattr(file, "read"): - file = open(file, 'rb') + file = np.lib._datasource.open(file, 'rt', encoding=encoding) own_fh = True try: - if not hasattr(regexp, 'match'): - regexp = re.compile(asbytes(regexp)) if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) - seq = regexp.findall(file.read()) + content = file.read() + if isinstance(content, bytes) and not isinstance(regexp, bytes): + regexp = asbytes(regexp) + elif not isinstance(content, bytes) and isinstance(regexp, bytes): + regexp = asstr(regexp) + + if not hasattr(regexp, 'match'): + regexp = re.compile(regexp) + seq = regexp.findall(content) if seq and not isinstance(seq[0], tuple): # Only one group is in the regexp. 
# Create the new array as a single data-type and then @@ -1358,7 +1492,7 @@ names=None, excludelist=None, deletechars=None, replace_space='_', autostrip=False, case_sensitive=True, defaultfmt="f%i", unpack=None, usemask=False, loose=True, - invalid_raise=True, max_rows=None): + invalid_raise=True, max_rows=None, encoding='bytes'): """ Load data from a text file, with missing values handled as specified. @@ -1404,11 +1538,12 @@ Which columns to read, with 0 being the first. For example, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. names : {None, True, str, sequence}, optional - If `names` is True, the field names are read from the first valid line - after the first `skip_header` lines. - If `names` is a sequence or a single-string of comma-separated names, - the names will be used to define the field names in a structured dtype. - If `names` is None, the names of the dtype fields will be used, if any. + If `names` is True, the field names are read from the first line after + the first `skip_header` lines. This line can optionally be proceeded + by a comment delimeter. If `names` is a sequence or a single-string of + comma-separated names, the names will be used to define the field names + in a structured dtype. If `names` is None, the names of the dtype + fields will be used, if any. excludelist : sequence, optional A list of names to exclude. This list is appended to the default list ['return','file','print']. Excluded names are appended an underscore: @@ -1445,6 +1580,15 @@ to read the entire file. .. versionadded:: 1.10.0 + encoding : str, optional + Encoding used to decode the inputfile. Does not apply when `fname` is + a file object. The special value 'bytes' enables backward compatibility + workarounds that ensure that you receive byte arrays when possible + and passes latin1 encoded strings to converters. Override this value to + receive unicode arrays and pass strings as input to converters. If set + to None the system default is used. 
The default value is 'bytes'. + + .. versionadded:: 1.14.0 Returns ------- @@ -1521,15 +1665,6 @@ if max_rows < 1: raise ValueError("'max_rows' must be at least 1.") - # Py3 data conversions to bytes, for convenience - if comments is not None: - comments = asbytes(comments) - if isinstance(delimiter, unicode): - delimiter = asbytes(delimiter) - if isinstance(missing_values, (unicode, list, tuple)): - missing_values = asbytes_nested(missing_values) - - # if usemask: from numpy.ma import MaskedArray, make_mask_descr # Check the input dictionary of converters @@ -1539,16 +1674,19 @@ "The input argument 'converter' should be a valid dictionary " "(got '%s' instead)" % type(user_converters)) + if encoding == 'bytes': + encoding = None + byte_converters = True + else: + byte_converters = False + # Initialize the filehandle, the LineSplitter and the NameValidator own_fhd = False try: if is_pathlib_path(fname): fname = str(fname) if isinstance(fname, basestring): - if sys.version_info[0] == 2: - fhd = iter(np.lib._datasource.open(fname, 'rbU')) - else: - fhd = iter(np.lib._datasource.open(fname, 'rb')) + fhd = iter(np.lib._datasource.open(fname, 'rt', encoding=encoding)) own_fhd = True else: fhd = iter(fname) @@ -1558,7 +1696,7 @@ "or generator. Got %s instead." 
% type(fname)) split_line = LineSplitter(delimiter=delimiter, comments=comments, - autostrip=autostrip)._handyman + autostrip=autostrip, encoding=encoding) validate_names = NameValidator(excludelist=excludelist, deletechars=deletechars, case_sensitive=case_sensitive, @@ -1572,15 +1710,15 @@ first_values = None try: while not first_values: - first_line = next(fhd) + first_line = _decode_line(next(fhd), encoding) if names is True: if comments in first_line: first_line = ( - b''.join(first_line.split(comments)[1:])) + ''.join(first_line.split(comments)[1:])) first_values = split_line(first_line) except StopIteration: # return an empty array if the datafile is empty - first_line = b'' + first_line = '' first_values = [] warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2) @@ -1603,9 +1741,8 @@ # Check the names and overwrite the dtype.names if needed if names is True: - names = validate_names([_bytes_to_name(_.strip()) - for _ in first_values]) - first_line = b'' + names = validate_names([str(_.strip()) for _ in first_values]) + first_line = '' elif _is_string_like(names): names = validate_names([_.strip() for _ in names.split(',')]) elif names: @@ -1642,9 +1779,11 @@ # Process the missing values ............................... 
# Rename missing_values for convenience user_missing_values = missing_values or () + if isinstance(user_missing_values, bytes): + user_missing_values = user_missing_values.decode('latin1') # Define the list of missing_values (one column: one list) - missing_values = [list([b'']) for _ in range(nbcols)] + missing_values = [list(['']) for _ in range(nbcols)] # We have a dictionary: process it field by field if isinstance(user_missing_values, dict): @@ -1683,8 +1822,8 @@ if value not in entry: entry.append(value) # We have a string : apply it to all entries - elif isinstance(user_missing_values, bytes): - user_value = user_missing_values.split(b",") + elif isinstance(user_missing_values, basestring): + user_value = user_missing_values.split(",") for entry in missing_values: entry.extend(user_value) # We have something else: apply it to all entries @@ -1772,16 +1911,29 @@ testing_value = first_values[j] else: testing_value = None - converters[i].update(conv, locked=True, + if conv is bytes: + user_conv = asbytes + elif byte_converters: + # converters may use decode to workaround numpy's old behaviour, + # so encode the string again before passing to the user converter + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + import functools + user_conv = functools.partial(tobytes_first, conv=conv) + else: + user_conv = conv + converters[i].update(user_conv, locked=True, testing_value=testing_value, default=filling_values[i], missing_values=missing_values[i],) - uc_update.append((i, conv)) + uc_update.append((i, user_conv)) # Make sure we have the corrected keys in user_converters... user_converters.update(uc_update) # Fixme: possible error as following variable never used. - #miss_chars = [_.missing_values for _ in converters] + # miss_chars = [_.missing_values for _ in converters] # Initialize the output lists ... # ... 
rows @@ -1893,25 +2045,54 @@ column_types = [conv.type for conv in converters] # Find the columns with strings... strcolidx = [i for (i, v) in enumerate(column_types) - if v in (type('S'), np.string_)] - # ... and take the largest number of chars. - for i in strcolidx: - column_types[i] = "|S%i" % max(len(row[i]) for row in data) - # + if v == np.unicode_] + + if byte_converters and strcolidx: + # convert strings back to bytes for backward compatibility + warnings.warn( + "Reading unicode strings without specifying the encoding " + "argument is deprecated. Set the encoding, use None for the " + "system default.", + np.VisibleDeprecationWarning, stacklevel=2) + def encode_unicode_cols(row_tup): + row = list(row_tup) + for i in strcolidx: + row[i] = row[i].encode('latin1') + return tuple(row) + + try: + data = [encode_unicode_cols(r) for r in data] + except UnicodeEncodeError: + pass + else: + for i in strcolidx: + column_types[i] = np.bytes_ + + # Update string types to be the right length + sized_column_types = column_types[:] + for i, col_type in enumerate(column_types): + if np.issubdtype(col_type, np.character): + n_chars = max(len(row[i]) for row in data) + sized_column_types[i] = (col_type, n_chars) + if names is None: - # If the dtype is uniform, don't define names, else use '' - base = set([c.type for c in converters if c._checked]) + # If the dtype is uniform (before sizing strings) + base = set([ + c_type + for c, c_type in zip(converters, column_types) + if c._checked]) if len(base) == 1: - (ddtype, mdtype) = (list(base)[0], np.bool) + uniform_type, = base + (ddtype, mdtype) = (uniform_type, bool) else: ddtype = [(defaultfmt % i, dt) - for (i, dt) in enumerate(column_types)] + for (i, dt) in enumerate(sized_column_types)] if usemask: - mdtype = [(defaultfmt % i, np.bool) - for (i, dt) in enumerate(column_types)] + mdtype = [(defaultfmt % i, bool) + for (i, dt) in enumerate(sized_column_types)] else: - ddtype = list(zip(names, column_types)) - mdtype = 
list(zip(names, [np.bool] * len(column_types))) + ddtype = list(zip(names, sized_column_types)) + mdtype = list(zip(names, [bool] * len(sized_column_types))) output = np.array(data, dtype=ddtype) if usemask: outputmask = np.array(masks, dtype=mdtype) @@ -1937,7 +2118,7 @@ # Now, process the rowmasks the same way if usemask: rowmasks = np.array( - masks, dtype=np.dtype([('', np.bool) for t in dtype_flat])) + masks, dtype=np.dtype([('', bool) for t in dtype_flat])) # Construct the new dtype mdtype = make_mask_descr(dtype) outputmask = rowmasks.view(mdtype) @@ -1951,8 +2132,8 @@ # Keep the dtype of the current converter if i in user_converters: ishomogeneous &= (ttype == dtype.type) - if ttype == np.string_: - ttype = "|S%i" % max(len(row[i]) for row in data) + if np.issubdtype(ttype, np.character): + ttype = (ttype, max(len(row[i]) for row in data)) descr.append(('', ttype)) else: descr.append(('', dtype)) @@ -1968,16 +2149,16 @@ output = np.array(data, dtype) if usemask: if dtype.names: - mdtype = [(_, np.bool) for _ in dtype.names] + mdtype = [(_, bool) for _ in dtype.names] else: - mdtype = np.bool + mdtype = bool outputmask = np.array(masks, dtype=mdtype) # Try to take care of the missing data we missed names = output.dtype.names if usemask and names: - for (name, conv) in zip(names or (), converters): + for (name, conv) in zip(names, converters): missing_values = [conv(_) for _ in conv.missing_values - if _ != b''] + if _ != ''] for mval in missing_values: outputmask[name] |= (output[name] == mval) # Construct the final array diff -Nru python-numpy-1.13.3/numpy/lib/polynomial.py python-numpy-1.14.5/numpy/lib/polynomial.py --- python-numpy-1.13.3/numpy/lib/polynomial.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/polynomial.py 2018-06-12 18:28:52.000000000 +0000 @@ -1040,14 +1040,8 @@ @property def coeffs(self): - """ The polynomial coefficients """ - return self._coeffs - - @coeffs.setter - def coeffs(self, value): - # allowing this 
makes p.coeffs *= 2 legal - if value is not self._coeffs: - raise AttributeError("Cannot set attribute") + """ A copy of the polynomial coefficients """ + return self._coeffs.copy() @property def variable(self): diff -Nru python-numpy-1.13.3/numpy/lib/recfunctions.py python-numpy-1.14.5/numpy/lib/recfunctions.py --- python-numpy-1.13.3/numpy/lib/recfunctions.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/recfunctions.py 2018-06-12 18:28:52.000000000 +0000 @@ -70,6 +70,37 @@ return output +def get_fieldspec(dtype): + """ + Produce a list of name/dtype pairs corresponding to the dtype fields + + Similar to dtype.descr, but the second item of each tuple is a dtype, not a + string. As a result, this handles subarray dtypes + + Can be passed to the dtype constructor to reconstruct the dtype, noting that + this (deliberately) discards field offsets. + + Examples + -------- + >>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)]) + >>> dt.descr + [(('a', 'A'), '>> get_fieldspec(dt) + [(('a', 'A'), dtype('int32')), ('b', dtype((' 1: - newdtype.append(('', current.descr)) - else: - newdtype.extend(current.descr) - return np.dtype(newdtype).descr + return zip_dtype(seqarrays, flatten=flatten).descr def get_fieldstructure(adtype, lastname=None, parents=None,): @@ -376,13 +411,12 @@ # Do we have a single ndarray as input ? 
if isinstance(seqarrays, (ndarray, np.void)): seqdtype = seqarrays.dtype - if (not flatten) or \ - (zip_descr((seqarrays,), flatten=True) == seqdtype.descr): + # Make sure we have named fields + if not seqdtype.names: + seqdtype = np.dtype([('', seqdtype)]) + if not flatten or zip_dtype((seqarrays,), flatten=True) == seqdtype: # Minimal processing needed: just make sure everythng's a-ok seqarrays = seqarrays.ravel() - # Make sure we have named fields - if not seqdtype.names: - seqdtype = [('', seqdtype)] # Find what type of array we must return if usemask: if asrecarray: @@ -403,7 +437,7 @@ sizes = tuple(a.size for a in seqarrays) maxlength = max(sizes) # Get the dtype of the output (flattening if needed) - newdtype = zip_descr(seqarrays, flatten=flatten) + newdtype = zip_dtype(seqarrays, flatten=flatten) # Initialize the sequences for data and mask seqdata = [] seqmask = [] @@ -655,8 +689,9 @@ else: data = data.pop() # - output = ma.masked_all(max(len(base), len(data)), - dtype=base.dtype.descr + data.dtype.descr) + output = ma.masked_all( + max(len(base), len(data)), + dtype=get_fieldspec(base.dtype) + get_fieldspec(data.dtype)) output = recursive_fill_fields(base, output) output = recursive_fill_fields(data, output) # @@ -696,6 +731,82 @@ return append_fields(base, names, data=data, dtypes=dtypes, asrecarray=True, usemask=False) +def repack_fields(a, align=False, recurse=False): + """ + Re-pack the fields of a structured array or dtype in memory. + + The memory layout of structured datatypes allows fields at arbitrary + byte offsets. This means the fields can be separated by padding bytes, + their offsets can be non-monotonically increasing, and they can overlap. + + This method removes any overlaps and reorders the fields in memory so they + have increasing byte offsets, and adds or removes padding bytes depending + on the `align` option, which behaves like the `align` option to `np.dtype`. 
+ + If `align=False`, this method produces a "packed" memory layout in which + each field starts at the byte the previous field ended, and any padding + bytes are removed. + + If `align=True`, this methods produces an "aligned" memory layout in which + each field's offset is a multiple of its alignment, and the total itemsize + is a multiple of the largest alignment, by adding padding bytes as needed. + + Parameters + ---------- + a : ndarray or dtype + Structured array or dtype for which to repack the fields. + align : boolean + If true, use an "aligned" memory layout, otherwise use a "packed" layout. + recurse : boolean + If True, also repack nested structures. + + Returns + ------- + repacked : ndarray or dtype + Copy of `a` with fields repacked, or `a` itself if no repacking was + needed. + + Examples + -------- + + >>> def print_offsets(d): + ... print("offsets:", [d.fields[name][1] for name in d.names]) + ... print("itemsize:", d.itemsize) + ... + >>> dt = np.dtype('u1,i4,f4', align=True) + >>> dt + dtype({'names':['f0','f1','f2'], 'formats':['u1','>> print_offsets(dt) + offsets: [0, 4, 8] + itemsize: 16 + >>> packed_dt = repack_fields(dt) + >>> packed_dt + dtype([('f0', 'u1'), ('f1', '>> print_offsets(packed_dt) + offsets: [0, 1, 5] + itemsize: 13 + + """ + if not isinstance(a, np.dtype): + dt = repack_fields(a.dtype, align=align, recurse=recurse) + return a.astype(dt, copy=False) + + if a.names is None: + raise ValueError("a must be or have a structured dtype") + + fieldinfo = [] + for name in a.names: + tup = a.fields[name] + if recurse: + fmt = repack_fields(tup[0], align=align, recurse=True) + else: + fmt = tup[0] + if len(tup) == 3: + name = (tup[2], name) + fieldinfo.append((name, fmt)) + + dt = np.dtype(fieldinfo, align=align) + return np.dtype((a.type, dt)) def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, autoconvert=False): @@ -746,25 +857,21 @@ fldnames = [d.names for d in ndtype] # dtype_l = ndtype[0] - newdescr = 
dtype_l.descr - names = [_[0] for _ in newdescr] + newdescr = get_fieldspec(dtype_l) + names = [n for n, d in newdescr] for dtype_n in ndtype[1:]: - for descr in dtype_n.descr: - name = descr[0] or '' - if name not in names: - newdescr.append(descr) - names.append(name) + for fname, fdtype in get_fieldspec(dtype_n): + if fname not in names: + newdescr.append((fname, fdtype)) + names.append(fname) else: - nameidx = names.index(name) - current_descr = newdescr[nameidx] + nameidx = names.index(fname) + _, cdtype = newdescr[nameidx] if autoconvert: - if np.dtype(descr[1]) > np.dtype(current_descr[-1]): - current_descr = list(current_descr) - current_descr[-1] = descr[1] - newdescr[nameidx] = tuple(current_descr) - elif descr[1] != current_descr[-1]: + newdescr[nameidx] = (fname, max(fdtype, cdtype)) + elif fdtype != cdtype: raise TypeError("Incompatible type '%s' <> '%s'" % - (dict(newdescr)[name], descr[1])) + (cdtype, fdtype)) # Only one field: use concatenate if len(newdescr) == 1: output = ma.concatenate(seqarrays) @@ -920,10 +1027,10 @@ (r1names, r2names) = (r1.dtype.names, r2.dtype.names) # Check the names for collision - if (set.intersection(set(r1names), set(r2names)).difference(key) and - not (r1postfix or r2postfix)): + collisions = (set(r1names) & set(r2names)) - set(key) + if collisions and not (r1postfix or r2postfix): msg = "r1 and r2 contain common names, r1postfix and r2postfix " - msg += "can't be empty" + msg += "can't both be empty" raise ValueError(msg) # Make temporary arrays of just the keys @@ -960,32 +1067,38 @@ # # Build the new description of the output array ....... 
# Start with the key fields - ndtype = [list(_) for _ in r1k.dtype.descr] - # Add the other fields - ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key) - # Find the new list of names (it may be different from r1names) - names = list(_[0] for _ in ndtype) - for desc in r2.dtype.descr: - desc = list(desc) - name = desc[0] + ndtype = get_fieldspec(r1k.dtype) + + # Add the fields from r1 + for fname, fdtype in get_fieldspec(r1.dtype): + if fname not in key: + ndtype.append((fname, fdtype)) + + # Add the fields from r2 + for fname, fdtype in get_fieldspec(r2.dtype): # Have we seen the current name already ? - if name in names: - nameidx = ndtype.index(desc) - current = ndtype[nameidx] - # The current field is part of the key: take the largest dtype - if name in key: - current[-1] = max(desc[1], current[-1]) - # The current field is not part of the key: add the suffixes - else: - current[0] += r1postfix - desc[0] += r2postfix - ndtype.insert(nameidx + 1, desc) - #... we haven't: just add the description to the current list + # we need to rebuild this list every time + names = list(name for name, dtype in ndtype) + try: + nameidx = names.index(fname) + except ValueError: + #... we haven't: just add the description to the current list + ndtype.append((fname, fdtype)) else: - names.extend(desc[0]) - ndtype.append(desc) - # Revert the elements to tuples - ndtype = [tuple(_) for _ in ndtype] + # collision + _, cdtype = ndtype[nameidx] + if fname in key: + # The current field is part of the key: take the largest dtype + ndtype[nameidx] = (fname, max(fdtype, cdtype)) + else: + # The current field is not part of the key: add the suffixes, + # and place the new field adjacent to the old one + ndtype[nameidx:nameidx + 1] = [ + (fname + r1postfix, cdtype), + (fname + r2postfix, fdtype) + ] + # Rebuild a dtype from the new fields + ndtype = np.dtype(ndtype) # Find the largest nb of common fields : # r1cmn and r2cmn should be equal, but... 
cmn = max(r1cmn, r2cmn) diff -Nru python-numpy-1.13.3/numpy/lib/shape_base.py python-numpy-1.14.5/numpy/lib/shape_base.py --- python-numpy-1.13.3/numpy/lib/shape_base.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/shape_base.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,7 +4,7 @@ import numpy.core.numeric as _nx from numpy.core.numeric import ( - asarray, zeros, outer, concatenate, isscalar, array, asanyarray + asarray, zeros, outer, concatenate, array, asanyarray ) from numpy.core.fromnumeric import product, reshape, transpose from numpy.core.multiarray import normalize_axis_index @@ -27,14 +27,32 @@ Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a` is a 1-D slice of `arr` along `axis`. + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + f = func1d(arr[ii + s_[:,] + kk]) + Nj = f.shape + for jj in ndindex(Nj): + out[ii + jj + kk] = f[jj] + + Equivalently, eliminating the inner loop, this can be expressed as:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) + Parameters ---------- - func1d : function + func1d : function (M,) -> (Nj...) This function should accept 1-D arrays. It is applied to 1-D slices of `arr` along the specified axis. axis : integer Axis along which `arr` is sliced. - arr : ndarray + arr : ndarray (Ni..., M, Nk...) Input array. args : any Additional arguments to `func1d`. @@ -46,11 +64,11 @@ Returns ------- - apply_along_axis : ndarray - The output array. The shape of `outarr` is identical to the shape of + out : ndarray (Ni..., Nj..., Nk...) + The output array. The shape of `out` is identical to the shape of `arr`, except along the `axis` dimension. 
This axis is removed, and replaced with new dimensions equal to the shape of the return value - of `func1d`. So if `func1d` returns a scalar `outarr` will have one + of `func1d`. So if `func1d` returns a scalar `out` will have one fewer dimensions than `arr`. See Also @@ -85,11 +103,9 @@ array([[[1, 0, 0], [0, 2, 0], [0, 0, 3]], - [[4, 0, 0], [0, 5, 0], [0, 0, 6]], - [[7, 0, 0], [0, 8, 0], [0, 0, 9]]]) @@ -281,7 +297,7 @@ >>> y.shape (1, 2) - >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,newaxis] + >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis] >>> y array([[1], [2]]) @@ -332,7 +348,7 @@ See Also -------- - hstack, vstack, concatenate + stack, hstack, vstack, concatenate Examples -------- @@ -356,25 +372,26 @@ """ Stack arrays in sequence depth wise (along third axis). - Takes a sequence of arrays and stack them along the third axis - to make a single array. Rebuilds arrays divided by `dsplit`. - This is a simple way to stack 2D arrays (images) into a single - 3D array for processing. - - This function continues to be supported for backward compatibility, but - you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack`` - function was added in NumPy 1.10. + This is equivalent to concatenation along the third axis after 2-D arrays + of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape + `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by + `dsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of arrays - Arrays to stack. All of them must have the same shape along all - but the third axis. + The arrays must have the same shape along all but the third axis. + 1-D or 2-D arrays must have the same shape. 
Returns ------- stacked : ndarray - The array formed by stacking the given arrays. + The array formed by stacking the given arrays, will be at least 3-D. See Also -------- @@ -384,11 +401,6 @@ concatenate : Join a sequence of arrays along an existing axis. dsplit : Split array along third axis. - Notes - ----- - Equivalent to ``np.concatenate(tup, axis=2)`` if `tup` contains arrays that - are at least 3-dimensional. - Examples -------- >>> a = np.array((1,2,3)) @@ -423,7 +435,9 @@ Please refer to the ``split`` documentation. The only difference between these functions is that ``array_split`` allows `indices_or_sections` to be an integer that does *not* equally - divide the axis. + divide the axis. For an array of length l that should be split + into n sections, it returns l % n sub-arrays of size l//n + 1 + and the rest of size l//n. See Also -------- @@ -435,6 +449,10 @@ >>> np.array_split(x, 3) [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])] + >>> x = np.arange(7.0) + >>> np.array_split(x, 3) + [array([ 0., 1., 2.]), array([ 3., 4.]), array([ 5., 6.])] + """ try: Ntotal = ary.shape[axis] diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_arraypad.py python-numpy-1.14.5/numpy/lib/tests/test_arraypad.py --- python-numpy-1.13.3/numpy/lib/tests/test_arraypad.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_arraypad.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,12 +4,11 @@ from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import (assert_array_equal, assert_raises, assert_allclose, - TestCase) +from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,) from numpy.lib import pad -class TestConditionalShortcuts(TestCase): +class TestConditionalShortcuts(object): def test_zero_padding_shortcuts(self): test = np.arange(120).reshape(4, 5, 6) pad_amt = [(0, 0) for axis in test.shape] @@ -52,7 +51,7 @@ pad(test, pad_amt, mode=mode, stat_length=30)) 
-class TestStatistic(TestCase): +class TestStatistic(object): def test_check_mean_stat_length(self): a = np.arange(100).astype('f') a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) @@ -346,7 +345,7 @@ assert_array_equal(a, b) -class TestConstant(TestCase): +class TestConstant(object): def test_check_constant(self): a = np.arange(100) a = pad(a, (25, 20), 'constant', constant_values=(10, 20)) @@ -491,7 +490,7 @@ assert_allclose(test, expected) -class TestLinearRamp(TestCase): +class TestLinearRamp(object): def test_check_simple(self): a = np.arange(100).astype('f') a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) @@ -531,7 +530,7 @@ assert_allclose(test, expected) -class TestReflect(TestCase): +class TestReflect(object): def test_check_simple(self): a = np.arange(100) a = pad(a, (25, 20), 'reflect') @@ -646,7 +645,7 @@ assert_array_equal(a, b) -class TestSymmetric(TestCase): +class TestSymmetric(object): def test_check_simple(self): a = np.arange(100) a = pad(a, (25, 20), 'symmetric') @@ -780,7 +779,7 @@ assert_array_equal(a, b) -class TestWrap(TestCase): +class TestWrap(object): def test_check_simple(self): a = np.arange(100) a = pad(a, (25, 20), 'wrap') @@ -876,7 +875,7 @@ assert_array_equal(a, b) -class TestStatLen(TestCase): +class TestStatLen(object): def test_check_simple(self): a = np.arange(30) a = np.reshape(a, (6, 5)) @@ -899,7 +898,7 @@ assert_array_equal(a, b) -class TestEdge(TestCase): +class TestEdge(object): def test_check_simple(self): a = np.arange(12) a = np.reshape(a, (4, 3)) @@ -938,7 +937,7 @@ assert_array_equal(padded, expected) -class TestZeroPadWidth(TestCase): +class TestZeroPadWidth(object): def test_zero_pad_width(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) @@ -946,7 +945,7 @@ assert_array_equal(arr, pad(arr, pad_width, mode='constant')) -class TestLegacyVectorFunction(TestCase): +class TestLegacyVectorFunction(object): def test_legacy_vector_functionality(self): def _padwithtens(vector, pad_width, iaxis, 
kwargs): vector[:pad_width[0]] = 10 @@ -968,7 +967,7 @@ assert_array_equal(a, b) -class TestNdarrayPadWidth(TestCase): +class TestNdarrayPadWidth(object): def test_check_simple(self): a = np.arange(12) a = np.reshape(a, (4, 3)) @@ -989,7 +988,7 @@ assert_array_equal(a, b) -class TestUnicodeInput(TestCase): +class TestUnicodeInput(object): def test_unicode_mode(self): constant_mode = u'constant' a = np.pad([1], 2, mode=constant_mode) @@ -997,7 +996,7 @@ assert_array_equal(a, b) -class ValueError1(TestCase): +class TestValueError1(object): def test_check_simple(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) @@ -1026,7 +1025,7 @@ mode='reflect') -class ValueError2(TestCase): +class TestValueError2(object): def test_check_negative_pad_amount(self): arr = np.arange(30) arr = np.reshape(arr, (6, 5)) @@ -1035,7 +1034,7 @@ **kwargs) -class ValueError3(TestCase): +class TestValueError3(object): def test_check_kwarg_not_allowed(self): arr = np.arange(30).reshape(5, 6) assert_raises(ValueError, pad, arr, 4, mode='mean', @@ -1063,7 +1062,7 @@ mode='constant') -class TypeError1(TestCase): +class TestTypeError1(object): def test_float(self): arr = np.arange(30) assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2))) diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_arraysetops.py python-numpy-1.14.5/numpy/lib/tests/test_arraysetops.py --- python-numpy-1.13.3/numpy/lib/tests/test_arraysetops.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_arraysetops.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,14 +5,14 @@ import numpy as np from numpy.testing import ( - run_module_suite, TestCase, assert_array_equal, assert_equal, assert_raises + run_module_suite, assert_array_equal, assert_equal, assert_raises, ) from numpy.lib.arraysetops import ( ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin ) -class TestSetOps(TestCase): +class TestSetOps(object): def test_intersect1d(self): # unique inputs @@ -89,28 +89,28 @@ x = 
isin(a, b) y = isin_slow(a, b) assert_array_equal(x, y) - + #multidimensional arrays in both arguments a = np.arange(24).reshape([2, 3, 4]) b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) assert_isin_equal(a, b) - + #array-likes as both arguments c = [(9, 8), (7, 6)] d = (9, 7) assert_isin_equal(c, d) - + #zero-d array: f = np.array(3) assert_isin_equal(f, b) assert_isin_equal(a, f) assert_isin_equal(f, f) - + #scalar: assert_isin_equal(5, b) assert_isin_equal(a, 6) assert_isin_equal(5, 6) - + #empty array-like: x = [] assert_isin_equal(x, b) @@ -208,6 +208,37 @@ assert_array_equal(in1d(a, long_b, assume_unique=True), ec) assert_array_equal(in1d(a, long_b, assume_unique=False), ec) + def test_in1d_first_array_is_object(self): + ar1 = [None] + ar2 = np.array([1]*10) + expected = np.array([False]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_in1d_second_array_is_object(self): + ar1 = 1 + ar2 = np.array([None]*10) + expected = np.array([False]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_in1d_both_arrays_are_object(self): + ar1 = [None] + ar2 = np.array([None]*10) + expected = np.array([True]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + + def test_in1d_both_arrays_have_structured_dtype(self): + # Test arrays of a structured data type containing an integer field + # and a field of dtype `object` allowing for arbitrary Python objects + dt = np.dtype([('field1', int), ('field2', object)]) + ar1 = np.array([(1, None)], dtype=dt) + ar2 = np.array([(1, None)]*10, dtype=dt) + expected = np.array([True]) + result = np.in1d(ar1, ar2) + assert_array_equal(result, expected) + def test_union1d(self): a = np.array([5, 4, 7, 1, 2]) b = np.array([2, 4, 3, 3, 2, 1, 5]) @@ -216,6 +247,14 @@ c = union1d(a, b) assert_array_equal(c, ec) + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = np.array([[0, 1, 2], [3, 4, 5]]) + y = np.array([0, 
1, 2, 3, 4]) + ez = np.array([0, 1, 2, 3, 4, 5]) + z = union1d(x, y) + assert_array_equal(z, ez) + assert_array_equal([], union1d([], [])) def test_setdiff1d(self): @@ -252,7 +291,7 @@ assert_array_equal(c1, c2) -class TestUnique(TestCase): +class TestUnique(object): def test_unique_1d(self): @@ -355,6 +394,16 @@ a2, a2_inv = np.unique(a, return_inverse=True) assert_array_equal(a2_inv, np.zeros(5)) + # test for ticket #9137 + a = [] + a1_idx = np.unique(a, return_index=True)[1] + a2_inv = np.unique(a, return_inverse=True)[1] + a3_idx, a3_inv = np.unique(a, return_index=True, return_inverse=True)[1:] + assert_equal(a1_idx.dtype, np.intp) + assert_equal(a2_inv.dtype, np.intp) + assert_equal(a3_idx.dtype, np.intp) + assert_equal(a3_inv.dtype, np.intp) + def test_unique_axis_errors(self): assert_raises(TypeError, self._run_axis_tests, object) assert_raises(TypeError, self._run_axis_tests, @@ -404,6 +453,15 @@ assert_array_equal(v.data, v2.data, msg) assert_array_equal(v.mask, v2.mask, msg) + def test_unique_sort_order_with_axis(self): + # These tests fail if sorting along axis is done by treating subarrays + # as unsigned byte strings. See gh-10495. 
+ fmt = "sort order incorrect for integer type '%s'" + for dt in 'bhilq': + a = np.array([[-1],[0]], dt) + b = np.unique(a, axis=0) + assert_array_equal(a, b, fmt % dt) + def _run_axis_tests(self, dtype): data = np.array([[0, 1, 0, 0], [1, 0, 0, 0], diff -Nru python-numpy-1.13.3/numpy/lib/tests/test__datasource.py python-numpy-1.14.5/numpy/lib/tests/test__datasource.py --- python-numpy-1.13.3/numpy/lib/tests/test__datasource.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test__datasource.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,7 +6,7 @@ from shutil import rmtree from numpy.testing import ( - run_module_suite, TestCase, assert_, SkipTest + run_module_suite, assert_, assert_equal, assert_raises, SkipTest, ) import numpy.lib._datasource as datasource @@ -55,7 +55,7 @@ magic_line = b'three is the magic number' -# Utility functions used by many TestCases +# Utility functions used by many tests def valid_textfile(filedir): # Generate and return a valid temporary file. 
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) @@ -95,12 +95,12 @@ return http_fakefile -class TestDataSourceOpen(TestCase): - def setUp(self): +class TestDataSourceOpen(object): + def setup(self): self.tmpdir = mkdtemp() self.ds = datasource.DataSource(self.tmpdir) - def tearDown(self): + def teardown(self): rmtree(self.tmpdir) del self.ds @@ -111,7 +111,7 @@ def test_InvalidHTTP(self): url = invalid_httpurl() - self.assertRaises(IOError, self.ds.open, url) + assert_raises(IOError, self.ds.open, url) try: self.ds.open(url) except IOError as e: @@ -119,7 +119,7 @@ assert_(e.errno is None) def test_InvalidHTTPCacheURLError(self): - self.assertRaises(URLError, self.ds._cache, invalid_httpurl()) + assert_raises(URLError, self.ds._cache, invalid_httpurl()) def test_ValidFile(self): local_file = valid_textfile(self.tmpdir) @@ -129,7 +129,7 @@ def test_InvalidFile(self): invalid_file = invalid_textfile(self.tmpdir) - self.assertRaises(IOError, self.ds.open, invalid_file) + assert_raises(IOError, self.ds.open, invalid_file) def test_ValidGzipFile(self): try: @@ -145,7 +145,7 @@ fp = self.ds.open(filepath) result = fp.readline() fp.close() - self.assertEqual(magic_line, result) + assert_equal(magic_line, result) def test_ValidBz2File(self): try: @@ -161,15 +161,15 @@ fp = self.ds.open(filepath) result = fp.readline() fp.close() - self.assertEqual(magic_line, result) + assert_equal(magic_line, result) -class TestDataSourceExists(TestCase): - def setUp(self): +class TestDataSourceExists(object): + def setup(self): self.tmpdir = mkdtemp() self.ds = datasource.DataSource(self.tmpdir) - def tearDown(self): + def teardown(self): rmtree(self.tmpdir) del self.ds @@ -177,7 +177,7 @@ assert_(self.ds.exists(valid_httpurl())) def test_InvalidHTTP(self): - self.assertEqual(self.ds.exists(invalid_httpurl()), False) + assert_equal(self.ds.exists(invalid_httpurl()), False) def test_ValidFile(self): # Test valid file in destpath @@ -191,15 +191,15 @@ def 
test_InvalidFile(self): tmpfile = invalid_textfile(self.tmpdir) - self.assertEqual(self.ds.exists(tmpfile), False) + assert_equal(self.ds.exists(tmpfile), False) -class TestDataSourceAbspath(TestCase): - def setUp(self): +class TestDataSourceAbspath(object): + def setup(self): self.tmpdir = os.path.abspath(mkdtemp()) self.ds = datasource.DataSource(self.tmpdir) - def tearDown(self): + def teardown(self): rmtree(self.tmpdir) del self.ds @@ -207,30 +207,30 @@ scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) local_path = os.path.join(self.tmpdir, netloc, upath.strip(os.sep).strip('/')) - self.assertEqual(local_path, self.ds.abspath(valid_httpurl())) + assert_equal(local_path, self.ds.abspath(valid_httpurl())) def test_ValidFile(self): tmpfile = valid_textfile(self.tmpdir) tmpfilename = os.path.split(tmpfile)[-1] # Test with filename only - self.assertEqual(tmpfile, self.ds.abspath(tmpfilename)) + assert_equal(tmpfile, self.ds.abspath(tmpfilename)) # Test filename with complete path - self.assertEqual(tmpfile, self.ds.abspath(tmpfile)) + assert_equal(tmpfile, self.ds.abspath(tmpfile)) def test_InvalidHTTP(self): scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) invalidhttp = os.path.join(self.tmpdir, netloc, upath.strip(os.sep).strip('/')) - self.assertNotEqual(invalidhttp, self.ds.abspath(valid_httpurl())) + assert_(invalidhttp != self.ds.abspath(valid_httpurl())) def test_InvalidFile(self): invalidfile = valid_textfile(self.tmpdir) tmpfile = valid_textfile(self.tmpdir) tmpfilename = os.path.split(tmpfile)[-1] # Test with filename only - self.assertNotEqual(invalidfile, self.ds.abspath(tmpfilename)) + assert_(invalidfile != self.ds.abspath(tmpfilename)) # Test filename with complete path - self.assertNotEqual(invalidfile, self.ds.abspath(tmpfile)) + assert_(invalidfile != self.ds.abspath(tmpfile)) def test_sandboxing(self): tmpfile = valid_textfile(self.tmpdir) @@ -259,12 +259,12 @@ os.sep = orig_os_sep -class 
TestRepositoryAbspath(TestCase): - def setUp(self): +class TestRepositoryAbspath(object): + def setup(self): self.tmpdir = os.path.abspath(mkdtemp()) self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - def tearDown(self): + def teardown(self): rmtree(self.tmpdir) del self.repos @@ -273,7 +273,7 @@ local_path = os.path.join(self.repos._destpath, netloc, upath.strip(os.sep).strip('/')) filepath = self.repos.abspath(valid_httpfile()) - self.assertEqual(local_path, filepath) + assert_equal(local_path, filepath) def test_sandboxing(self): tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) @@ -292,12 +292,12 @@ os.sep = orig_os_sep -class TestRepositoryExists(TestCase): - def setUp(self): +class TestRepositoryExists(object): + def setup(self): self.tmpdir = mkdtemp() self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) - def tearDown(self): + def teardown(self): rmtree(self.tmpdir) del self.repos @@ -308,7 +308,7 @@ def test_InvalidFile(self): tmpfile = invalid_textfile(self.tmpdir) - self.assertEqual(self.repos.exists(tmpfile), False) + assert_equal(self.repos.exists(tmpfile), False) def test_RemoveHTTPFile(self): assert_(self.repos.exists(valid_httpurl())) @@ -325,11 +325,11 @@ assert_(self.repos.exists(tmpfile)) -class TestOpenFunc(TestCase): - def setUp(self): +class TestOpenFunc(object): + def setup(self): self.tmpdir = mkdtemp() - def tearDown(self): + def teardown(self): rmtree(self.tmpdir) def test_DataSourceOpen(self): diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_financial.py python-numpy-1.14.5/numpy/lib/tests/test_financial.py --- python-numpy-1.13.3/numpy/lib/tests/test_financial.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_financial.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,16 +1,23 @@ from __future__ import division, absolute_import, print_function +from decimal import Decimal + import numpy as np from numpy.testing import ( - run_module_suite, TestCase, assert_, 
assert_almost_equal, - assert_allclose, assert_equal - ) + run_module_suite, assert_, assert_almost_equal, assert_allclose, + assert_equal, assert_raises +) -class TestFinancial(TestCase): +class TestFinancial(object): def test_rate(self): - assert_almost_equal(np.rate(10, 0, -3500, 10000), - 0.1107, 4) + assert_almost_equal( + np.rate(10, 0, -3500, 10000), + 0.1107, 4) + + def test_rate_decimal(self): + rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000')) + assert_equal(Decimal('0.1106908537142689284704528100'), rate) def test_irr(self): v = [-150000, 15000, 25000, 35000, 45000, 60000] @@ -34,28 +41,84 @@ def test_pv(self): assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2) + def test_pv_decimal(self): + assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')), + Decimal('-127128.1709461939327295222005')) + def test_fv(self): - assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.36, 2) + assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924) + + def test_fv_decimal(self): + assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0), + Decimal('86609.36267304300040536731624')) def test_pmt(self): - res = np.pmt(0.08/12, 5*12, 15000) + res = np.pmt(0.08 / 12, 5 * 12, 15000) tgt = -304.145914 assert_allclose(res, tgt) # Test the edge case where rate == 0.0 - res = np.pmt(0.0, 5*12, 15000) + res = np.pmt(0.0, 5 * 12, 15000) tgt = -250.0 assert_allclose(res, tgt) # Test the case where we use broadcast and # the arguments passed in are arrays. 
- res = np.pmt([[0.0, 0.8],[0.3, 0.8]],[12, 3],[2000, 20000]) - tgt = np.array([[-166.66667, -19311.258],[-626.90814, -19311.258]]) + res = np.pmt([[0.0, 0.8], [0.3, 0.8]], [12, 3], [2000, 20000]) + tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]]) assert_allclose(res, tgt) + def test_pmt_decimal(self): + res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000) + tgt = Decimal('-304.1459143262052370338701494') + assert_equal(res, tgt) + # Test the edge case where rate == 0.0 + res = np.pmt(Decimal('0'), Decimal('60'), Decimal('15000')) + tgt = -250 + assert_equal(res, tgt) + # Test the case where we use broadcast and + # the arguments passed in are arrays. + res = np.pmt([[Decimal('0'), Decimal('0.8')], [Decimal('0.3'), Decimal('0.8')]], + [Decimal('12'), Decimal('3')], [Decimal('2000'), Decimal('20000')]) + tgt = np.array([[Decimal('-166.6666666666666666666666667'), Decimal('-19311.25827814569536423841060')], + [Decimal('-626.9081401700757748402586600'), Decimal('-19311.25827814569536423841060')]]) + + # Cannot use the `assert_allclose` because it uses isfinite under the covers + # which does not support the Decimal type + # See issue: https://github.com/numpy/numpy/issues/9954 + assert_equal(res[0][0], tgt[0][0]) + assert_equal(res[0][1], tgt[0][1]) + assert_equal(res[1][0], tgt[1][0]) + assert_equal(res[1][1], tgt[1][1]) + def test_ppmt(self): - np.round(np.ppmt(0.1/12, 1, 60, 55000), 2) == 710.25 + assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25) + + def test_ppmt_decimal(self): + assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')), + Decimal('-710.2541257864217612489830917')) + + # Two tests showing how Decimal is actually getting at a more exact result + # .23 / 12 does not come out nicely as a float but does as a decimal + def test_ppmt_special_rate(self): + assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036) + + def 
test_ppmt_special_rate_decimal(self): + # When rounded out to 8 decimal places like the float based test, this should not equal the same value + # as the float, substituted for the decimal + def raise_error_because_not_equal(): + assert_equal( + round(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), 8), + Decimal('-90238044.232277036')) + + assert_raises(AssertionError, raise_error_because_not_equal) + assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), + Decimal('-90238044.2322778884413969909')) def test_ipmt(self): - np.round(np.ipmt(0.1/12, 1, 24, 2000), 2) == 16.67 + assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2), -16.67) + + def test_ipmt_decimal(self): + result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000) + assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667')) def test_nper(self): assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), @@ -70,6 +133,11 @@ np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]), 122.89, 2) + def test_npv_decimal(self): + assert_equal( + np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]), + Decimal('122.894854950942692161628715')) + def test_mirr(self): val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000] assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4) @@ -83,86 +151,195 @@ val = [39000, 30000, 21000, 37000, 46000] assert_(np.isnan(np.mirr(val, 0.10, 0.12))) + def test_mirr_decimal(self): + val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'), + Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'), + Decimal('700'), Decimal('3000')] + assert_equal(np.mirr(val, Decimal('0.08'), Decimal('0.055')), + Decimal('0.066597175031553548874239618')) + + val = [Decimal('-120000'), Decimal('39000'), Decimal('30000'), + Decimal('21000'), Decimal('37000'), Decimal('46000')] + assert_equal(np.mirr(val, Decimal('0.10'), Decimal('0.12')), Decimal('0.126094130365905145828421880')) + + val 
= [Decimal('100'), Decimal('200'), Decimal('-50'), + Decimal('300'), Decimal('-200')] + assert_equal(np.mirr(val, Decimal('0.05'), Decimal('0.06')), Decimal('0.342823387842176663647819868')) + + val = [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')] + assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12')))) + def test_when(self): - #begin - assert_almost_equal(np.rate(10, 20, -3500, 10000, 1), - np.rate(10, 20, -3500, 10000, 'begin'), 4) - #end - assert_almost_equal(np.rate(10, 20, -3500, 10000), - np.rate(10, 20, -3500, 10000, 'end'), 4) - assert_almost_equal(np.rate(10, 20, -3500, 10000, 0), - np.rate(10, 20, -3500, 10000, 'end'), 4) + # begin + assert_equal(np.rate(10, 20, -3500, 10000, 1), + np.rate(10, 20, -3500, 10000, 'begin')) + # end + assert_equal(np.rate(10, 20, -3500, 10000), + np.rate(10, 20, -3500, 10000, 'end')) + assert_equal(np.rate(10, 20, -3500, 10000, 0), + np.rate(10, 20, -3500, 10000, 'end')) # begin - assert_almost_equal(np.pv(0.07, 20, 12000, 0, 1), - np.pv(0.07, 20, 12000, 0, 'begin'), 2) + assert_equal(np.pv(0.07, 20, 12000, 0, 1), + np.pv(0.07, 20, 12000, 0, 'begin')) # end - assert_almost_equal(np.pv(0.07, 20, 12000, 0), - np.pv(0.07, 20, 12000, 0, 'end'), 2) - assert_almost_equal(np.pv(0.07, 20, 12000, 0, 0), - np.pv(0.07, 20, 12000, 0, 'end'), 2) + assert_equal(np.pv(0.07, 20, 12000, 0), + np.pv(0.07, 20, 12000, 0, 'end')) + assert_equal(np.pv(0.07, 20, 12000, 0, 0), + np.pv(0.07, 20, 12000, 0, 'end')) # begin - assert_almost_equal(np.fv(0.075, 20, -2000, 0, 1), - np.fv(0.075, 20, -2000, 0, 'begin'), 4) + assert_equal(np.fv(0.075, 20, -2000, 0, 1), + np.fv(0.075, 20, -2000, 0, 'begin')) # end - assert_almost_equal(np.fv(0.075, 20, -2000, 0), - np.fv(0.075, 20, -2000, 0, 'end'), 4) - assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), - np.fv(0.075, 20, -2000, 0, 'end'), 4) + assert_equal(np.fv(0.075, 20, -2000, 0), + np.fv(0.075, 20, -2000, 0, 'end')) + assert_equal(np.fv(0.075, 20, 
-2000, 0, 0), + np.fv(0.075, 20, -2000, 0, 'end')) # begin - assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 1), - np.pmt(0.08/12, 5*12, 15000., 0, 'begin'), 4) + assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 1), + np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'begin')) # end - assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0), - np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4) - assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 0), - np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4) + assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0), + np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end')) + assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 0), + np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end')) # begin - assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 1), - np.ppmt(0.1/12, 1, 60, 55000, 0, 'begin'), 4) + assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 1), + np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'begin')) # end - assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0), - np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4) - assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 0), - np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4) + assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0), + np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end')) + assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 0), + np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end')) # begin - assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 1), - np.ipmt(0.1/12, 1, 24, 2000, 0, 'begin'), 4) + assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 1), + np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'begin')) # end - assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0), - np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4) - assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 0), - np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4) + assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0), + np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end')) + assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 0), + np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end')) # begin - assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 1), - 
np.nper(0.075, -2000, 0, 100000., 'begin'), 4) + assert_equal(np.nper(0.075, -2000, 0, 100000., 1), + np.nper(0.075, -2000, 0, 100000., 'begin')) # end - assert_almost_equal(np.nper(0.075, -2000, 0, 100000.), - np.nper(0.075, -2000, 0, 100000., 'end'), 4) - assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 0), - np.nper(0.075, -2000, 0, 100000., 'end'), 4) + assert_equal(np.nper(0.075, -2000, 0, 100000.), + np.nper(0.075, -2000, 0, 100000., 'end')) + assert_equal(np.nper(0.075, -2000, 0, 100000., 0), + np.nper(0.075, -2000, 0, 100000., 'end')) + + def test_decimal_with_when(self): + """Test that decimals are still supported if the when argument is passed""" + # begin + assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('1')), + np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'begin')) + # end + assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000')), + np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end')) + assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('0')), + np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end')) + + # begin + assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('1')), + np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'begin')) + # end + assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')), + np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end')) + assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('0')), + np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end')) + + # begin + assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('1')), + np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'begin')) + # end + 
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0')), + np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end')) + assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('0')), + np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end')) + + # begin + assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0'), Decimal('1')), + np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0'), 'begin')) + # end + assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0')), + np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0'), 'end')) + assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0'), Decimal('0')), + np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'), + Decimal('0'), 'end')) + + # begin + assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0'), Decimal('1')), + np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0'), 'begin')) + # end + assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0')), + np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0'), 'end')) + assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0'), Decimal('0')), + np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'), + Decimal('0'), 'end')) + + # begin + assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0'), 
Decimal('1')).flat[0], + np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0'), 'begin').flat[0]) + # end + assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0')).flat[0], + np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0'), 'end').flat[0]) + assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0'), Decimal('0')).flat[0], + np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'), + Decimal('0'), 'end').flat[0]) def test_broadcast(self): assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]), [21.5449442, 20.76156441], 4) - assert_almost_equal(np.ipmt(0.1/12, list(range(5)), 24, 2000), + assert_almost_equal(np.ipmt(0.1 / 12, list(range(5)), 24, 2000), [-17.29165168, -16.66666667, -16.03647345, - -15.40102862, -14.76028842], 4) + -15.40102862, -14.76028842], 4) - assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000), + assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000), [-74.998201, -75.62318601, -76.25337923, - -76.88882405, -77.52956425], 4) + -76.88882405, -77.52956425], 4) - assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000, 0, + assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000, 0, [0, 0, 1, 'end', 'begin']), [-74.998201, -75.62318601, -75.62318601, - -76.88882405, -76.88882405], 4) + -76.88882405, -76.88882405], 4) + + def test_broadcast_decimal(self): + # Use almost equal because precision is tested in the explicit tests, this test is to ensure + # broadcast with Decimal is not broken. 
+ assert_almost_equal(np.ipmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')), + [Decimal('-17.29165168'), Decimal('-16.66666667'), Decimal('-16.03647345'), + Decimal('-15.40102862'), Decimal('-14.76028842')], 4) + + assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')), + [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-76.25337923'), + Decimal('-76.88882405'), Decimal('-77.52956425')], 4) + + assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000'), + Decimal('0'), [Decimal('0'), Decimal('0'), Decimal('1'), 'end', 'begin']), + [Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-75.62318601'), + Decimal('-76.88882405'), Decimal('-76.88882405')], 4) + if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_format.py python-numpy-1.14.5/numpy/lib/tests/test_format.py --- python-numpy-1.13.3/numpy/lib/tests/test_format.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_format.py 2018-06-12 18:28:52.000000000 +0000 @@ -615,6 +615,11 @@ format.write_array(f, d) assert_(w[0].category is UserWarning) + # check alignment of data portion + f.seek(0) + header = f.readline() + assert_(len(header) % format.ARRAY_ALIGN == 0) + f.seek(0) n = format.read_array(f) assert_array_equal(d, n) @@ -758,6 +763,7 @@ s.seek(format.MAGIC_LEN) shape, fortran, dtype = format.read_array_header_1_0(s) + assert_(s.tell() % format.ARRAY_ALIGN == 0) assert_((shape, fortran, dtype) == ((3, 6), False, float)) @@ -770,6 +776,7 @@ s.seek(format.MAGIC_LEN) shape, fortran, dtype = format.read_array_header_2_0(s) + assert_(s.tell() % format.ARRAY_ALIGN == 0) assert_((shape, fortran, dtype) == ((3, 6), False, float)) @@ -811,7 +818,7 @@ # avoid actually writing 5GB import subprocess as sp sp.check_call(["truncate", "-s", "5368709120", tf_name]) - except: + except Exception: raise 
SkipTest("Could not create 5GB large file") # write a small array to the end with open(tf_name, "wb") as f: diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_function_base.py python-numpy-1.14.5/numpy/lib/tests/test_function_base.py --- python-numpy-1.13.3/numpy/lib/tests/test_function_base.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_function_base.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,13 +6,13 @@ import decimal import numpy as np +from numpy import ma from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, + run_module_suite, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, - assert_allclose, assert_array_max_ulp, assert_warns, - assert_raises_regex, dec, suppress_warnings + assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex, + dec, suppress_warnings, HAS_REFCOUNT, ) -from numpy.testing.utils import HAS_REFCOUNT import numpy.lib.function_base as nfb from numpy.random import rand from numpy.lib import ( @@ -32,9 +32,9 @@ return data -class TestRot90(TestCase): +class TestRot90(object): def test_basic(self): - self.assertRaises(ValueError, rot90, np.ones(4)) + assert_raises(ValueError, rot90, np.ones(4)) assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2)) assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2)) assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1)) @@ -100,12 +100,12 @@ rot90(a_rot90_20, k=k-1, axes=(2, 0))) -class TestFlip(TestCase): +class TestFlip(object): def test_axes(self): - self.assertRaises(ValueError, np.flip, np.ones(4), axis=1) - self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=2) - self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=-3) + assert_raises(ValueError, np.flip, np.ones(4), axis=1) + assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=2) + assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=-3) def 
test_basic_lr(self): a = get_mat(4) @@ -173,7 +173,7 @@ np.flipud(a.swapaxes(0, i)).swapaxes(i, 0)) -class TestAny(TestCase): +class TestAny(object): def test_basic(self): y1 = [0, 0, 1, 0] @@ -190,7 +190,7 @@ assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1]) -class TestAll(TestCase): +class TestAll(object): def test_basic(self): y1 = [0, 1, 1, 0] @@ -208,7 +208,7 @@ assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1]) -class TestCopy(TestCase): +class TestCopy(object): def test_basic(self): a = np.array([[1, 2], [3, 4]]) @@ -236,7 +236,7 @@ assert_(a_fort_copy.flags.f_contiguous) -class TestAverage(TestCase): +class TestAverage(object): def test_basic(self): y1 = np.array([1, 2, 3]) @@ -346,9 +346,9 @@ a = np.array([decimal.Decimal(x) for x in range(10)]) w = np.array([decimal.Decimal(1) for _ in range(10)]) w /= w.sum() - assert_almost_equal(a.mean(0), average(a, weights=w)) + assert_almost_equal(a.mean(0), average(a, weights=w)) -class TestSelect(TestCase): +class TestSelect(object): choices = [np.array([1, 2, 3]), np.array([4, 5, 6]), np.array([7, 8, 9])] @@ -420,7 +420,7 @@ select(conditions, choices) -class TestInsert(TestCase): +class TestInsert(object): def test_basic(self): a = [1, 2, 3] @@ -521,7 +521,7 @@ assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype)) -class TestAmax(TestCase): +class TestAmax(object): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] @@ -533,7 +533,7 @@ assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0]) -class TestAmin(TestCase): +class TestAmin(object): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] @@ -545,7 +545,7 @@ assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0]) -class TestPtp(TestCase): +class TestPtp(object): def test_basic(self): a = np.array([3, 4, 5, 10, -3, -5, 6.0]) @@ -557,7 +557,7 @@ assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0]) -class TestCumsum(TestCase): +class TestCumsum(object): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] @@ -580,7 +580,7 @@ assert_array_equal(np.cumsum(a2, 
axis=1), tgt) -class TestProd(TestCase): +class TestProd(object): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] @@ -590,8 +590,8 @@ a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: - self.assertRaises(ArithmeticError, np.prod, a) - self.assertRaises(ArithmeticError, np.prod, a2, 1) + assert_raises(ArithmeticError, np.prod, a) + assert_raises(ArithmeticError, np.prod, a2, 1) else: assert_equal(a.prod(axis=0), 26400) assert_array_equal(a2.prod(axis=0), @@ -600,7 +600,7 @@ np.array([24, 1890, 600], ctype)) -class TestCumprod(TestCase): +class TestCumprod(object): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] @@ -610,9 +610,9 @@ a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: - self.assertRaises(ArithmeticError, np.cumprod, a) - self.assertRaises(ArithmeticError, np.cumprod, a2, 1) - self.assertRaises(ArithmeticError, np.cumprod, a) + assert_raises(ArithmeticError, np.cumprod, a) + assert_raises(ArithmeticError, np.cumprod, a2, 1) + assert_raises(ArithmeticError, np.cumprod, a) else: assert_array_equal(np.cumprod(a, axis=-1), np.array([1, 2, 20, 220, @@ -627,7 +627,7 @@ [10, 30, 120, 600]], ctype)) -class TestDiff(TestCase): +class TestDiff(object): def test_basic(self): x = [1, 4, 6, 7, 12] @@ -638,6 +638,29 @@ assert_array_equal(diff(x, n=2), out2) assert_array_equal(diff(x, n=3), out3) + x = [1.1, 2.2, 3.0, -0.2, -0.1] + out = np.array([1.1, 0.8, -3.2, 0.1]) + assert_almost_equal(diff(x), out) + + x = [True, True, False, False] + out = np.array([False, True, False]) + out2 = np.array([True, True]) + assert_array_equal(diff(x), out) + assert_array_equal(diff(x, n=2), out2) + + def test_axis(self): + x = np.zeros((10, 20, 30)) + x[:, 1::2, :] = 1 + exp = np.ones((10, 19, 30)) + exp[:, 1::2, :] = -1 + assert_array_equal(diff(x), np.zeros((10, 20, 29))) + assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29))) + assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30))) + assert_array_equal(diff(x, 
axis=1), exp) + assert_array_equal(diff(x, axis=-2), exp) + assert_raises(np.AxisError, diff, x, axis=3) + assert_raises(np.AxisError, diff, x, axis=-4) + def test_nd(self): x = 20 * rand(10, 20, 30) out1 = x[:, :, 1:] - x[:, :, :-1] @@ -649,10 +672,49 @@ assert_array_equal(diff(x, axis=0), out3) assert_array_equal(diff(x, n=2, axis=0), out4) + def test_n(self): + x = list(range(3)) + assert_raises(ValueError, diff, x, n=-1) + output = [diff(x, n=n) for n in range(1, 5)] + expected = [[1, 1], [0], [], []] + assert_(diff(x, n=0) is x) + for n, (expected, out) in enumerate(zip(expected, output), start=1): + assert_(type(out) is np.ndarray) + assert_array_equal(out, expected) + assert_equal(out.dtype, np.int_) + assert_equal(len(out), max(0, len(x) - n)) + + def test_times(self): + x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + expected = [ + np.array([1, 1], dtype='timedelta64[D]'), + np.array([0], dtype='timedelta64[D]'), + ] + expected.extend([np.array([], dtype='timedelta64[D]')] * 3) + for n, exp in enumerate(expected, start=1): + out = diff(x, n=n) + assert_array_equal(out, exp) + assert_equal(out.dtype, exp.dtype) + + def test_subclass(self): + x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], + mask=[[False, False], [True, False], + [False, True], [True, True], [False, False]]) + out = diff(x) + assert_array_equal(out.data, [[1], [1], [1], [1], [1]]) + assert_array_equal(out.mask, [[False], [True], + [True], [True], [False]]) + assert_(type(out) is type(x)) + + out3 = diff(x, n=3) + assert_array_equal(out3.data, [[], [], [], [], []]) + assert_array_equal(out3.mask, [[], [], [], [], []]) + assert_(type(out3) is type(x)) + -class TestDelete(TestCase): +class TestDelete(object): - def setUp(self): + def setup(self): self.a = np.arange(5) self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) @@ -725,7 +787,7 @@ assert_equal(m.flags.f_contiguous, k.flags.f_contiguous) -class TestGradient(TestCase): +class TestGradient(object): def 
test_basic(self): v = [[1, 1], [3, 4]] @@ -735,7 +797,7 @@ assert_array_equal(gradient(x), dx) assert_array_equal(gradient(v), dx) - def test_args(self): + def test_args(self): dx = np.cumsum(np.ones(5)) dx_uneven = [1., 2., 5., 9., 11.] f_2d = np.arange(25).reshape(5, 5) @@ -825,15 +887,15 @@ def test_spacing(self): f = np.array([0, 2., 3., 4., 5., 5.]) - f = np.tile(f, (6,1)) + f.reshape(-1, 1) + f = np.tile(f, (6,1)) + f.reshape(-1, 1) x_uneven = np.array([0., 0.5, 1., 3., 5., 7.]) x_even = np.arange(6.) - + fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1)) fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1)) fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1)) fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1)) - + # evenly spaced for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]: res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order) @@ -843,19 +905,19 @@ axis=None, edge_order=edge_order) assert_array_equal(res1, res2) assert_array_equal(res2, res3) - assert_almost_equal(res1[0], exp_res.T) - assert_almost_equal(res1[1], exp_res) - + assert_almost_equal(res1[0], exp_res.T) + assert_almost_equal(res1[1], exp_res) + res1 = gradient(f, 1., axis=0, edge_order=edge_order) res2 = gradient(f, x_even, axis=0, edge_order=edge_order) assert_(res1.shape == res2.shape) assert_almost_equal(res2, exp_res.T) - + res1 = gradient(f, 1., axis=1, edge_order=edge_order) res2 = gradient(f, x_even, axis=1, edge_order=edge_order) assert_(res1.shape == res2.shape) assert_array_equal(res2, exp_res) - + # unevenly spaced for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]: res1 = gradient(f, x_uneven, x_uneven, @@ -865,13 +927,13 @@ assert_array_equal(res1, res2) assert_almost_equal(res1[0], exp_res.T) assert_almost_equal(res1[1], exp_res) - + res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order) assert_almost_equal(res1, exp_res.T) - + res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order) 
assert_almost_equal(res1, exp_res) - + # mixed res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1) res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1) @@ -879,14 +941,14 @@ assert_array_equal(res1[1], res2[0]) assert_almost_equal(res1[0], fdx_even_ord1.T) assert_almost_equal(res1[1], fdx_uneven_ord1) - + res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2) res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2) assert_array_equal(res1[0], res2[1]) assert_array_equal(res1[1], res2[0]) assert_almost_equal(res1[0], fdx_even_ord2.T) assert_almost_equal(res1[1], fdx_uneven_ord2) - + def test_specific_axes(self): # Testing that gradient can work on a given axis only v = [[1, 1], [3, 4]] @@ -912,7 +974,7 @@ assert_raises(np.AxisError, gradient, x, axis=3) assert_raises(np.AxisError, gradient, x, axis=-3) # assert_raises(TypeError, gradient, x, axis=[1,]) - + def test_timedelta64(self): # Make sure gradient() can handle special types like timedelta64 x = np.array( @@ -924,20 +986,26 @@ assert_array_equal(gradient(x), dx) assert_(dx.dtype == np.dtype('timedelta64[D]')) + def test_inexact_dtypes(self): + for dt in [np.float16, np.float32, np.float64]: + # dtypes should not be promoted in a different way to what diff does + x = np.array([1, 2, 3], dtype=dt) + assert_equal(gradient(x).dtype, np.diff(x).dtype) + def test_values(self): # needs at least 2 points for edge_order ==1 gradient(np.arange(2), edge_order=1) # needs at least 3 points for edge_order ==1 gradient(np.arange(3), edge_order=2) - + assert_raises(ValueError, gradient, np.arange(0), edge_order=1) assert_raises(ValueError, gradient, np.arange(0), edge_order=2) assert_raises(ValueError, gradient, np.arange(1), edge_order=1) assert_raises(ValueError, gradient, np.arange(1), edge_order=2) - assert_raises(ValueError, gradient, np.arange(2), edge_order=2) + assert_raises(ValueError, gradient, np.arange(2), edge_order=2) -class TestAngle(TestCase): +class TestAngle(object): def 
test_basic(self): x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2, @@ -953,7 +1021,7 @@ assert_array_almost_equal(z, zo, 11) -class TestTrimZeros(TestCase): +class TestTrimZeros(object): """ Only testing for integer splits. @@ -976,7 +1044,7 @@ assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4])) -class TestExtins(TestCase): +class TestExtins(object): def test_basic(self): a = np.array([1, 3, 2, 1, 2, 3, 3]) @@ -1015,7 +1083,7 @@ assert_array_equal(a, ac) -class TestVectorize(TestCase): +class TestVectorize(object): def test_simple(self): def addsubtract(a, b): @@ -1074,7 +1142,7 @@ import random try: vectorize(random.randrange) # Should succeed - except: + except Exception: raise AssertionError() def test_keywords2_ticket_2100(self): @@ -1347,7 +1415,7 @@ f(x) -class TestDigitize(TestCase): +class TestDigitize(object): def test_forward(self): x = np.arange(-6, 5) @@ -1420,7 +1488,7 @@ assert_(not isinstance(digitize(b, a, True), A)) -class TestUnwrap(TestCase): +class TestUnwrap(object): def test_simple(self): # check that unwrap removes jumps greather that 2*pi @@ -1429,7 +1497,7 @@ assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) -class TestFilterwindows(TestCase): +class TestFilterwindows(object): def test_hanning(self): # check symmetry @@ -1460,7 +1528,7 @@ assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) -class TestTrapz(TestCase): +class TestTrapz(object): def test_simple(self): x = np.arange(-10, 10, .1) @@ -1532,7 +1600,7 @@ assert_almost_equal(mr, r) -class TestSinc(TestCase): +class TestSinc(object): def test_simple(self): assert_(sinc(0) == 1) @@ -1549,12 +1617,12 @@ assert_array_equal(y1, y3) -class TestHistogram(TestCase): +class TestHistogram(object): - def setUp(self): + def setup(self): pass - def tearDown(self): + def teardown(self): pass def test_simple(self): @@ -1650,16 +1718,16 @@ # Check the type of the returned histogram a = np.arange(10) + .5 h, b = histogram(a) - assert_(np.issubdtype(h.dtype, int)) + 
assert_(np.issubdtype(h.dtype, np.integer)) h, b = histogram(a, normed=True) - assert_(np.issubdtype(h.dtype, float)) + assert_(np.issubdtype(h.dtype, np.floating)) h, b = histogram(a, weights=np.ones(10, int)) - assert_(np.issubdtype(h.dtype, int)) + assert_(np.issubdtype(h.dtype, np.integer)) h, b = histogram(a, weights=np.ones(10, float)) - assert_(np.issubdtype(h.dtype, float)) + assert_(np.issubdtype(h.dtype, np.floating)) def test_f32_rounding(self): # gh-4799, check that the rounding of the edges works with float32 @@ -1760,13 +1828,13 @@ left_edges = edges[:-1][mask] right_edges = edges[1:][mask] for x, left, right in zip(arr, left_edges, right_edges): - self.assertGreaterEqual(x, left) - self.assertLess(x, right) + assert_(x >= left) + assert_(x < right) def test_last_bin_inclusive_range(self): arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) - self.assertEqual(hist[-1], 1) + assert_equal(hist[-1], 1) def test_unsigned_monotonicity_check(self): # Ensures ValueError is raised if bins not increasing monotonically @@ -1777,7 +1845,7 @@ hist, edges = np.histogram(arr, bins=bins) -class TestHistogramOptimBinNums(TestCase): +class TestHistogramOptimBinNums(object): """ Provide test coverage when using provided estimators for optimal number of bins @@ -1887,7 +1955,7 @@ completely ignored. All test values have been precomputed and the shouldn't change. """ - # some basic sanity checking, with some fixed data. + # some basic sanity checking, with some fixed data. 
# Checking for the correct number of bins basic_test = { 50: {'fd': 8, 'scott': 8, 'rice': 15, @@ -1899,7 +1967,7 @@ } for testlen, expectedResults in basic_test.items(): - # create some sort of non uniform data to test with + # create some sort of non uniform data to test with # (3 peak uniform mixture) x1 = np.linspace(-10, -1, testlen // 5 * 2) x2 = np.linspace(1, 10, testlen // 5 * 3) @@ -1917,11 +1985,11 @@ """ estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] for estimator in estimator_list: - assert_raises(TypeError, histogram, [1, 2, 3], + assert_raises(TypeError, histogram, [1, 2, 3], estimator, weights=[1, 2, 3]) -class TestHistogramdd(TestCase): +class TestHistogramdd(object): def test_simple(self): x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], @@ -2061,7 +2129,7 @@ range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) -class TestUnique(TestCase): +class TestUnique(object): def test_simple(self): x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) @@ -2073,7 +2141,7 @@ assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) -class TestCheckFinite(TestCase): +class TestCheckFinite(object): def test_simple(self): a = [1, 2, 3] @@ -2090,7 +2158,7 @@ assert_(a.dtype == np.float64) -class TestCorrCoef(TestCase): +class TestCorrCoef(object): A = np.array( [[0.15391142, 0.18045767, 0.14197213], [0.70461506, 0.96474128, 0.27906989], @@ -2175,7 +2243,7 @@ assert_(np.all(np.abs(c) <= 1.0)) -class TestCov(TestCase): +class TestCov(object): x1 = np.array([[0, 2], [1, 1], [2, 0]]).T res1 = np.array([[1., -1.], [-1., 1.]]) x2 = np.array([0.0, 1.0, 2.0], ndmin=2) @@ -2273,7 +2341,7 @@ self.res1) -class Test_I0(TestCase): +class Test_I0(object): def test_simple(self): assert_almost_equal( @@ -2299,7 +2367,7 @@ [1.05884290, 1.06432317]])) -class TestKaiser(TestCase): +class TestKaiser(object): def test_simple(self): assert_(np.isfinite(kaiser(1, 1.0))) @@ -2318,7 +2386,7 @@ kaiser(3, 4) -class TestMsort(TestCase): +class TestMsort(object): def 
test_simple(self): A = np.array([[0.44567325, 0.79115165, 0.54900530], @@ -2331,7 +2399,7 @@ [0.64864341, 0.79115165, 0.96098397]])) -class TestMeshgrid(TestCase): +class TestMeshgrid(object): def test_simple(self): [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) @@ -2420,7 +2488,7 @@ assert_equal(x[1, :], X) -class TestPiecewise(TestCase): +class TestPiecewise(object): def test_simple(self): # Condition is single bool list @@ -2446,6 +2514,11 @@ x = piecewise([0, 0], [[False, True]], [lambda x:-1]) assert_array_equal(x, [0, -1]) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], []) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], [1, 2, 3]) + def test_two_conditions(self): x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) assert_array_equal(x, [3, 4]) @@ -2470,7 +2543,7 @@ assert_(y == 0) x = 5 - y = piecewise(x, [[True], [False]], [1, 0]) + y = piecewise(x, [True, False], [1, 0]) assert_(y.ndim == 0) assert_(y == 1) @@ -2488,6 +2561,17 @@ y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) assert_array_equal(y, 2) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1]) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) + + def test_0d_0d_condition(self): + x = np.array(3) + c = np.array(x > 3) + y = piecewise(x, [c], [1, 2]) + assert_equal(y, 2) + def test_multidimensional_extrafunc(self): x = np.array([[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]]) @@ -2496,7 +2580,7 @@ [3., 3., 1.]])) -class TestBincount(TestCase): +class TestBincount(object): def test_simple(self): y = np.bincount(np.arange(4)) @@ -2555,7 +2639,7 @@ "'str' object cannot be interpreted", lambda: np.bincount(x, minlength="foobar")) assert_raises_regex(ValueError, - "must be non-negative", + "must not be negative", lambda: np.bincount(x, minlength=-1)) x = 
np.arange(5) @@ -2563,7 +2647,7 @@ "'str' object cannot be interpreted", lambda: np.bincount(x, minlength="foobar")) assert_raises_regex(ValueError, - "minlength must be non-negative", + "must not be negative", lambda: np.bincount(x, minlength=-1)) @dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount") @@ -2583,7 +2667,7 @@ assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) -class TestInterp(TestCase): +class TestInterp(object): def test_exceptions(self): assert_raises(ValueError, interp, 0, [], []) @@ -2610,28 +2694,28 @@ incres = interp(incpts, xp, yp) decres = interp(decpts, xp, yp) - inctgt = np.array([1, 1, 1, 1], dtype=np.float) + inctgt = np.array([1, 1, 1, 1], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) incres = interp(incpts, xp, yp, left=0) decres = interp(decpts, xp, yp, left=0) - inctgt = np.array([0, 1, 1, 1], dtype=np.float) + inctgt = np.array([0, 1, 1, 1], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) incres = interp(incpts, xp, yp, right=2) decres = interp(decpts, xp, yp, right=2) - inctgt = np.array([1, 1, 1, 2], dtype=np.float) + inctgt = np.array([1, 1, 1, 2], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) incres = interp(incpts, xp, yp, left=0, right=2) decres = interp(decpts, xp, yp, left=0, right=2) - inctgt = np.array([0, 1, 1, 2], dtype=np.float) + inctgt = np.array([0, 1, 1, 2], dtype=float) dectgt = inctgt[::-1] assert_equal(incres, inctgt) assert_equal(decres, dectgt) @@ -2701,7 +2785,7 @@ assert_array_equal(res[i], desired[i]) -class TestPercentile(TestCase): +class TestPercentile(object): def test_basic(self): x = np.arange(8) * 0.5 @@ -2805,7 +2889,7 @@ # test for no empty dimensions for compatibility with old percentile x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50), 5.5) - self.assertTrue(np.isscalar(np.percentile(x, 50))) + 
assert_(np.isscalar(np.percentile(x, 50))) r0 = np.array([4., 5., 6., 7.]) assert_equal(np.percentile(x, 50, axis=0), r0) assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) @@ -2826,7 +2910,7 @@ # test for no empty dimensions for compatibility with old percentile x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50, interpolation='lower'), 5.) - self.assertTrue(np.isscalar(np.percentile(x, 50))) + assert_(np.isscalar(np.percentile(x, 50))) r0 = np.array([4., 5., 6., 7.]) c0 = np.percentile(x, 50, interpolation='lower', axis=0) assert_equal(c0, r0) @@ -2958,7 +3042,7 @@ o = np.random.normal(size=(71, 23)) x = np.dstack([o] * 10) assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) - x = np.rollaxis(x, -1, 0) + x = np.moveaxis(x, -1, 0) assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) x = x.swapaxes(0, 1).copy() assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) @@ -3132,7 +3216,7 @@ a, [0.3, 0.6], (0, 2), interpolation='nearest'), b) -class TestMedian(TestCase): +class TestMedian(object): def test_basic(self): a0 = np.array(1) @@ -3339,7 +3423,7 @@ o = np.random.normal(size=(71, 23)) x = np.dstack([o] * 10) assert_equal(np.median(x, axis=(0, 1)), np.median(o)) - x = np.rollaxis(x, -1, 0) + x = np.moveaxis(x, -1, 0) assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) x = x.swapaxes(0, 1).copy() assert_equal(np.median(x, axis=(0, -1)), np.median(o)) @@ -3389,7 +3473,7 @@ (1, 1, 7, 1)) -class TestAdd_newdoc_ufunc(TestCase): +class TestAdd_newdoc_ufunc(object): def test_ufunc_arg(self): assert_raises(TypeError, add_newdoc_ufunc, 2, "blah") @@ -3399,15 +3483,15 @@ assert_raises(TypeError, add_newdoc_ufunc, np.add, 3) -class TestAdd_newdoc(TestCase): +class TestAdd_newdoc(object): @dec.skipif(sys.flags.optimize == 2) def test_add_doc(self): # test np.add_newdoc tgt = "Current flat index into the array." 
- self.assertEqual(np.core.flatiter.index.__doc__[:len(tgt)], tgt) - self.assertTrue(len(np.core.ufunc.identity.__doc__) > 300) - self.assertTrue(len(np.lib.index_tricks.mgrid.__doc__) > 300) + assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt) + assert_(len(np.core.ufunc.identity.__doc__) > 300) + assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300) if __name__ == "__main__": diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_index_tricks.py python-numpy-1.14.5/numpy/lib/tests/test_index_tricks.py --- python-numpy-1.13.3/numpy/lib/tests/test_index_tricks.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_index_tricks.py 2018-06-12 18:28:52.000000000 +0000 @@ -2,8 +2,9 @@ import numpy as np from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, - assert_almost_equal, assert_array_almost_equal, assert_raises + run_module_suite, assert_, assert_equal, assert_array_equal, + assert_almost_equal, assert_array_almost_equal, assert_raises, + assert_raises_regex ) from numpy.lib.index_tricks import ( mgrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, @@ -11,7 +12,7 @@ ) -class TestRavelUnravelIndex(TestCase): +class TestRavelUnravelIndex(object): def test_basic(self): assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) @@ -110,11 +111,21 @@ def test_writeability(self): # See gh-7269 x, y = np.unravel_index([1, 2, 3], (4, 5)) - self.assertTrue(x.flags.writeable) - self.assertTrue(y.flags.writeable) + assert_(x.flags.writeable) + assert_(y.flags.writeable) -class TestGrid(TestCase): + def test_0d(self): + # gh-580 + x = np.unravel_index(0, ()) + assert_equal(x, ()) + + assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) + assert_raises_regex( + ValueError, "out of bounds", np.unravel_index, [1], ()) + + +class TestGrid(object): def test_basic(self): a = mgrid[-1:1:10j] b = mgrid[-1:1:0.1] @@ -147,7 
+158,7 @@ 0.2*np.ones(20, 'd'), 11) -class TestConcatenator(TestCase): +class TestConcatenator(object): def test_1d(self): assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) b = np.ones(5) @@ -206,14 +217,14 @@ assert_equal(type(actual), type(expected)) -class TestNdenumerate(TestCase): +class TestNdenumerate(object): def test_basic(self): a = np.array([[1, 2], [3, 4]]) assert_equal(list(ndenumerate(a)), [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) -class TestIndexExpression(TestCase): +class TestIndexExpression(object): def test_regression_1(self): # ticket #1196 a = np.arange(2) @@ -227,7 +238,7 @@ assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) -class TestIx_(TestCase): +class TestIx_(object): def test_regression_1(self): # Test empty inputs create ouputs of indexing type, gh-5804 # Test both lists and arrays @@ -243,7 +254,7 @@ for k, (a, sz) in enumerate(zip(arrays, sizes)): assert_equal(a.shape[k], sz) assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) - assert_(np.issubdtype(a.dtype, int)) + assert_(np.issubdtype(a.dtype, np.integer)) def test_bool(self): bool_a = [True, False, True, True] diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_io.py python-numpy-1.14.5/numpy/lib/tests/test_io.py --- python-numpy-1.13.3/numpy/lib/tests/test_io.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_io.py 2018-06-12 18:28:52.000000000 +0000 @@ -8,8 +8,11 @@ import time import warnings import gc -from io import BytesIO +import io +from io import BytesIO, StringIO from datetime import datetime +import locale +import re import numpy as np import numpy.ma as ma @@ -17,9 +20,9 @@ from numpy.compat import asbytes, bytes, unicode, Path from numpy.ma.testutils import assert_equal from numpy.testing import ( - TestCase, run_module_suite, assert_warns, assert_, + run_module_suite, assert_warns, assert_, SkipTest, assert_raises_regex, assert_raises, assert_allclose, - assert_array_equal, temppath, 
dec, IS_PYPY, suppress_warnings + assert_array_equal, temppath, tempdir, dec, IS_PYPY, suppress_warnings ) @@ -44,6 +47,16 @@ MAJVER, MINVER = sys.version_info[:2] IS_64BIT = sys.maxsize > 2**32 +try: + import bz2 + HAS_BZ2 = True +except ImportError: + HAS_BZ2 = False +try: + import lzma + HAS_LZMA = True +except ImportError: + HAS_LZMA = False def strptime(s, fmt=None): @@ -52,10 +65,9 @@ 2.5. """ - if sys.version_info[0] >= 3: - return datetime(*time.strptime(s.decode('latin1'), fmt)[:3]) - else: - return datetime(*time.strptime(s, fmt)[:3]) + if type(s) == bytes: + s = s.decode("latin1") + return datetime(*time.strptime(s, fmt)[:3]) class RoundtripTest(object): @@ -144,7 +156,7 @@ a = np.array([1, 2, 3, 4], int) self.roundtrip(a) - @np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32") + @dec.knownfailureif(sys.platform == 'win32', "Fail on Win32") def test_mmap(self): a = np.array([[1, 2.5], [4, 7.3]]) self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) @@ -165,7 +177,7 @@ self.check_roundtrips(a) -class TestSaveLoad(RoundtripTest, TestCase): +class TestSaveLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): RoundtripTest.roundtrip(self, np.save, *args, **kwargs) assert_equal(self.arr[0], self.arr_reloaded) @@ -173,7 +185,7 @@ assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) -class TestSavezLoad(RoundtripTest, TestCase): +class TestSavezLoad(RoundtripTest): def roundtrip(self, *args, **kwargs): RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) try: @@ -188,8 +200,8 @@ self.arr_reloaded.fid.close() os.remove(self.arr_reloaded.fid.name) - @np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems") - @np.testing.dec.slow + @dec.skipif(not IS_64BIT, "Works only with 64bit systems") + @dec.slow def test_big_arrays(self): L = (1 << 31) + 100000 a = np.empty(L, dtype=np.uint8) @@ -265,7 +277,7 @@ fp.seek(0) assert_(not fp.closed) - @np.testing.dec.skipif(IS_PYPY, "context manager required on 
PyPy") + @dec.skipif(IS_PYPY, "context manager required on PyPy") def test_closing_fid(self): # Test that issue #1517 (too many opened files) remains closed # It might be a "weak" test since failed to get triggered on @@ -304,7 +316,7 @@ assert_(fp.closed) -class TestSaveTxt(TestCase): +class TestSaveTxt(object): def test_array(self): a = np.array([[1, 2], [3, 4]], float) fmt = "%.18e" @@ -329,6 +341,12 @@ lines = c.readlines() assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) + def test_0D_3D(self): + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, np.array(1)) + assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]])) + + def test_record(self): a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) c = BytesIO() @@ -373,7 +391,7 @@ # Test the functionality of the header and footer keyword argument. c = BytesIO() - a = np.array([(1, 2), (3, 4)], dtype=np.int) + a = np.array([(1, 2), (3, 4)], dtype=int) test_header_footer = 'Test header / footer' # Test the header keyword argument np.savetxt(c, a, fmt='%1d', header=test_header_footer) @@ -460,8 +478,134 @@ b = np.loadtxt(w) assert_array_equal(a, b) + def test_unicode(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.unicode) + with tempdir() as tmpdir: + # set encoding as on windows it may not be unicode even on py3 + np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'], + encoding='UTF-8') + + def test_unicode_roundtrip(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.unicode) + # our gz wrapper support encoding + suffixes = ['', '.gz'] + # stdlib 2 versions do not support encoding + if MAJVER > 2: + if HAS_BZ2: + suffixes.append('.bz2') + if HAS_LZMA: + suffixes.extend(['.xz', '.lzma']) + with tempdir() as tmpdir: + for suffix in suffixes: + np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a, + fmt=['%s'], encoding='UTF-16-LE') + b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix), + encoding='UTF-16-LE', 
dtype=np.unicode) + assert_array_equal(a, b) + + def test_unicode_bytestream(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.unicode) + s = BytesIO() + np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') + s.seek(0) + assert_equal(s.read().decode('UTF-8'), utf8 + '\n') + + def test_unicode_stringstream(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.unicode) + s = StringIO() + np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') + s.seek(0) + assert_equal(s.read(), utf8 + '\n') + + +class LoadTxtBase(object): + def check_compressed(self, fopen, suffixes): + # Test that we can load data from a compressed file + wanted = np.arange(6).reshape((2, 3)) + linesep = ('\n', '\r\n', '\r') + for sep in linesep: + data = '0 1 2' + sep + '3 4 5' + for suffix in suffixes: + with temppath(suffix=suffix) as name: + with fopen(name, mode='wt', encoding='UTF-32-LE') as f: + f.write(data) + res = self.loadfunc(name, encoding='UTF-32-LE') + assert_array_equal(res, wanted) + with fopen(name, "rt", encoding='UTF-32-LE') as f: + res = self.loadfunc(f) + assert_array_equal(res, wanted) + + # Python2 .open does not support encoding + @dec.skipif(MAJVER == 2) + def test_compressed_gzip(self): + self.check_compressed(gzip.open, ('.gz',)) + + @dec.skipif(MAJVER == 2 or not HAS_BZ2) + def test_compressed_gzip(self): + self.check_compressed(bz2.open, ('.bz2',)) + + @dec.skipif(MAJVER == 2 or not HAS_LZMA) + def test_compressed_gzip(self): + self.check_compressed(lzma.open, ('.xz', '.lzma')) + + def test_encoding(self): + with temppath() as path: + with open(path, "wb") as f: + f.write('0.\n1.\n2.'.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16") + assert_array_equal(x, [0., 1., 2.]) + + def test_stringload(self): + # umlaute + nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8") + with temppath() as path: + with open(path, "wb") as f: + f.write(nonascii.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode) 
+ assert_array_equal(x, nonascii) + + def test_binary_decode(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=np.unicode, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_converters_decode(self): + # test converters that decode strings + c = TextIO() + c.write(b'\xcf\x96') + c.seek(0) + x = self.loadfunc(c, dtype=np.unicode, + converters={0: lambda x: x.decode('UTF-8')}) + a = np.array([b'\xcf\x96'.decode('UTF-8')]) + assert_array_equal(x, a) + + def test_converters_nodecode(self): + # test native string converters enabled by setting an encoding + utf8 = b'\xcf\x96'.decode('UTF-8') + with temppath() as path: + with io.open(path, 'wt', encoding='UTF-8') as f: + f.write(utf8) + x = self.loadfunc(path, dtype=np.unicode, + converters={0: lambda x: x + 't'}, + encoding='UTF-8') + a = np.array([utf8 + 't']) + assert_array_equal(x, a) + + +class TestLoadTxt(LoadTxtBase): + loadfunc = staticmethod(np.loadtxt) + + def setUp(self): + # lower chunksize for testing + self.orig_chunk = np.lib.npyio._loadtxt_chunksize + np.lib.npyio._loadtxt_chunksize = 1 + def tearDown(self): + np.lib.npyio._loadtxt_chunksize = self.orig_chunk -class TestLoadTxt(TestCase): def test_record(self): c = TextIO() c.write('1 2\n3 4') @@ -485,7 +629,7 @@ c.write('1 2\n3 4') c.seek(0) - x = np.loadtxt(c, dtype=np.int) + x = np.loadtxt(c, dtype=int) a = np.array([[1, 2], [3, 4]], int) assert_array_equal(x, a) @@ -721,7 +865,7 @@ # Test using an explicit dtype with an object data = """ 1; 2001-01-01 2; 2002-01-31 """ - ndtype = [('idx', int), ('code', np.object)] + ndtype = [('idx', int), ('code', object)] func = lambda s: strptime(s.strip(), "%Y-%m-%d") converters = {1: func} test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, @@ -751,11 +895,11 @@ # IEEE doubles and floats only, otherwise the float32 # conversion may fail. 
tgt = np.logspace(-10, 10, 5).astype(np.float32) - tgt = np.hstack((tgt, -tgt)).astype(np.float) + tgt = np.hstack((tgt, -tgt)).astype(float) inp = '\n'.join(map(float.hex, tgt)) c = TextIO() c.write(inp) - for dt in [np.float, np.float32]: + for dt in [float, np.float32]: c.seek(0) res = np.loadtxt(c, dtype=dt) assert_equal(res, tgt, err_msg="%s" % dt) @@ -765,7 +909,7 @@ c = TextIO() c.write("%s %s" % tgt) c.seek(0) - res = np.loadtxt(c, dtype=np.complex) + res = np.loadtxt(c, dtype=complex) assert_equal(res, tgt) def test_universal_newline(self): @@ -863,9 +1007,24 @@ dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)]) np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed + @dec.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968') + def test_binary_load(self): + butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\ + b"20,2,3,\xc3\x95scar\n\r" + sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines() + with temppath() as path: + with open(path, "wb") as f: + f.write(butf8) + with open(path, "rb") as f: + x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode) + assert_array_equal(x, sutf8) + # test broken latin1 conversion people now rely on + with open(path, "rb") as f: + x = np.loadtxt(f, encoding="UTF-8", dtype="S") + x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello', b'20,2,3,\xc3\x95scar'] + assert_array_equal(x, np.array(x, dtype="S")) -class Testfromregex(TestCase): - # np.fromregex expects files opened in binary mode. 
+class Testfromregex(object): def test_record(self): c = TextIO() c.write('1.312 foo\n1.534 bar\n4.444 qux') @@ -898,12 +1057,29 @@ a = np.array([(1312,), (1534,), (4444,)], dtype=dt) assert_array_equal(x, a) + def test_record_unicode(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux') + + dt = [('num', np.float64), ('val', 'U4')] + x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8') + a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'), + (4.444, 'qux')], dtype=dt) + assert_array_equal(x, a) + + regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE) + x = np.fromregex(path, regexp, dt, encoding='UTF-8') + assert_array_equal(x, a) + #####-------------------------------------------------------------------------- -class TestFromTxt(TestCase): - # +class TestFromTxt(LoadTxtBase): + loadfunc = staticmethod(np.genfromtxt) + def test_record(self): # Test w/ explicit dtype data = TextIO('1 2\n3 4') @@ -1006,7 +1182,10 @@ def test_header(self): # Test retrieving a header data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') - test = np.ndfromtxt(data, dtype=None, names=True) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.ndfromtxt(data, dtype=None, names=True) + assert_(w[0].category is np.VisibleDeprecationWarning) control = {'gender': np.array([b'M', b'F']), 'age': np.array([64.0, 25.0]), 'weight': np.array([75.0, 60.0])} @@ -1017,7 +1196,10 @@ def test_auto_dtype(self): # Test the automatic definition of the output dtype data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') - test = np.ndfromtxt(data, dtype=None) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.ndfromtxt(data, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) control = 
[np.array([b'A', b'BCD']), np.array([64, 25]), np.array([75.0, 60.0]), @@ -1063,7 +1245,10 @@ M 33 21.99 """) # The # is part of the first name and should be deleted automatically. - test = np.genfromtxt(data, names=True, dtype=None) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], dtype=[('gender', '|S1'), ('age', int), ('weight', float)]) assert_equal(test, ctrl) @@ -1074,14 +1259,20 @@ F 35 58.330000 M 33 21.99 """) - test = np.genfromtxt(data, names=True, dtype=None) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) assert_equal(test, ctrl) def test_autonames_and_usecols(self): # Tests names and usecols data = TextIO('A B C D\n aaaa 121 45 9.1') - test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), - names=True, dtype=None) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), + names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) control = np.array(('aaaa', 45, 9.1), dtype=[('A', '|S4'), ('C', int), ('D', float)]) assert_equal(test, control) @@ -1098,8 +1289,12 @@ def test_converters_with_usecols_and_names(self): # Tests names and usecols data = TextIO('A B C D\n aaaa 121 45 9.1') - test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True, - dtype=None, converters={'C': lambda s: 2 * int(s)}) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True, + dtype=None, 
+ converters={'C': lambda s: 2 * int(s)}) + assert_(w[0].category is np.VisibleDeprecationWarning) control = np.array(('aaaa', 90, 9.1), dtype=[('A', '|S4'), ('C', int), ('D', float)]) assert_equal(test, control) @@ -1178,19 +1373,19 @@ conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', names=None, converters=conv) - control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp) + control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp) assert_equal(test, control) dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')] test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', usecols=(0,1,3), names=None, converters=conv) - control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp) + control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp) assert_equal(test, control) def test_dtype_with_object(self): # Test using an explicit dtype with an object data = """ 1; 2001-01-01 2; 2002-01-31 """ - ndtype = [('idx', int), ('code', np.object)] + ndtype = [('idx', int), ('code', object)] func = lambda s: strptime(s.strip(), "%Y-%m-%d") converters = {1: func} test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, @@ -1200,7 +1395,7 @@ dtype=ndtype) assert_equal(test, control) - ndtype = [('nest', [('idx', int), ('code', np.object)])] + ndtype = [('nest', [('idx', int), ('code', object)])] try: test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, converters=converters) @@ -1219,6 +1414,18 @@ dtype=[('', '|S10'), ('', float)]) assert_equal(test, control) + def test_utf8_userconverters_with_explicit_dtype(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip') + test = np.genfromtxt(path, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: np.unicode}, + encoding='UTF-8') + control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)], + dtype=[('', 
'|U11'), ('', float)]) + assert_equal(test, control) + def test_spacedelimiter(self): # Test space delimiter data = TextIO("1 2 3 4 5\n6 7 8 9 10") @@ -1337,7 +1544,7 @@ test = np.mafromtxt(data, dtype=None, **kwargs) control = ma.array([(0, 1), (2, -1)], mask=[(False, False), (False, True)], - dtype=[('A', np.int), ('B', np.int)]) + dtype=[('A', int), ('B', int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) # @@ -1345,7 +1552,7 @@ test = np.mafromtxt(data, **kwargs) control = ma.array([(0, 1), (2, -1)], mask=[(False, False), (False, True)], - dtype=[('A', np.float), ('B', np.float)]) + dtype=[('A', float), ('B', float)]) assert_equal(test, control) assert_equal(test.mask, control.mask) @@ -1414,7 +1621,7 @@ missing_values='-999.0', names=True,) control = ma.array([(0, 1.5), (2, -1.)], mask=[(False, False), (False, True)], - dtype=[('A', np.int), ('B', np.float)]) + dtype=[('A', int), ('B', float)]) assert_equal(test, control) assert_equal(test.mask, control.mask) @@ -1545,11 +1752,17 @@ # Test autostrip data = "01/01/2003 , 1.3, abcde" kwargs = dict(delimiter=",", dtype=None) - mtest = np.ndfromtxt(TextIO(data), **kwargs) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + mtest = np.ndfromtxt(TextIO(data), **kwargs) + assert_(w[0].category is np.VisibleDeprecationWarning) ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')], dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')]) assert_equal(mtest, ctrl) - mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs) + assert_(w[0].category is np.VisibleDeprecationWarning) ctrl = np.array([('01/01/2003', 1.3, 'abcde')], dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) assert_equal(mtest, ctrl) @@ -1669,28 +1882,142 @@ def 
test_comments_is_none(self): # Github issue 329 (None was previously being converted to 'None'). - test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), - dtype=None, comments=None, delimiter=',') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) assert_equal(test[1], b'testNonetherestofthedata') - test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), - dtype=None, comments=None, delimiter=',') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) assert_equal(test[1], b' testNonetherestofthedata') + def test_latin1(self): + latin1 = b'\xf6\xfc\xf6' + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + latin1 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test[1, 0], b"test1") + assert_equal(test[1, 1], b"testNonethe" + latin1) + assert_equal(test[1, 2], b"test3") + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding='latin1') + assert_equal(test[1, 0], u"test1") + assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1')) + assert_equal(test[1, 2], u"test3") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1), + dtype=None, comments=None, delimiter=',') 
+ assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test['f0'], 0) + assert_equal(test['f1'], b"testNonethe" + latin1) + + def test_binary_decode_autodtype(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_utf8_byte_encoding(self): + utf8 = b"\xcf\x96" + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + utf8 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + ctl = np.array([ + [b'norm1', b'norm2', b'norm3'], + [b'test1', b'testNonethe' + utf8, b'test3'], + [b'norm1', b'norm2', b'norm3']]) + assert_array_equal(test, ctl) + + def test_utf8_file(self): + utf8 = b"\xcf\x96" + latin1 = b"\xf6\xfc\xf6" + with temppath() as path: + with open(path, "wb") as f: + f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + ctl = np.array([ + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"], + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]], + dtype=np.unicode) + assert_array_equal(test, ctl) + + # test a mixed dtype + with open(path, "wb") as f: + f.write(b"0,testNonethe" + utf8) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + assert_equal(test['f0'], 0) + assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) + + + def test_utf8_file_nodtype_unicode(self): + # bytes encoding with non-latin1 -> unicode upcast + utf8 = u'\u03d6' + latin1 = u'\xf6\xfc\xf6' + + # skip test if cannot encode utf8 test string with preferred + # encoding. 
The preferred encoding is assumed to be the default + # encoding of io.open. Will need to change this for PyTest, maybe + # using pytest.mark.xfail(raises=***). + try: + import locale + encoding = locale.getpreferredencoding() + utf8.encode(encoding) + except (UnicodeError, ImportError): + raise SkipTest('Skipping test_utf8_file_nodtype_unicode, ' + 'unable to encode utf8 in preferred encoding') + + with temppath() as path: + with io.open(path, "wt") as f: + f.write(u"norm1,norm2,norm3\n") + f.write(u"norm1," + latin1 + u",norm3\n") + f.write(u"test1,testNonethe" + utf8 + u",test3\n") + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', + np.VisibleDeprecationWarning) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',') + # Check for warning when encoding not specified. + assert_(w[0].category is np.VisibleDeprecationWarning) + ctl = np.array([ + ["norm1", "norm2", "norm3"], + ["norm1", latin1, "norm3"], + ["test1", "testNonethe" + utf8, "test3"]], + dtype=np.unicode) + assert_array_equal(test, ctl) + def test_recfromtxt(self): # data = TextIO('A,B\n0,1\n2,3') kwargs = dict(delimiter=",", missing_values="N/A", names=True) test = np.recfromtxt(data, **kwargs) control = np.array([(0, 1), (2, 3)], - dtype=[('A', np.int), ('B', np.int)]) - self.assertTrue(isinstance(test, np.recarray)) + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) assert_equal(test, control) # data = TextIO('A,B\n0,1\n2,N/A') test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs) control = ma.array([(0, 1), (2, -1)], mask=[(False, False), (False, True)], - dtype=[('A', np.int), ('B', np.int)]) + dtype=[('A', int), ('B', int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(test.A, [0, 2]) @@ -1701,15 +2028,15 @@ kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) test = np.recfromcsv(data, dtype=None, **kwargs) control = np.array([(0, 1), (2, 3)], - dtype=[('A', 
np.int), ('B', np.int)]) - self.assertTrue(isinstance(test, np.recarray)) + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) assert_equal(test, control) # data = TextIO('A,B\n0,1\n2,N/A') test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs) control = ma.array([(0, 1), (2, -1)], mask=[(False, False), (False, True)], - dtype=[('A', np.int), ('B', np.int)]) + dtype=[('A', int), ('B', int)]) assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(test.A, [0, 2]) @@ -1717,16 +2044,23 @@ data = TextIO('A,B\n0,1\n2,3') test = np.recfromcsv(data, missing_values='N/A',) control = np.array([(0, 1), (2, 3)], - dtype=[('a', np.int), ('b', np.int)]) - self.assertTrue(isinstance(test, np.recarray)) + dtype=[('a', int), ('b', int)]) + assert_(isinstance(test, np.recarray)) assert_equal(test, control) # data = TextIO('A,B\n0,1\n2,3') - dtype = [('a', np.int), ('b', np.float)] + dtype = [('a', int), ('b', float)] test = np.recfromcsv(data, missing_values='N/A', dtype=dtype) control = np.array([(0, 1), (2, 3)], dtype=dtype) - self.assertTrue(isinstance(test, np.recarray)) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + #gh-10394 + data = TextIO('color\n"red"\n"blue"') + test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')}) + control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))]) + assert_equal(test.dtype, control.dtype) assert_equal(test, control) def test_max_rows(self): @@ -1787,11 +2121,7 @@ # Test that we can load data from a filename as well as a file # object tgt = np.arange(6).reshape((2, 3)) - if sys.version_info[0] >= 3: - # python 3k is known to fail for '\r' - linesep = ('\n', '\r\n') - else: - linesep = ('\n', '\r\n', '\r') + linesep = ('\n', '\r\n', '\r') for sep in linesep: data = '0 1 2' + sep + '3 4 5' @@ -1801,6 +2131,22 @@ res = np.genfromtxt(name) assert_array_equal(res, tgt) + def test_gft_from_gzip(self): + # Test that we can load data from a 
gzipped file + wanted = np.arange(6).reshape((2, 3)) + linesep = ('\n', '\r\n', '\r') + + for sep in linesep: + data = '0 1 2' + sep + '3 4 5' + s = BytesIO() + with gzip.GzipFile(fileobj=s, mode='w') as g: + g.write(asbytes(data)) + + with temppath(suffix='.gz2') as name: + with open(name, 'w') as f: + f.write(data) + assert_array_equal(np.genfromtxt(name), wanted) + def test_gft_using_generator(self): # gft doesn't work with unicode. def count(): @@ -1827,7 +2173,7 @@ assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) - assert_(test.dtype['f0'] == np.float) + assert_(test.dtype['f0'] == float) assert_(test.dtype['f1'] == np.int64) assert_(test.dtype['f2'] == np.integer) @@ -1836,9 +2182,9 @@ assert_equal(test['f2'], 1024) -class TestPathUsage(TestCase): +class TestPathUsage(object): # Test that pathlib.Path can be used - @np.testing.dec.skipif(Path is None, "No pathlib.Path") + @dec.skipif(Path is None, "No pathlib.Path") def test_loadtxt(self): with temppath(suffix='.txt') as path: path = Path(path) @@ -1847,7 +2193,7 @@ x = np.loadtxt(path) assert_array_equal(x, a) - @np.testing.dec.skipif(Path is None, "No pathlib.Path") + @dec.skipif(Path is None, "No pathlib.Path") def test_save_load(self): # Test that pathlib.Path instances can be used with savez. with temppath(suffix='.npy') as path: @@ -1857,7 +2203,7 @@ data = np.load(path) assert_array_equal(data, a) - @np.testing.dec.skipif(Path is None, "No pathlib.Path") + @dec.skipif(Path is None, "No pathlib.Path") def test_savez_load(self): # Test that pathlib.Path instances can be used with savez. with temppath(suffix='.npz') as path: @@ -1866,7 +2212,7 @@ with np.load(path) as data: assert_array_equal(data['lab'], 'place holder') - @np.testing.dec.skipif(Path is None, "No pathlib.Path") + @dec.skipif(Path is None, "No pathlib.Path") def test_savez_compressed_load(self): # Test that pathlib.Path instances can be used with savez. 
with temppath(suffix='.npz') as path: @@ -1876,7 +2222,7 @@ assert_array_equal(data['lab'], 'place holder') data.close() - @np.testing.dec.skipif(Path is None, "No pathlib.Path") + @dec.skipif(Path is None, "No pathlib.Path") def test_genfromtxt(self): with temppath(suffix='.txt') as path: path = Path(path) @@ -1885,7 +2231,7 @@ data = np.genfromtxt(path) assert_array_equal(a, data) - @np.testing.dec.skipif(Path is None, "No pathlib.Path") + @dec.skipif(Path is None, "No pathlib.Path") def test_ndfromtxt(self): # Test outputing a standard ndarray with temppath(suffix='.txt') as path: @@ -1897,7 +2243,7 @@ test = np.ndfromtxt(path, dtype=int) assert_array_equal(test, control) - @np.testing.dec.skipif(Path is None, "No pathlib.Path") + @dec.skipif(Path is None, "No pathlib.Path") def test_mafromtxt(self): # From `test_fancy_dtype_alt` above with temppath(suffix='.txt') as path: @@ -1909,7 +2255,7 @@ control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)]) assert_equal(test, control) - @np.testing.dec.skipif(Path is None, "No pathlib.Path") + @dec.skipif(Path is None, "No pathlib.Path") def test_recfromtxt(self): with temppath(suffix='.txt') as path: path = Path(path) @@ -1919,11 +2265,11 @@ kwargs = dict(delimiter=",", missing_values="N/A", names=True) test = np.recfromtxt(path, **kwargs) control = np.array([(0, 1), (2, 3)], - dtype=[('A', np.int), ('B', np.int)]) - self.assertTrue(isinstance(test, np.recarray)) + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) assert_equal(test, control) - @np.testing.dec.skipif(Path is None, "No pathlib.Path") + @dec.skipif(Path is None, "No pathlib.Path") def test_recfromcsv(self): with temppath(suffix='.txt') as path: path = Path(path) @@ -1933,8 +2279,8 @@ kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) test = np.recfromcsv(path, dtype=None, **kwargs) control = np.array([(0, 1), (2, 3)], - dtype=[('A', np.int), ('B', np.int)]) - self.assertTrue(isinstance(test, np.recarray)) + 
dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) assert_equal(test, control) diff -Nru python-numpy-1.13.3/numpy/lib/tests/test__iotools.py python-numpy-1.14.5/numpy/lib/tests/test__iotools.py --- python-numpy-1.13.3/numpy/lib/tests/test__iotools.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test__iotools.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,80 +6,85 @@ import numpy as np from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_allclose, - assert_raises + run_module_suite, assert_, assert_equal, assert_allclose, assert_raises, ) from numpy.lib._iotools import ( LineSplitter, NameValidator, StringConverter, has_nested_fields, easy_dtype, flatten_dtype ) +from numpy.compat import unicode -class TestLineSplitter(TestCase): +class TestLineSplitter(object): "Tests the LineSplitter class." def test_no_delimiter(self): "Test LineSplitter w/o delimiter" - strg = b" 1 2 3 4 5 # test" + strg = " 1 2 3 4 5 # test" test = LineSplitter()(strg) - assert_equal(test, [b'1', b'2', b'3', b'4', b'5']) + assert_equal(test, ['1', '2', '3', '4', '5']) test = LineSplitter('')(strg) - assert_equal(test, [b'1', b'2', b'3', b'4', b'5']) + assert_equal(test, ['1', '2', '3', '4', '5']) def test_space_delimiter(self): "Test space delimiter" - strg = b" 1 2 3 4 5 # test" - test = LineSplitter(b' ')(strg) - assert_equal(test, [b'1', b'2', b'3', b'4', b'', b'5']) - test = LineSplitter(b' ')(strg) - assert_equal(test, [b'1 2 3 4', b'5']) + strg = " 1 2 3 4 5 # test" + test = LineSplitter(' ')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + test = LineSplitter(' ')(strg) + assert_equal(test, ['1 2 3 4', '5']) def test_tab_delimiter(self): "Test tab delimiter" - strg = b" 1\t 2\t 3\t 4\t 5 6" - test = LineSplitter(b'\t')(strg) - assert_equal(test, [b'1', b'2', b'3', b'4', b'5 6']) - strg = b" 1 2\t 3 4\t 5 6" - test = LineSplitter(b'\t')(strg) - assert_equal(test, [b'1 2', b'3 4', b'5 6']) + 
strg = " 1\t 2\t 3\t 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1', '2', '3', '4', '5 6']) + strg = " 1 2\t 3 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1 2', '3 4', '5 6']) def test_other_delimiter(self): "Test LineSplitter on delimiter" - strg = b"1,2,3,4,,5" - test = LineSplitter(b',')(strg) - assert_equal(test, [b'1', b'2', b'3', b'4', b'', b'5']) + strg = "1,2,3,4,,5" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) # - strg = b" 1,2,3,4,,5 # test" - test = LineSplitter(b',')(strg) - assert_equal(test, [b'1', b'2', b'3', b'4', b'', b'5']) + strg = " 1,2,3,4,,5 # test" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + # gh-11028 bytes comment/delimiters should get decoded + strg = b" 1,2,3,4,,5 % test" + test = LineSplitter(delimiter=b',', comments=b'%')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) def test_constant_fixed_width(self): "Test LineSplitter w/ fixed-width fields" - strg = b" 1 2 3 4 5 # test" + strg = " 1 2 3 4 5 # test" test = LineSplitter(3)(strg) - assert_equal(test, [b'1', b'2', b'3', b'4', b'', b'5', b'']) + assert_equal(test, ['1', '2', '3', '4', '', '5', '']) # - strg = b" 1 3 4 5 6# test" + strg = " 1 3 4 5 6# test" test = LineSplitter(20)(strg) - assert_equal(test, [b'1 3 4 5 6']) + assert_equal(test, ['1 3 4 5 6']) # - strg = b" 1 3 4 5 6# test" + strg = " 1 3 4 5 6# test" test = LineSplitter(30)(strg) - assert_equal(test, [b'1 3 4 5 6']) + assert_equal(test, ['1 3 4 5 6']) def test_variable_fixed_width(self): - strg = b" 1 3 4 5 6# test" + strg = " 1 3 4 5 6# test" test = LineSplitter((3, 6, 6, 3))(strg) - assert_equal(test, [b'1', b'3', b'4 5', b'6']) + assert_equal(test, ['1', '3', '4 5', '6']) # - strg = b" 1 3 4 5 6# test" + strg = " 1 3 4 5 6# test" test = LineSplitter((6, 6, 9))(strg) - assert_equal(test, [b'1', b'3 4', b'5 6']) + assert_equal(test, ['1', '3 4', '5 6']) # 
----------------------------------------------------------------------------- -class TestNameValidator(TestCase): +class TestNameValidator(object): def test_case_sensitivity(self): "Test case sensitivity" @@ -134,13 +139,10 @@ def _bytes_to_date(s): - if sys.version_info[0] >= 3: - return date(*time.strptime(s.decode('latin1'), "%Y-%m-%d")[:3]) - else: - return date(*time.strptime(s, "%Y-%m-%d")[:3]) + return date(*time.strptime(s, "%Y-%m-%d")[:3]) -class TestStringConverter(TestCase): +class TestStringConverter(object): "Test StringConverter" def test_creation(self): @@ -156,39 +158,45 @@ assert_equal(converter._status, 0) # test int - assert_equal(converter.upgrade(b'0'), 0) + assert_equal(converter.upgrade('0'), 0) assert_equal(converter._status, 1) - # On systems where integer defaults to 32-bit, the statuses will be + # On systems where long defaults to 32-bit, the statuses will be # offset by one, so we check for this here. import numpy.core.numeric as nx - status_offset = int(nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize) + status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize) # test int > 2**32 - assert_equal(converter.upgrade(b'17179869184'), 17179869184) + assert_equal(converter.upgrade('17179869184'), 17179869184) assert_equal(converter._status, 1 + status_offset) # test float - assert_allclose(converter.upgrade(b'0.'), 0.0) + assert_allclose(converter.upgrade('0.'), 0.0) assert_equal(converter._status, 2 + status_offset) # test complex - assert_equal(converter.upgrade(b'0j'), complex('0j')) + assert_equal(converter.upgrade('0j'), complex('0j')) assert_equal(converter._status, 3 + status_offset) # test str - assert_equal(converter.upgrade(b'a'), b'a') - assert_equal(converter._status, len(converter._mapper) - 1) + # note that the longdouble type has been skipped, so the + # _status increases by 2. Everything should succeed with + # unicode conversion (5). 
+ for s in ['a', u'a', b'a']: + res = converter.upgrade(s) + assert_(type(res) is unicode) + assert_equal(res, u'a') + assert_equal(converter._status, 5 + status_offset) def test_missing(self): "Tests the use of missing values." - converter = StringConverter(missing_values=(b'missing', - b'missed')) - converter.upgrade(b'0') - assert_equal(converter(b'0'), 0) - assert_equal(converter(b''), converter.default) - assert_equal(converter(b'missing'), converter.default) - assert_equal(converter(b'missed'), converter.default) + converter = StringConverter(missing_values=('missing', + 'missed')) + converter.upgrade('0') + assert_equal(converter('0'), 0) + assert_equal(converter(''), converter.default) + assert_equal(converter('missing'), converter.default) + assert_equal(converter('missed'), converter.default) try: converter('miss') except ValueError: @@ -199,66 +207,67 @@ dateparser = _bytes_to_date StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) convert = StringConverter(dateparser, date(2000, 1, 1)) - test = convert(b'2001-01-01') + test = convert('2001-01-01') assert_equal(test, date(2001, 1, 1)) - test = convert(b'2009-01-01') + test = convert('2009-01-01') assert_equal(test, date(2009, 1, 1)) - test = convert(b'') + test = convert('') assert_equal(test, date(2000, 1, 1)) def test_string_to_object(self): "Make sure that string-to-object functions are properly recognized" + old_mapper = StringConverter._mapper[:] # copy of list conv = StringConverter(_bytes_to_date) - assert_equal(conv._mapper[-2][0](0), 0j) + assert_equal(conv._mapper, old_mapper) assert_(hasattr(conv, 'default')) def test_keep_default(self): "Make sure we don't lose an explicit default" - converter = StringConverter(None, missing_values=b'', + converter = StringConverter(None, missing_values='', default=-999) - converter.upgrade(b'3.14159265') + converter.upgrade('3.14159265') assert_equal(converter.default, -999) assert_equal(converter.type, np.dtype(float)) # converter = 
StringConverter( - None, missing_values=b'', default=0) - converter.upgrade(b'3.14159265') + None, missing_values='', default=0) + converter.upgrade('3.14159265') assert_equal(converter.default, 0) assert_equal(converter.type, np.dtype(float)) def test_keep_default_zero(self): "Check that we don't lose a default of 0" converter = StringConverter(int, default=0, - missing_values=b"N/A") + missing_values="N/A") assert_equal(converter.default, 0) def test_keep_missing_values(self): "Check that we're not losing missing values" converter = StringConverter(int, default=0, - missing_values=b"N/A") + missing_values="N/A") assert_equal( - converter.missing_values, set([b'', b'N/A'])) + converter.missing_values, set(['', 'N/A'])) def test_int64_dtype(self): "Check that int64 integer types can be specified" converter = StringConverter(np.int64, default=0) - val = b"-9223372036854775807" + val = "-9223372036854775807" assert_(converter(val) == -9223372036854775807) - val = b"9223372036854775807" + val = "9223372036854775807" assert_(converter(val) == 9223372036854775807) def test_uint64_dtype(self): "Check that uint64 integer types can be specified" converter = StringConverter(np.uint64, default=0) - val = b"9223372043271415339" + val = "9223372043271415339" assert_(converter(val) == 9223372043271415339) -class TestMiscFunctions(TestCase): +class TestMiscFunctions(object): def test_has_nested_dtype(self): "Test has_nested_dtype" - ndtype = np.dtype(np.float) + ndtype = np.dtype(float) assert_equal(has_nested_fields(ndtype), False) ndtype = np.dtype([('A', '|S3'), ('B', float)]) assert_equal(has_nested_fields(ndtype), False) diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_mixins.py python-numpy-1.14.5/numpy/lib/tests/test_mixins.py --- python-numpy-1.13.3/numpy/lib/tests/test_mixins.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_mixins.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,7 +6,8 @@ import numpy as np from numpy.testing import ( - 
TestCase, run_module_suite, assert_, assert_equal, assert_raises) + run_module_suite, assert_, assert_equal, assert_raises + ) PY2 = sys.version_info.major < 3 @@ -99,7 +100,7 @@ ] -class TestNDArrayOperatorsMixin(TestCase): +class TestNDArrayOperatorsMixin(object): def test_array_like_add(self): diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_nanfunctions.py python-numpy-1.14.5/numpy/lib/tests/test_nanfunctions.py --- python-numpy-1.13.3/numpy/lib/tests/test_nanfunctions.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_nanfunctions.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,7 +4,7 @@ import numpy as np from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal, + run_module_suite, assert_, assert_equal, assert_almost_equal, assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings ) @@ -35,7 +35,7 @@ [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) -class TestNanFunctions_MinMax(TestCase): +class TestNanFunctions_MinMax(object): nanfuncs = [np.nanmin, np.nanmax] stdfuncs = [np.min, np.max] @@ -165,7 +165,7 @@ assert_(issubclass(w[0].category, RuntimeWarning)) -class TestNanFunctions_ArgminArgmax(TestCase): +class TestNanFunctions_ArgminArgmax(object): nanfuncs = [np.nanargmin, np.nanargmax] @@ -224,7 +224,7 @@ assert_(np.isscalar(res)) -class TestNanFunctions_IntTypes(TestCase): +class TestNanFunctions_IntTypes(object): int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64) @@ -396,7 +396,7 @@ assert_(np.isscalar(res)) -class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin): +class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): nanfuncs = [np.nansum, np.nanprod] stdfuncs = [np.sum, np.prod] @@ -430,7 +430,7 @@ assert_equal(res, tgt) -class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin): +class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): nanfuncs = [np.nancumsum, np.nancumprod] 
stdfuncs = [np.cumsum, np.cumprod] @@ -513,7 +513,7 @@ assert_almost_equal(res, tgt) -class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin): +class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): nanfuncs = [np.nanmean, np.nanvar, np.nanstd] stdfuncs = [np.mean, np.var, np.std] @@ -585,7 +585,7 @@ assert_(len(w) == 0) -class TestNanFunctions_Median(TestCase): +class TestNanFunctions_Median(object): def test_mutation(self): # Check that passed array is not modified. @@ -749,7 +749,7 @@ ([np.nan] * i) + [-inf] * j) -class TestNanFunctions_Percentile(TestCase): +class TestNanFunctions_Percentile(object): def test_mutation(self): # Check that passed array is not modified. diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_polynomial.py python-numpy-1.14.5/numpy/lib/tests/test_polynomial.py --- python-numpy-1.13.3/numpy/lib/tests/test_polynomial.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_polynomial.py 2018-06-12 18:28:52.000000000 +0000 @@ -80,12 +80,12 @@ ''' import numpy as np from numpy.testing import ( - run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, + run_module_suite, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, rundocs ) -class TestDocs(TestCase): +class TestDocs(object): def test_doctests(self): return rundocs() @@ -222,19 +222,19 @@ assert_equal(p == p2, False) assert_equal(p != p2, True) - def test_poly_coeffs_mutable(self): - """ Coefficients should be modifiable """ + def test_poly_coeffs_immutable(self): + """ Coefficients should not be modifiable """ p = np.poly1d([1, 2, 3]) - p.coeffs += 1 - assert_equal(p.coeffs, [2, 3, 4]) + try: + # despite throwing an exception, this used to change state + p.coeffs += 1 + except Exception: + pass + assert_equal(p.coeffs, [1, 2, 3]) p.coeffs[2] += 10 - assert_equal(p.coeffs, [2, 3, 14]) - - # this never used to be allowed - let's not add features to deprecated - # 
APIs - assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1)) + assert_equal(p.coeffs, [1, 2, 3]) if __name__ == "__main__": diff -Nru python-numpy-1.13.3/numpy/lib/tests/test_recfunctions.py python-numpy-1.14.5/numpy/lib/tests/test_recfunctions.py --- python-numpy-1.13.3/numpy/lib/tests/test_recfunctions.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/tests/test_recfunctions.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,20 +4,22 @@ import numpy.ma as ma from numpy.ma.mrecords import MaskedRecords from numpy.ma.testutils import assert_equal -from numpy.testing import TestCase, run_module_suite, assert_, assert_raises +from numpy.testing import ( + run_module_suite, assert_, assert_raises, dec + ) from numpy.lib.recfunctions import ( drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, - find_duplicates, merge_arrays, append_fields, stack_arrays, join_by - ) + find_duplicates, merge_arrays, append_fields, stack_arrays, join_by, + repack_fields) get_names = np.lib.recfunctions.get_names get_names_flat = np.lib.recfunctions.get_names_flat zip_descr = np.lib.recfunctions.zip_descr -class TestRecFunctions(TestCase): +class TestRecFunctions(object): # Misc tests - def setUp(self): + def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array([('A', 1.), ('B', 2.)], @@ -190,8 +192,20 @@ assert_equal(sorted(test[-1]), control) assert_equal(test[0], a[test[-1]]) + def test_repack_fields(self): + dt = np.dtype('u1,f4,i8', align=True) + a = np.zeros(2, dtype=dt) + + assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) + assert_equal(repack_fields(a).itemsize, 13) + assert_equal(repack_fields(repack_fields(dt), align=True), dt) -class TestRecursiveFillFields(TestCase): + # make sure type is preserved + dt = np.dtype((np.record, dt)) + assert_(repack_fields(dt).type is np.record) + + +class TestRecursiveFillFields(object): # Test recursive_fill_fields. 
def test_simple_flexible(self): # Test recursive_fill_fields on flexible-array @@ -214,10 +228,10 @@ assert_equal(test, control) -class TestMergeArrays(TestCase): +class TestMergeArrays(object): # Test merge_arrays - def setUp(self): + def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( @@ -347,10 +361,10 @@ assert_equal(test, control) -class TestAppendFields(TestCase): +class TestAppendFields(object): # Test append_fields - def setUp(self): + def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( @@ -401,9 +415,9 @@ assert_equal(test, control) -class TestStackArrays(TestCase): +class TestStackArrays(object): # Test stack_arrays - def setUp(self): + def setup(self): x = np.array([1, 2, ]) y = np.array([10, 20, 30]) z = np.array( @@ -417,11 +431,11 @@ (_, x, _, _) = self.data test = stack_arrays((x,)) assert_equal(test, x) - self.assertTrue(test is x) + assert_(test is x) test = stack_arrays(x) assert_equal(test, x) - self.assertTrue(test is x) + assert_(test is x) def test_unnamed_fields(self): # Tests combinations of arrays w/o named fields @@ -546,9 +560,38 @@ assert_equal(test, control) assert_equal(test.mask, control.mask) + def test_subdtype(self): + z = np.array([ + ('A', 1), ('B', 2) + ], dtype=[('A', '|S3'), ('B', float, (1,))]) + zz = np.array([ + ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) 
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) + + res = stack_arrays((z, zz)) + expected = ma.array( + data=[ + (b'A', [1.0], 0), + (b'B', [2.0], 0), + (b'a', [10.0], 100.0), + (b'b', [20.0], 200.0), + (b'c', [30.0], 300.0)], + mask=[ + (False, [False], True), + (False, [False], True), + (False, [False], False), + (False, [False], False), + (False, [False], False) + ], + dtype=zz.dtype + ) + assert_equal(res.dtype, expected.dtype) + assert_equal(res, expected) + assert_equal(res.mask, expected.mask) + -class TestJoinBy(TestCase): - def setUp(self): +class TestJoinBy(object): + def setup(self): self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), np.arange(100, 110))), dtype=[('a', int), ('b', int), ('c', int)]) @@ -588,6 +631,16 @@ dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) + def test_join_subdtype(self): + # tests the bug in https://stackoverflow.com/q/44769632/102441 + from numpy.lib import recfunctions as rfn + foo = np.array([(1,)], + dtype=[('key', int)]) + bar = np.array([(1, np.array([1,2,3]))], + dtype=[('key', int), ('value', 'uint16', 3)]) + res = join_by('key', foo, bar) + assert_equal(res, bar.view(ma.MaskedArray)) + def test_outer_join(self): a, b = self.a, self.b @@ -646,10 +699,66 @@ b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b) + @dec.knownfailureif(True) + def test_same_name_different_dtypes_key(self): + a_dtype = np.dtype([('key', 'S5'), ('value', '= M: return m if k >= 0: @@ -717,7 +723,7 @@ """ m = ones((n, n), int) a = mask_func(m, k) - return where(a != 0) + return nonzero(a != 0) def tril_indices(n, k=0, m=None): @@ -797,7 +803,7 @@ [-10, -10, -10, -10]]) """ - return where(tri(n, m, k=k, dtype=bool)) + return nonzero(tri(n, m, k=k, dtype=bool)) def tril_indices_from(arr, k=0): @@ -907,7 +913,7 @@ [ 12, 13, 14, -1]]) """ - return where(~tri(n, m, k=k-1, dtype=bool)) + return nonzero(~tri(n, m, k=k-1, dtype=bool)) def 
triu_indices_from(arr, k=0): diff -Nru python-numpy-1.13.3/numpy/lib/type_check.py python-numpy-1.14.5/numpy/lib/type_check.py --- python-numpy-1.13.3/numpy/lib/type_check.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/type_check.py 2018-06-12 18:28:52.000000000 +0000 @@ -98,8 +98,7 @@ array([ 2., 3.]) """ - dtype = _nx.obj2sctype(dtype) - if not issubclass(dtype, _nx.inexact): + if not _nx.issubdtype(dtype, _nx.inexact): dtype = _nx.float_ return asarray(a, dtype=dtype) @@ -209,7 +208,7 @@ Examples -------- >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) - array([ True, False, False, False, False, True], dtype=bool) + array([ True, False, False, False, False, True]) """ ax = asanyarray(x) @@ -243,7 +242,7 @@ Examples -------- >>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j]) - array([False, True, True, True, True, False], dtype=bool) + array([False, True, True, True, True, False]) """ return imag(x) == 0 @@ -331,11 +330,16 @@ def nan_to_num(x, copy=True): """ - Replace nan with zero and inf with finite numbers. + Replace nan with zero and inf with large finite numbers. - Returns an array or scalar replacing Not a Number (NaN) with zero, - (positive) infinity with a very large number and negative infinity - with a very small (or negative) number. + If `x` is inexact, NaN is replaced by zero, and infinity and -infinity + replaced by the respectively largest and most negative finite floating + point values representable by ``x.dtype``. + + For complex dtypes, the above is applied to each of the real and + imaginary components of `x` separately. + + If `x` is not inexact, then no replacements are made. Parameters ---------- @@ -352,12 +356,8 @@ Returns ------- out : ndarray - New Array with the same shape as `x` and dtype of the element in - `x` with the greatest precision. 
If `x` is inexact, then NaN is - replaced by zero, and infinity (-infinity) is replaced by the - largest (smallest or most negative) floating point value that fits - in the output dtype. If `x` is not inexact, then a copy of `x` is - returned. + `x`, with the non-finite values replaced. If `copy` is False, this may + be `x` itself. See Also -------- @@ -372,15 +372,17 @@ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. - Examples -------- - >>> np.set_printoptions(precision=8) >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) >>> np.nan_to_num(x) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, -1.28000000e+002, 1.28000000e+002]) - + >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) + >>> np.nan_to_num(y) + array([ 1.79769313e+308 +0.00000000e+000j, + 0.00000000e+000 +0.00000000e+000j, + 0.00000000e+000 +1.79769313e+308j]) """ x = _nx.array(x, subok=True, copy=copy) xtype = x.dtype.type @@ -430,12 +432,12 @@ ----- Machine epsilon varies from machine to machine and between data types but Python floats on most platforms have a machine epsilon equal to - 2.2204460492503131e-16. You can use 'np.finfo(np.float).eps' to print + 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print out the machine epsilon for floats. Examples -------- - >>> np.finfo(np.float).eps + >>> np.finfo(float).eps 2.2204460492503131e-16 >>> np.real_if_close([2.1 + 4e-14j], tol=1000) @@ -577,8 +579,8 @@ an integer array, the minimum precision type that is returned is a 64-bit floating point dtype. - All input arrays can be safely cast to the returned dtype without loss - of information. + All input arrays except int64 and uint64 can be safely cast to the + returned dtype without loss of information. 
Parameters ---------- diff -Nru python-numpy-1.13.3/numpy/lib/ufunclike.py python-numpy-1.14.5/numpy/lib/ufunclike.py --- python-numpy-1.13.3/numpy/lib/ufunclike.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/ufunclike.py 2018-06-12 17:31:56.000000000 +0000 @@ -128,7 +128,7 @@ >>> np.isposinf(np.NINF) array(False, dtype=bool) >>> np.isposinf([-np.inf, 0., np.inf]) - array([False, False, True], dtype=bool) + array([False, False, True]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) @@ -189,7 +189,7 @@ >>> np.isneginf(np.PINF) array(False, dtype=bool) >>> np.isneginf([-np.inf, 0., np.inf]) - array([ True, False, False], dtype=bool) + array([ True, False, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) diff -Nru python-numpy-1.13.3/numpy/lib/utils.py python-numpy-1.14.5/numpy/lib/utils.py --- python-numpy-1.13.3/numpy/lib/utils.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/lib/utils.py 2018-06-12 18:28:52.000000000 +0000 @@ -557,7 +557,7 @@ if len(arglist) > 1: arglist[1] = "("+arglist[1] arguments = ", ".join(arglist[1:]) - except: + except Exception: pass if len(name+arguments) > maxwidth: @@ -689,7 +689,7 @@ try: print("In file: %s\n" % inspect.getsourcefile(object), file=output) print(inspect.getsource(object), file=output) - except: + except Exception: print("Not available for this object.", file=output) @@ -1138,7 +1138,7 @@ """ if data.size == 0: return result - data = np.rollaxis(data, axis, data.ndim) + data = np.moveaxis(data, axis, -1) n = np.isnan(data[..., -1]) # masked NaN values are ok if np.ma.isMaskedArray(n): diff -Nru python-numpy-1.13.3/numpy/linalg/__init__.py python-numpy-1.14.5/numpy/linalg/__init__.py --- python-numpy-1.13.3/numpy/linalg/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -50,6 +50,6 @@ from .linalg import * -from numpy.testing.nosetester import 
_numpy_tester +from numpy.testing import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench diff -Nru python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_blas.c python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_blas.c --- python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_blas.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_blas.c 2018-06-12 17:31:56.000000000 +0000 @@ -200,9 +200,6 @@ integer i__1, i__2; complex q__1, q__2, q__3; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, ix, iy; static complex ctemp; @@ -378,9 +375,6 @@ i__3, i__4, i__5, i__6; complex q__1, q__2, q__3, q__4; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, l, info; static logical nota, notb; @@ -1051,9 +1045,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; complex q__1, q__2, q__3; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, ix, iy, jx, jy, kx, ky, info; static complex temp; @@ -1442,9 +1433,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; complex q__1, q__2; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, ix, jy, kx, info; static complex temp; @@ -1837,9 +1825,6 @@ real r__1; complex q__1, q__2, q__3, q__4; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, ix, iy, jx, jy, kx, ky, info; static complex temp1, temp2; @@ -2248,9 +2233,6 @@ real r__1; complex q__1, q__2, q__3, q__4; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, ix, iy, jx, jy, kx, ky, info; static complex temp1, temp2; @@ -2677,9 +2659,6 @@ real r__1; complex q__1, q__2, q__3, q__4, q__5, q__6; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ 
static integer i__, j, l, info; static complex temp1, temp2; @@ -3329,9 +3308,6 @@ real r__1; complex q__1, q__2, q__3; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, l, info; static complex temp; @@ -4039,9 +4015,6 @@ real r__1, r__2; complex q__1; - /* Builtin functions */ - double r_imag(complex *); - /* Local variables */ static integer i__, nincx; @@ -4196,9 +4169,6 @@ i__6; complex q__1, q__2, q__3; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, k, info; static complex temp; @@ -4859,9 +4829,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; complex q__1, q__2, q__3; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, ix, jx, kx, info; static complex temp; @@ -5396,9 +5363,6 @@ i__6, i__7; complex q__1, q__2, q__3; - /* Builtin functions */ - void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, k, info; static complex temp; @@ -6069,9 +6033,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; complex q__1, q__2, q__3; - /* Builtin functions */ - void c_div(complex *, complex *, complex *), r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, ix, jx, kx, info; static complex temp; @@ -6667,9 +6628,6 @@ /* System generated locals */ doublereal ret_val, d__1, d__2; - /* Builtin functions */ - double d_imag(doublecomplex *); - /* Purpose ======= @@ -7719,9 +7677,6 @@ integer i__1, i__2; doublereal ret_val, d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer ix; static doublereal ssq, norm, scale, absxi; @@ -10627,9 +10582,6 @@ integer i__1, i__2, i__3; doublereal ret_val, d__1; - /* Builtin functions */ - double d_imag(doublecomplex *), sqrt(doublereal); - /* Local variables */ static integer ix; static doublereal ssq, temp, 
norm, scale; @@ -11125,9 +11077,6 @@ /* System generated locals */ real ret_val, r__1, r__2; - /* Builtin functions */ - double r_imag(complex *); - /* Purpose @@ -11148,9 +11097,6 @@ integer i__1, i__2, i__3; real ret_val, r__1, r__2; - /* Builtin functions */ - double r_imag(complex *); - /* Local variables */ static integer i__, nincx; static real stemp; @@ -11220,9 +11166,6 @@ integer i__1, i__2, i__3; real ret_val, r__1; - /* Builtin functions */ - double r_imag(complex *), sqrt(doublereal); - /* Local variables */ static integer ix; static real ssq, temp, norm, scale; @@ -12337,9 +12280,6 @@ integer i__1, i__2; real ret_val, r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer ix; static real ssq, norm, scale, absxi; @@ -15342,9 +15282,6 @@ integer i__1, i__2; doublecomplex z__1, z__2, z__3; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, ix, iy; static doublecomplex ztemp; @@ -15725,9 +15662,6 @@ i__3, i__4, i__5, i__6; doublecomplex z__1, z__2, z__3, z__4; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, l, info; static logical nota, notb; @@ -16399,9 +16333,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; doublecomplex z__1, z__2, z__3; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, ix, iy, jx, jy, kx, ky, info; static doublecomplex temp; @@ -16791,9 +16722,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; doublecomplex z__1, z__2; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, ix, jy, kx, info; static doublecomplex temp; @@ -17187,9 +17115,6 @@ doublereal d__1; doublecomplex z__1, z__2, z__3, z__4; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ 
static integer i__, j, ix, iy, jx, jy, kx, ky, info; static doublecomplex temp1, temp2; @@ -17599,9 +17524,6 @@ doublereal d__1; doublecomplex z__1, z__2, z__3, z__4; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, ix, iy, jx, jy, kx, ky, info; static doublecomplex temp1, temp2; @@ -18028,9 +17950,6 @@ doublereal d__1; doublecomplex z__1, z__2, z__3, z__4, z__5, z__6; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, l, info; static doublecomplex temp1, temp2; @@ -18680,9 +18599,6 @@ doublereal d__1; doublecomplex z__1, z__2, z__3; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, l, info; static doublecomplex temp; @@ -19339,9 +19255,6 @@ i__6; doublecomplex z__1, z__2, z__3; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, k, info; static doublecomplex temp; @@ -20002,9 +19915,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; doublecomplex z__1, z__2, z__3; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, ix, jx, kx, info; static doublecomplex temp; @@ -20539,10 +20449,6 @@ i__6, i__7; doublecomplex z__1, z__2, z__3; - /* Builtin functions */ - void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg( - doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, k, info; static doublecomplex temp; @@ -21213,10 +21119,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3, i__4, i__5; doublecomplex z__1, z__2, z__3; - /* Builtin functions */ - void z_div(doublecomplex *, doublecomplex *, doublecomplex *), d_cnjg( - doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, ix, jx, kx, info; static doublecomplex temp; diff -Nru 
python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_c_lapack.c python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_c_lapack.c --- python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_c_lapack.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_c_lapack.c 2018-06-12 18:28:52.000000000 +0000 @@ -270,9 +270,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; real r__1, r__2; - /* Builtin functions */ - double r_imag(complex *), c_abs(complex *); - /* Local variables */ static real c__, f, g; static integer i__, j, k, l, m; @@ -664,9 +661,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; complex q__1; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__; static complex alpha; @@ -1313,10 +1307,6 @@ real r__1, r__2; complex q__1, q__2; - /* Builtin functions */ - double sqrt(doublereal), r_imag(complex *); - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, k, ihi; static real scl; @@ -1832,9 +1822,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; complex q__1; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__; static complex alpha; @@ -2704,9 +2691,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; complex q__1; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, k; static complex alpha; @@ -3079,9 +3063,6 @@ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2, i__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, ie, il, ir, iu, blk; static real dum[1], eps; @@ -5695,10 +5676,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; complex q__1; - /* Builtin functions */ - double c_abs(complex *); - void c_div(complex *, complex *, complex *); - /* Local variables */ static integer i__, j, jp; extern /* Subroutine */ int cscal_(integer *, complex *, complex *, @@ -6216,9 +6193,6 @@ integer 
a_dim1, a_offset, i__1, i__2; real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real eps; static integer inde; @@ -7256,9 +7230,6 @@ complex q__1; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static complex hl[2401] /* was [49][49] */; static integer kbot, nmin; @@ -8178,9 +8149,6 @@ integer i__1, i__2; complex q__1; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, ioff; @@ -8480,9 +8448,6 @@ real r__1; complex q__1; - /* Builtin functions */ - double r_imag(complex *); - /* Local variables */ static integer i__, j, l; extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *, @@ -8624,9 +8589,6 @@ real r__1, r__2, r__3, r__4; complex q__1; - /* Builtin functions */ - double r_imag(complex *); - /* Local variables */ static real zi, zr; extern /* Subroutine */ int sladiv_(real *, real *, real *, real *, real * @@ -8680,10 +8642,6 @@ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2; real r__1; - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, j, k, ll, iq, lgn, msd2, smm1, spm1, spm2; static real temp; @@ -9033,9 +8991,6 @@ /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; - /* Builtin functions */ - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, k, n1, n2, iq, iw, iz, ptr, indx, curr, indxc, indxp; extern /* Subroutine */ int claed8_(integer *, integer *, integer *, @@ -9338,9 +9293,6 @@ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1; real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real c__; static integer i__, j; @@ -9761,13 +9713,6 @@ real r__1, r__2, r__3, r__4, r__5, r__6; complex q__1, q__2, q__3, q__4, q__5, q__6, q__7; - /* Builtin functions */ - 
double r_imag(complex *); - void r_cnjg(complex *, complex *); - double c_abs(complex *); - void c_sqrt(complex *, complex *), pow_ci(complex *, complex *, integer *) - ; - /* Local variables */ static integer i__, j, k, l, m; static real s; @@ -10808,9 +10753,6 @@ integer a_dim1, a_offset, i__1, i__2; real ret_val, r__1, r__2; - /* Builtin functions */ - double c_abs(complex *), sqrt(doublereal); - /* Local variables */ static integer i__, j; static real sum, scale; @@ -10977,9 +10919,6 @@ integer a_dim1, a_offset, i__1, i__2; real ret_val, r__1, r__2, r__3; - /* Builtin functions */ - double c_abs(complex *), sqrt(doublereal); - /* Local variables */ static integer i__, j; static real sum, absa, scale; @@ -11215,10 +11154,6 @@ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8; complex q__1, q__2, q__3, q__4, q__5; - /* Builtin functions */ - double r_imag(complex *); - void c_sqrt(complex *, complex *); - /* Local variables */ static integer i__, k; static real s; @@ -12000,9 +11935,6 @@ real r__1, r__2, r__3, r__4, r__5, r__6; complex q__1, q__2, q__3, q__4, q__5, q__6, q__7, q__8; - /* Builtin functions */ - double r_imag(complex *); - /* Local variables */ static real s; static complex h21s, h31s; @@ -12175,10 +12107,6 @@ real r__1, r__2, r__3, r__4, r__5, r__6; complex q__1, q__2; - /* Builtin functions */ - double r_imag(complex *); - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j; static complex s; @@ -12753,10 +12681,6 @@ real r__1, r__2, r__3, r__4, r__5, r__6; complex q__1, q__2; - /* Builtin functions */ - double r_imag(complex *); - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j; static complex s; @@ -13347,10 +13271,6 @@ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8; complex q__1, q__2, q__3, q__4, q__5; - /* Builtin functions */ - double r_imag(complex *); - void c_sqrt(complex *, complex *); - /* Local variables */ static integer i__, k; static real s; @@ -14136,10 +14056,6 
@@ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8, r__9, r__10; complex q__1, q__2, q__3, q__4, q__5, q__6, q__7, q__8; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - double r_imag(complex *); - /* Local variables */ static integer j, k, m, i2, j2, i4, j4, k1; static real h11, h12, h21, h22; @@ -15482,9 +15398,6 @@ real r__1; complex q__1; - /* Builtin functions */ - double r_imag(complex *); - /* Local variables */ static integer i__, j, l; extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *, @@ -15802,9 +15715,6 @@ work_offset, i__1, i__2, i__3, i__4, i__5; complex q__1, q__2; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j; extern /* Subroutine */ int cgemm_(char *, char *, integer *, integer *, @@ -16645,9 +16555,6 @@ real r__1, r__2; complex q__1, q__2; - /* Builtin functions */ - double r_imag(complex *), r_sign(real *, real *); - /* Local variables */ static integer j, knt; static real beta; @@ -17146,11 +17053,6 @@ real r__1, r__2, r__3, r__4, r__5, r__6, r__7, r__8, r__9, r__10; complex q__1, q__2, q__3; - /* Builtin functions */ - double log(doublereal), pow_ri(real *, integer *), r_imag(complex *), - sqrt(doublereal); - void r_cnjg(complex *, complex *); - /* Local variables */ static real d__; static integer i__; @@ -18493,9 +18395,6 @@ integer i__1, i__2, i__3; real r__1; - /* Builtin functions */ - double r_imag(complex *); - /* Local variables */ static integer ix; static real temp1; @@ -19150,10 +19049,6 @@ real r__1, r__2, r__3, r__4; complex q__1, q__2, q__3, q__4; - /* Builtin functions */ - double r_imag(complex *); - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j; static real xj, rec, tjj; @@ -20672,9 +20567,6 @@ real r__1; complex q__1, q__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer j; static real ajj; @@ -21348,9 +21240,6 @@ integer i__1, i__2, i__3, i__4; 
complex q__1, q__2, q__3, q__4; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, ix, iy; static complex stemp; @@ -21485,11 +21374,6 @@ integer z_dim1, z_offset, i__1, i__2, i__3, i__4; real r__1, r__2; - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k, m; static real p; @@ -21955,9 +21839,6 @@ integer z_dim1, z_offset, i__1, i__2; real r__1, r__2; - /* Builtin functions */ - double sqrt(doublereal), r_sign(real *, real *); - /* Local variables */ static real b, c__, f, g; static integer i__, j, k, l, m; @@ -22558,10 +22439,6 @@ real r__1, r__2, r__3; complex q__1, q__2; - /* Builtin functions */ - double r_imag(complex *); - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, k, ii, ki, is; static real ulp; @@ -23060,9 +22937,6 @@ integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3; complex q__1; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer k, m1, m2, m3; static real cs; @@ -23246,9 +23120,6 @@ integer a_dim1, a_offset, i__1, i__2; complex q__1; - /* Builtin functions */ - void c_div(complex *, complex *, complex *); - /* Local variables */ static integer j; static complex ajj; @@ -23415,9 +23286,6 @@ complex q__1; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer j, jb, nb, nn; extern logical lsame_(char *, char *); @@ -24266,9 +24134,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; complex q__1, q__2; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, j, l; extern /* Subroutine */ int cscal_(integer *, complex *, complex *, @@ -24959,9 +24824,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; complex q__1; - /* Builtin 
functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, i1, i2, i3, mi, ni, nq; static complex aii; @@ -25174,9 +25036,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; complex q__1; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq; static complex aii; @@ -25395,9 +25254,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2]; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, ni, nq, nw; static logical left; @@ -25738,9 +25594,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, nh, ni, nq, nw; static logical left; @@ -25965,9 +25818,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; complex q__1; - /* Builtin functions */ - void r_cnjg(complex *, complex *); - /* Local variables */ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq; static complex aii; @@ -26195,9 +26045,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static complex t[4160] /* was [65][64] */; @@ -26503,9 +26350,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static complex t[4160] /* was [65][64] */; @@ -26804,9 +26648,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static complex t[4160] /* was [65][64] */; @@ -27104,9 +26945,6 @@ integer a_dim1, a_offset, 
c_dim1, c_offset, i__1[2], i__2, i__3; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, ni, nq, nw; static logical left; diff -Nru python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_config.c python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_config.c --- python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_config.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_config.c 2018-06-12 17:31:56.000000000 +0000 @@ -43,9 +43,6 @@ integer i__1; doublereal ret_val; - /* Builtin functions */ - double pow_di(doublereal *, integer *); - /* Local variables */ static doublereal t; static integer it; @@ -397,10 +394,6 @@ integer i__1; doublereal d__1, d__2, d__3, d__4, d__5; - /* Builtin functions */ - double pow_di(doublereal *, integer *); - integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void); - /* Local variables */ static doublereal a, b, c__; static integer i__, lt; @@ -696,7 +689,7 @@ doublereal dlamc3_(doublereal *a, doublereal *b) { /* System generated locals */ - volatile doublereal ret_val; + doublereal ret_val; /* @@ -1121,9 +1114,6 @@ integer i__1; real ret_val; - /* Builtin functions */ - double pow_ri(real *, integer *); - /* Local variables */ static real t; static integer it; @@ -1474,10 +1464,6 @@ integer i__1; real r__1, r__2, r__3, r__4, r__5; - /* Builtin functions */ - double pow_ri(real *, integer *); - integer s_wsfe(cilist *), do_fio(integer *, char *, ftnlen), e_wsfe(void); - /* Local variables */ static real a, b, c__; static integer i__, lt; @@ -1773,7 +1759,7 @@ doublereal slamc3_(real *a, real *b) { /* System generated locals */ - volatile real ret_val; + real ret_val; /* diff -Nru python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_d_lapack.c python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_d_lapack.c --- python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_d_lapack.c 2017-09-17 
13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_d_lapack.c 2018-06-12 17:31:56.000000000 +0000 @@ -63,9 +63,6 @@ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; doublereal d__1; - /* Builtin functions */ - double d_sign(doublereal *, doublereal *), log(doublereal); - /* Local variables */ static integer i__, j, k; static doublereal p, r__; @@ -555,10 +552,6 @@ i__2; doublereal d__1, d__2, d__3, d__4; - /* Builtin functions */ - double pow_dd(doublereal *, doublereal *), sqrt(doublereal), d_sign( - doublereal *, doublereal *); - /* Local variables */ static doublereal f, g, h__; static integer i__, j, m; @@ -2660,9 +2653,6 @@ i__2, i__3; doublereal d__1, d__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, k; static doublereal r__, cs, sn; @@ -4067,9 +4057,6 @@ /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4; - /* Builtin functions */ - double log(doublereal); - /* Local variables */ static integer ie, il, mm; static doublereal eps, anrm, bnrm; @@ -5132,9 +5119,6 @@ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2, i__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, ie, il, ir, iu, blk; static doublereal dum[1], eps; @@ -7496,9 +7480,6 @@ doublereal d__1; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static doublereal hl[2401] /* was [49][49] */; @@ -7986,9 +7967,6 @@ /* Subroutine */ int dlabad_(doublereal *small, doublereal *large) { - /* Builtin functions */ - double d_lg10(doublereal *), sqrt(doublereal); - /* -- LAPACK auxiliary routine (version 3.2) -- @@ -8612,9 +8590,6 @@ /* System generated locals */ doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal ab, df, tb, sm, rt, 
adf, acmn, acmx; @@ -8741,10 +8716,6 @@ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2; doublereal d__1; - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, j, k, iq, lgn, msd2, smm1, spm1, spm2; static doublereal temp; @@ -9388,9 +9359,6 @@ integer q_dim1, q_offset, i__1, i__2; doublereal d__1, d__2, d__3, d__4; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal c__; static integer i__, j; @@ -9902,9 +9870,6 @@ integer q_dim1, q_offset, i__1, i__2; doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - /* Local variables */ static integer i__, j, n2, n12, ii, n23, iq2; static doublereal temp; @@ -10209,9 +10174,6 @@ integer i__1; doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal a, b, c__; static integer j; @@ -11155,9 +11117,6 @@ /* System generated locals */ doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal b, c__, w, del, tau, temp; @@ -11283,9 +11242,6 @@ integer i__1; doublereal d__1, d__2, d__3, d__4; - /* Builtin functions */ - double sqrt(doublereal), log(doublereal), pow_di(doublereal *, integer *); - /* Local variables */ static doublereal a, b, c__, f; static integer i__; @@ -11639,9 +11595,6 @@ /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; - /* Builtin functions */ - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, k, n1, n2, is, iw, iz, iq2, ptr, ldq2, indx, curr; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, @@ -11968,9 +11921,6 @@ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1; doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal c__; static integer i__, j; @@ -12427,9 +12377,6 @@ integer 
q_dim1, q_offset, s_dim1, s_offset, i__1, i__2; doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - /* Local variables */ static integer i__, j; static doublereal temp; @@ -12674,10 +12621,6 @@ /* System generated locals */ integer i__1, i__2, i__3; - /* Builtin functions */ - integer pow_ii(integer *, integer *); - double sqrt(doublereal); - /* Local variables */ static integer i__, k, mid, ptr; extern /* Subroutine */ int drot_(integer *, doublereal *, integer *, @@ -12936,9 +12879,6 @@ /* System generated locals */ doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal ab, df, cs, ct, tb, sm, tn, rt, adf, acs; static integer sgn1, sgn2; @@ -13549,9 +13489,6 @@ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3; doublereal d__1, d__2, d__3, d__4; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k, l, m; static doublereal s, v[3]; @@ -15525,9 +15462,6 @@ u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1, i__2; - /* Builtin functions */ - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl, ndb1, nlp1, lvl2, nrp1, nlvl, sqre; @@ -15965,9 +15899,6 @@ integer b_dim1, b_offset, i__1, i__2; doublereal d__1; - /* Builtin functions */ - double log(doublereal), d_sign(doublereal *, doublereal *); - /* Local variables */ static integer c__, i__, j, k; static doublereal r__; @@ -16583,9 +16514,6 @@ integer a_dim1, a_offset, i__1, i__2; doublereal ret_val, d__1, d__2, d__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j; static doublereal sum, scale; @@ -16751,9 +16679,6 @@ integer i__1; doublereal ret_val, d__1, d__2, d__3, d__4, d__5; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__; static doublereal sum, scale; @@ 
-16888,9 +16813,6 @@ integer a_dim1, a_offset, i__1, i__2; doublereal ret_val, d__1, d__2, d__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j; static doublereal sum, absa, scale; @@ -17097,9 +17019,6 @@ /* System generated locals */ doublereal d__1, d__2; - /* Builtin functions */ - double d_sign(doublereal *, doublereal *), sqrt(doublereal); - /* Local variables */ static doublereal p, z__, aa, bb, cc, dd, cs1, sn1, sab, sac, eps, tau, temp, scale, bcmax, bcmis, sigma; @@ -17312,9 +17231,6 @@ /* System generated locals */ doublereal ret_val, d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal w, z__, xabs, yabs; @@ -17365,9 +17281,6 @@ /* System generated locals */ doublereal ret_val, d__1, d__2, d__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal w, xabs, yabs, zabs; @@ -18309,9 +18222,6 @@ wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; doublereal d__1, d__2, d__3, d__4, d__5, d__6; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k; static doublereal s, aa, bb, cc, dd, cs, sn; @@ -18988,9 +18898,6 @@ wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; doublereal d__1, d__2, d__3, d__4, d__5, d__6; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k; static doublereal s, aa, bb, cc, dd, cs, sn; @@ -22434,9 +22341,6 @@ integer i__1; doublereal d__1; - /* Builtin functions */ - double d_sign(doublereal *, doublereal *); - /* Local variables */ static integer j, knt; static doublereal beta; @@ -23585,9 +23489,6 @@ integer i__1; doublereal d__1, d__2; - /* Builtin functions */ - double log(doublereal), pow_di(doublereal *, integer *), sqrt(doublereal); - /* Local variables */ static integer i__; static doublereal f1, g1, eps, scale; @@ -23750,9 +23651,6 @@ /* System generated locals */ doublereal d__1, d__2; - 
/* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal c__, fa, ga, ha, as, at, au, fhmn, fhmx; @@ -24210,9 +24108,6 @@ /* System generated locals */ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; - /* Builtin functions */ - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, j, m, i1, ic, lf, nd, ll, nl, nr, im1, ncc, nlf, nrf, iwk, lvl, ndb1, nlp1, nrp1; @@ -25341,9 +25236,6 @@ vt_offset, vt2_dim1, vt2_offset, i__1, i__2; doublereal d__1, d__2; - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - /* Local variables */ static integer i__, j, m, n, jc; static doublereal rho; @@ -25761,9 +25653,6 @@ integer i__1; doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal a, b, c__; static integer j; @@ -26771,9 +26660,6 @@ /* System generated locals */ doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal b, c__, w, del, tau, delsq; @@ -27789,9 +27675,6 @@ integer difr_dim1, difr_offset, i__1, i__2; doublereal d__1, d__2; - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - /* Local variables */ static integer i__, j; static doublereal dj, rho; @@ -28091,9 +27974,6 @@ poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1, i__2; - /* Builtin functions */ - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, j, m, i1, ic, lf, nd, ll, nl, vf, nr, vl, im1, ncc, nlf, nrf, vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1; @@ -28908,9 +28788,6 @@ /* System generated locals */ integer i__1, i__2; - /* Builtin functions */ - double log(doublereal); - /* Local variables */ static integer i__, il, ir, maxn; static doublereal temp; @@ -29159,9 +29036,6 @@ integer i__1, i__2; doublereal d__1, d__2, d__3; - /* Builtin functions */ - double sqrt(doublereal); 
- /* Local variables */ static integer i__; static doublereal eps; @@ -29346,9 +29220,6 @@ integer i__1, i__2, i__3; doublereal d__1, d__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal d__, e, g; static integer k; @@ -29926,9 +29797,6 @@ integer i__1; doublereal d__1, d__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal s, t; static integer j4, nn; @@ -30260,9 +30128,6 @@ integer i__1; doublereal d__1, d__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal s, a2, b1, b2; static integer i4, nn, np; @@ -31827,9 +31692,6 @@ /* System generated locals */ doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - /* Local variables */ static doublereal a, d__, l, m, r__, s, t, fa, ga, ha, ft, gt, ht, mm, tt, clt, crt, slt, srt; @@ -35022,9 +34884,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2]; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, ni, nq, nw; static logical left; @@ -35356,9 +35215,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, nh, ni, nq, nw; static logical left; @@ -35790,9 +35646,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static doublereal t[4160] /* was [65][64] */; @@ -36097,9 +35950,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static doublereal t[4160] /* was [65][64] */; @@ 
-36397,9 +36247,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static doublereal t[4160] /* was [65][64] */; @@ -36696,9 +36543,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, ni, nq, nw; static logical left; @@ -36963,9 +36807,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer j; static doublereal ajj; @@ -37616,11 +37457,6 @@ integer z_dim1, z_offset, i__1, i__2; doublereal d__1, d__2; - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k, m; static doublereal p; @@ -38082,9 +37918,6 @@ integer z_dim1, z_offset, i__1, i__2; doublereal d__1, d__2; - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - /* Local variables */ static doublereal b, c__, f, g; static integer i__, j, k, l, m; @@ -38682,9 +38515,6 @@ integer i__1; doublereal d__1, d__2, d__3; - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - /* Local variables */ static doublereal c__; static integer i__, l, m; @@ -39124,9 +38954,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal eps; static integer inde; @@ -40078,9 +39905,6 @@ i__2, i__3; doublereal d__1, d__2, d__3, d__4; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k; static doublereal x[4] /* was [2][2] */; @@ -41838,9 +41662,6 @@ integer a_dim1, a_offset, i__1, i__2[2], i__3, 
i__4, i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer j, jb, nb, nn; extern logical lsame_(char *, char *); diff -Nru python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c.h python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c.h --- python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c.h 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c.h 2018-06-12 18:28:52.000000000 +0000 @@ -214,4 +214,173 @@ #undef unix #undef vax #endif + +/* https://anonscm.debian.org/cgit/collab-maint/libf2c2.git/tree/f2ch.add */ + +/* If you are using a C++ compiler, append the following to f2c.h + for compiling libF77 and libI77. */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern int abort_(void); +extern double c_abs(complex *); +extern void c_cos(complex *, complex *); +extern void c_div(complex *, complex *, complex *); +extern void c_exp(complex *, complex *); +extern void c_log(complex *, complex *); +extern void c_sin(complex *, complex *); +extern void c_sqrt(complex *, complex *); +extern double d_abs(double *); +extern double d_acos(double *); +extern double d_asin(double *); +extern double d_atan(double *); +extern double d_atn2(double *, double *); +extern void d_cnjg(doublecomplex *, doublecomplex *); +extern double d_cos(double *); +extern double d_cosh(double *); +extern double d_dim(double *, double *); +extern double d_exp(double *); +extern double d_imag(doublecomplex *); +extern double d_int(double *); +extern double d_lg10(double *); +extern double d_log(double *); +extern double d_mod(double *, double *); +extern double d_nint(double *); +extern double d_prod(float *, float *); +extern double d_sign(double *, double *); +extern double d_sin(double *); +extern double d_sinh(double *); +extern double d_sqrt(double *); +extern double d_tan(double *); +extern double d_tanh(double *); +extern double derf_(double *); +extern double 
derfc_(double *); +extern void do_fio(ftnint *, char *, ftnlen); +extern integer do_lio(ftnint *, ftnint *, char *, ftnlen); +extern integer do_uio(ftnint *, char *, ftnlen); +extern integer e_rdfe(void); +extern integer e_rdue(void); +extern integer e_rsfe(void); +extern integer e_rsfi(void); +extern integer e_rsle(void); +extern integer e_rsli(void); +extern integer e_rsue(void); +extern integer e_wdfe(void); +extern integer e_wdue(void); +extern void e_wsfe(void); +extern integer e_wsfi(void); +extern integer e_wsle(void); +extern integer e_wsli(void); +extern integer e_wsue(void); +extern int ef1asc_(ftnint *, ftnlen *, ftnint *, ftnlen *); +extern integer ef1cmc_(ftnint *, ftnlen *, ftnint *, ftnlen *); + +extern double erf_(float *); +extern double erfc_(float *); +extern integer f_back(alist *); +extern integer f_clos(cllist *); +extern integer f_end(alist *); +extern void f_exit(void); +extern integer f_inqu(inlist *); +extern integer f_open(olist *); +extern integer f_rew(alist *); +extern int flush_(void); +extern void getarg_(integer *, char *, ftnlen); +extern void getenv_(char *, char *, ftnlen, ftnlen); +extern short h_abs(short *); +extern short h_dim(short *, short *); +extern short h_dnnt(double *); +extern short h_indx(char *, char *, ftnlen, ftnlen); +extern short h_len(char *, ftnlen); +extern short h_mod(short *, short *); +extern short h_nint(float *); +extern short h_sign(short *, short *); +extern short hl_ge(char *, char *, ftnlen, ftnlen); +extern short hl_gt(char *, char *, ftnlen, ftnlen); +extern short hl_le(char *, char *, ftnlen, ftnlen); +extern short hl_lt(char *, char *, ftnlen, ftnlen); +extern integer i_abs(integer *); +extern integer i_dim(integer *, integer *); +extern integer i_dnnt(double *); +extern integer i_indx(char *, char *, ftnlen, ftnlen); +extern integer i_len(char *, ftnlen); +extern integer i_mod(integer *, integer *); +extern integer i_nint(float *); +extern integer i_sign(integer *, integer *); +extern integer 
iargc_(void); +extern ftnlen l_ge(char *, char *, ftnlen, ftnlen); +extern ftnlen l_gt(char *, char *, ftnlen, ftnlen); +extern ftnlen l_le(char *, char *, ftnlen, ftnlen); +extern ftnlen l_lt(char *, char *, ftnlen, ftnlen); +extern void pow_ci(complex *, complex *, integer *); +extern double pow_dd(double *, double *); +extern double pow_di(double *, integer *); +extern short pow_hh(short *, shortint *); +extern integer pow_ii(integer *, integer *); +extern double pow_ri(float *, integer *); +extern void pow_zi(doublecomplex *, doublecomplex *, integer *); +extern void pow_zz(doublecomplex *, doublecomplex *, doublecomplex *); +extern double r_abs(float *); +extern double r_acos(float *); +extern double r_asin(float *); +extern double r_atan(float *); +extern double r_atn2(float *, float *); +extern void r_cnjg(complex *, complex *); +extern double r_cos(float *); +extern double r_cosh(float *); +extern double r_dim(float *, float *); +extern double r_exp(float *); +extern float r_imag(complex *); +extern double r_int(float *); +extern float r_lg10(real *); +extern double r_log(float *); +extern double r_mod(float *, float *); +extern double r_nint(float *); +extern double r_sign(float *, float *); +extern double r_sin(float *); +extern double r_sinh(float *); +extern double r_sqrt(float *); +extern double r_tan(float *); +extern double r_tanh(float *); +extern void s_cat(char *, char **, integer *, integer *, ftnlen); +extern integer s_cmp(char *, char *, ftnlen, ftnlen); +extern void s_copy(char *, char *, ftnlen, ftnlen); +extern int s_paus(char *, ftnlen); +extern integer s_rdfe(cilist *); +extern integer s_rdue(cilist *); +extern integer s_rnge(char *, integer, char *, integer); +extern integer s_rsfe(cilist *); +extern integer s_rsfi(icilist *); +extern integer s_rsle(cilist *); +extern integer s_rsli(icilist *); +extern integer s_rsne(cilist *); +extern integer s_rsni(icilist *); +extern integer s_rsue(cilist *); +extern int s_stop(char *, ftnlen); +extern 
integer s_wdfe(cilist *); +extern integer s_wdue(cilist *); +extern void s_wsfe( cilist *); +extern integer s_wsfi(icilist *); +extern integer s_wsle(cilist *); +extern integer s_wsli(icilist *); +extern integer s_wsne(cilist *); +extern integer s_wsni(icilist *); +extern integer s_wsue(cilist *); +extern void sig_die(char *, int); +extern integer signal_(integer *, void (*)(int)); +extern integer system_(char *, ftnlen); +extern double z_abs(doublecomplex *); +extern void z_cos(doublecomplex *, doublecomplex *); +extern void z_div(doublecomplex *, doublecomplex *, doublecomplex *); +extern void z_exp(doublecomplex *, doublecomplex *); +extern void z_log(doublecomplex *, doublecomplex *); +extern void z_sin(doublecomplex *, doublecomplex *); +extern void z_sqrt(doublecomplex *, doublecomplex *); + +#ifdef __cplusplus + } +#endif + #endif diff -Nru python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_lapack.c python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_lapack.c --- python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_lapack.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_lapack.c 2018-06-12 17:31:56.000000000 +0000 @@ -476,10 +476,6 @@ /* System generated locals */ integer ret_val; - /* Builtin functions */ - /* Subroutine */ int s_copy(char *, char *, ftnlen, ftnlen); - integer s_cmp(char *, char *, ftnlen, ftnlen); - /* Local variables */ static integer i__; static char c1[1], c2[2], c3[3], c4[2]; @@ -1395,10 +1391,6 @@ integer ret_val, i__1, i__2; real r__1; - /* Builtin functions */ - double log(doublereal); - integer i_nint(real *); - /* Local variables */ static integer nh, ns; diff -Nru python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_s_lapack.c python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_s_lapack.c --- python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_s_lapack.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_s_lapack.c 2018-06-12 18:28:52.000000000 +0000 @@ -59,9 +59,6 @@ 
integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; real r__1; - /* Builtin functions */ - double r_sign(real *, real *), log(doublereal); - /* Local variables */ static integer i__, j, k; static real p, r__; @@ -549,10 +546,6 @@ real r__1, r__2, r__3, r__4; doublereal d__1; - /* Builtin functions */ - double pow_dd(doublereal *, doublereal *), sqrt(doublereal), r_sign(real * - , real *); - /* Local variables */ static real f, g, h__; static integer i__, j, m; @@ -2648,9 +2641,6 @@ i__2, i__3; real r__1, r__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, k; static real r__, cs, sn; @@ -4407,9 +4397,6 @@ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2, i__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, ie, il, ir, iu, blk; static real dum[1], eps; @@ -6770,9 +6757,6 @@ real r__1; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static real hl[2401] /* was [49][49] */; @@ -7258,9 +7242,6 @@ /* Subroutine */ int slabad_(real *small, real *large) { - /* Builtin functions */ - double r_lg10(real *), sqrt(doublereal); - /* -- LAPACK auxiliary routine (version 3.2) -- @@ -7881,9 +7862,6 @@ /* System generated locals */ real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real ab, df, tb, sm, rt, adf, acmn, acmx; @@ -8009,10 +7987,6 @@ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2; real r__1; - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, j, k, iq, lgn, msd2, smm1, spm1, spm2; static real temp; @@ -8648,9 +8622,6 @@ integer q_dim1, q_offset, i__1, i__2; real r__1, r__2, r__3, r__4; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real c__; static 
integer i__, j; @@ -9160,9 +9131,6 @@ integer q_dim1, q_offset, i__1, i__2; real r__1; - /* Builtin functions */ - double sqrt(doublereal), r_sign(real *, real *); - /* Local variables */ static integer i__, j, n2, n12, ii, n23, iq2; static real temp; @@ -9465,9 +9433,6 @@ integer i__1; real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real a, b, c__; static integer j; @@ -10410,9 +10375,6 @@ /* System generated locals */ real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real b, c__, w, del, tau, temp; @@ -10538,9 +10500,6 @@ integer i__1; real r__1, r__2, r__3, r__4; - /* Builtin functions */ - double sqrt(doublereal), log(doublereal), pow_ri(real *, integer *); - /* Local variables */ static real a, b, c__, f; static integer i__; @@ -10896,9 +10855,6 @@ /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; - /* Builtin functions */ - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, k, n1, n2, is, iw, iz, iq2, ptr, ldq2, indx, curr, indxc; @@ -11223,9 +11179,6 @@ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1; real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real c__; static integer i__, j; @@ -11682,9 +11635,6 @@ integer q_dim1, q_offset, s_dim1, s_offset, i__1, i__2; real r__1; - /* Builtin functions */ - double sqrt(doublereal), r_sign(real *, real *); - /* Local variables */ static integer i__, j; static real temp; @@ -11928,10 +11878,6 @@ /* System generated locals */ integer i__1, i__2, i__3; - /* Builtin functions */ - integer pow_ii(integer *, integer *); - double sqrt(doublereal); - /* Local variables */ static integer i__, k, mid, ptr, curr; extern /* Subroutine */ int srot_(integer *, real *, integer *, real *, @@ -12186,9 +12132,6 @@ /* System generated locals */ real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real ab, df, 
cs, ct, tb, sm, tn, rt, adf, acs; static integer sgn1, sgn2; @@ -12799,9 +12742,6 @@ integer h_dim1, h_offset, z_dim1, z_offset, i__1, i__2, i__3; real r__1, r__2, r__3, r__4; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k, l, m; static real s, v[3]; @@ -14428,9 +14368,6 @@ integer a_dim1, a_offset, i__1, i__2; real ret_val, r__1, r__2, r__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j; static real sum, scale; @@ -14596,9 +14533,6 @@ integer i__1; real ret_val, r__1, r__2, r__3, r__4, r__5; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__; static real sum, scale; @@ -14733,9 +14667,6 @@ integer a_dim1, a_offset, i__1, i__2; real ret_val, r__1, r__2, r__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j; static real sum, absa, scale; @@ -14941,9 +14872,6 @@ /* System generated locals */ real r__1, r__2; - /* Builtin functions */ - double r_sign(real *, real *), sqrt(doublereal); - /* Local variables */ static real p, z__, aa, bb, cc, dd, cs1, sn1, sab, sac, eps, tau, temp, scale, bcmax, bcmis, sigma; @@ -15156,9 +15084,6 @@ /* System generated locals */ real ret_val, r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real w, z__, xabs, yabs; @@ -15209,9 +15134,6 @@ /* System generated locals */ real ret_val, r__1, r__2, r__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real w, xabs, yabs, zabs; @@ -16147,9 +16069,6 @@ wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; real r__1, r__2, r__3, r__4, r__5, r__6; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k; static real s, aa, bb, cc, dd, cs, sn; @@ -16822,9 +16741,6 @@ wv_offset, z_dim1, z_offset, i__1, i__2, i__3, i__4; real r__1, r__2, r__3, r__4, r__5, r__6; - /* Builtin 
functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k; static real s, aa, bb, cc, dd, cs, sn; @@ -20258,9 +20174,6 @@ integer i__1; real r__1; - /* Builtin functions */ - double r_sign(real *, real *); - /* Local variables */ static integer j, knt; static real beta; @@ -21404,9 +21317,6 @@ integer i__1; real r__1, r__2; - /* Builtin functions */ - double log(doublereal), pow_ri(real *, integer *), sqrt(doublereal); - /* Local variables */ static integer i__; static real f1, g1, eps, scale; @@ -21569,9 +21479,6 @@ /* System generated locals */ real r__1, r__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real c__, fa, ga, ha, as, at, au, fhmn, fhmx; @@ -22030,9 +21937,6 @@ /* System generated locals */ integer u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2; - /* Builtin functions */ - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, j, m, i1, ic, lf, nd, ll, nl, nr, im1, ncc, nlf, nrf, iwk, lvl, ndb1, nlp1, nrp1; @@ -23153,9 +23057,6 @@ vt_offset, vt2_dim1, vt2_offset, i__1, i__2; real r__1, r__2; - /* Builtin functions */ - double sqrt(doublereal), r_sign(real *, real *); - /* Local variables */ static integer i__, j, m, n, jc; static real rho; @@ -23570,9 +23471,6 @@ integer i__1; real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real a, b, c__; static integer j; @@ -24579,9 +24477,6 @@ /* System generated locals */ real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real b, c__, w, del, tau, delsq; @@ -25592,9 +25487,6 @@ integer difr_dim1, difr_offset, i__1, i__2; real r__1, r__2; - /* Builtin functions */ - double sqrt(doublereal), r_sign(real *, real *); - /* Local variables */ static integer i__, j; static real dj, rho; @@ -25891,9 +25783,6 @@ poles_dim1, poles_offset, u_dim1, u_offset, vt_dim1, vt_offset, z_dim1, z_offset, i__1, i__2; - /* Builtin functions */ - 
integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, j, m, i1, ic, lf, nd, ll, nl, vf, nr, vl, im1, ncc, nlf, nrf, vfi, iwk, vli, lvl, nru, ndb1, nlp1, lvl2, nrp1; @@ -26703,9 +26592,6 @@ /* System generated locals */ integer i__1, i__2; - /* Builtin functions */ - double log(doublereal); - /* Local variables */ static integer i__, il, ir, maxn; static real temp; @@ -26954,9 +26840,6 @@ integer i__1, i__2; real r__1, r__2, r__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__; static real eps; @@ -27138,9 +27021,6 @@ integer i__1, i__2, i__3; real r__1, r__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real d__, e, g; static integer k; @@ -27719,9 +27599,6 @@ integer i__1; real r__1, r__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real s, t; static integer j4, nn; @@ -28049,9 +27926,6 @@ integer i__1; real r__1, r__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static real s, a2, b1, b2; static integer i4, nn, np; @@ -29612,9 +29486,6 @@ /* System generated locals */ real r__1; - /* Builtin functions */ - double sqrt(doublereal), r_sign(real *, real *); - /* Local variables */ static real a, d__, l, m, r__, s, t, fa, ga, ha, ft, gt, ht, mm, tt, clt, crt, slt, srt; @@ -32792,9 +32663,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2]; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, ni, nq, nw; static logical left; @@ -33124,9 +32992,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, nh, ni, nq, nw; static logical left; @@ -33557,9 +33422,6 @@ i__5; char 
ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static real t[4160] /* was [65][64] */; @@ -33864,9 +33726,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static real t[4160] /* was [65][64] */; @@ -34165,9 +34024,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static real t[4160] /* was [65][64] */; @@ -34464,9 +34320,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, ni, nq, nw; static logical left; @@ -34731,9 +34584,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer j; static real ajj; @@ -35376,11 +35226,6 @@ integer z_dim1, z_offset, i__1, i__2; real r__1, r__2; - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k, m; static real p; @@ -35837,9 +35682,6 @@ integer z_dim1, z_offset, i__1, i__2; real r__1, r__2; - /* Builtin functions */ - double sqrt(doublereal), r_sign(real *, real *); - /* Local variables */ static real b, c__, f, g; static integer i__, j, k, l, m; @@ -36433,9 +36275,6 @@ integer i__1; real r__1, r__2, r__3; - /* Builtin functions */ - double sqrt(doublereal), r_sign(real *, real *); - /* Local variables */ static real c__; static integer i__, l, m; @@ -36875,9 +36714,6 @@ integer a_dim1, a_offset, i__1, i__2; real r__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local 
variables */ static real eps; static integer inde; @@ -37813,9 +37649,6 @@ i__2, i__3; real r__1, r__2, r__3, r__4; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k; static real x[4] /* was [2][2] */; @@ -39569,9 +39402,6 @@ integer a_dim1, a_offset, i__1, i__2[2], i__3, i__4, i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer j, jb, nb, nn; extern logical lsame_(char *, char *); diff -Nru python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_z_lapack.c python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_z_lapack.c --- python-numpy-1.13.3/numpy/linalg/lapack_lite/f2c_z_lapack.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/lapack_lite/f2c_z_lapack.c 2018-06-12 17:31:56.000000000 +0000 @@ -270,9 +270,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; doublereal d__1, d__2; - /* Builtin functions */ - double d_imag(doublecomplex *), z_abs(doublecomplex *); - /* Local variables */ static doublereal c__, f, g; static integer i__, j, k, l, m; @@ -664,9 +661,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; doublecomplex z__1; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__; static doublecomplex alpha; @@ -1316,10 +1310,6 @@ doublereal d__1, d__2; doublecomplex z__1, z__2; - /* Builtin functions */ - double sqrt(doublereal), d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, k, ihi; static doublereal scl; @@ -1840,9 +1830,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; doublecomplex z__1; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__; static doublecomplex alpha; @@ -2721,9 +2708,6 @@ /* System generated locals */ integer a_dim1, a_offset, b_dim1, b_offset, i__1, i__2, i__3, i__4; - /* 
Builtin functions */ - double log(doublereal); - /* Local variables */ static integer ie, il, mm; static doublereal eps, anrm, bnrm; @@ -3461,9 +3445,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; doublecomplex z__1; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, k; static doublecomplex alpha; @@ -3841,9 +3822,6 @@ integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1, i__2, i__3; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer i__, ie, il, ir, iu, blk; static doublereal dum[1], eps; @@ -6471,10 +6449,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; doublecomplex z__1; - /* Builtin functions */ - double z_abs(doublecomplex *); - void z_div(doublecomplex *, doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, jp; static doublereal sfmin; @@ -6993,9 +6967,6 @@ integer a_dim1, a_offset, i__1, i__2; doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal eps; static integer inde; @@ -8042,9 +8013,6 @@ doublecomplex z__1; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static doublecomplex hl[2401] /* was [49][49] */; static integer kbot, nmin; @@ -8968,9 +8936,6 @@ integer i__1, i__2; doublecomplex z__1; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, ioff; @@ -9271,9 +9236,6 @@ doublereal d__1; doublecomplex z__1; - /* Builtin functions */ - double d_imag(doublecomplex *); - /* Local variables */ static integer i__, j, l; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, @@ -9416,9 +9378,6 @@ doublereal d__1, d__2, d__3, d__4; doublecomplex z__1; - /* Builtin functions */ - double d_imag(doublecomplex *); - /* Local variables */ static doublereal zi, zr; extern /* 
Subroutine */ int dladiv_(doublereal *, doublereal *, @@ -9472,10 +9431,6 @@ integer q_dim1, q_offset, qstore_dim1, qstore_offset, i__1, i__2; doublereal d__1; - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, j, k, ll, iq, lgn, msd2, smm1, spm1, spm2; static doublereal temp; @@ -9824,9 +9779,6 @@ /* System generated locals */ integer q_dim1, q_offset, i__1, i__2; - /* Builtin functions */ - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, k, n1, n2, iq, iw, iz, ptr, indx, curr, indxc, indxp; extern /* Subroutine */ int dlaed9_(integer *, integer *, integer *, @@ -10132,9 +10084,6 @@ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1; doublereal d__1; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static doublereal c__; static integer i__, j; @@ -10554,13 +10503,6 @@ doublereal d__1, d__2, d__3, d__4, d__5, d__6; doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7; - /* Builtin functions */ - double d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex *); - double z_abs(doublecomplex *); - void z_sqrt(doublecomplex *, doublecomplex *), pow_zi(doublecomplex *, - doublecomplex *, integer *); - /* Local variables */ static integer i__, j, k, l, m; static doublereal s; @@ -11612,9 +11554,6 @@ doublereal d__1; doublecomplex z__1; - /* Builtin functions */ - double d_imag(doublecomplex *); - /* Local variables */ static integer i__, j, m, n; static doublereal dj; @@ -12154,10 +12093,6 @@ i__2, i__3, i__4, i__5, i__6; doublecomplex z__1; - /* Builtin functions */ - double d_imag(doublecomplex *); - integer pow_ii(integer *, integer *); - /* Local variables */ static integer i__, j, i1, ic, lf, nd, ll, nl, nr, im1, nlf, nrf, lvl, ndb1, nlp1, lvl2, nrp1, jcol, nlvl, sqre, jrow, jimag; @@ -12808,10 +12743,6 @@ doublereal d__1; doublecomplex z__1; - /* Builtin functions */ - double d_imag(doublecomplex *), 
log(doublereal), d_sign(doublereal *, - doublereal *); - /* Local variables */ static integer c__, i__, j, k; static doublereal r__; @@ -13550,9 +13481,6 @@ integer a_dim1, a_offset, i__1, i__2; doublereal ret_val, d__1, d__2; - /* Builtin functions */ - double z_abs(doublecomplex *), sqrt(doublereal); - /* Local variables */ static integer i__, j; static doublereal sum, scale; @@ -13719,9 +13647,6 @@ integer a_dim1, a_offset, i__1, i__2; doublereal ret_val, d__1, d__2, d__3; - /* Builtin functions */ - double z_abs(doublecomplex *), sqrt(doublereal); - /* Local variables */ static integer i__, j; static doublereal sum, absa, scale; @@ -13957,10 +13882,6 @@ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8; doublecomplex z__1, z__2, z__3, z__4, z__5; - /* Builtin functions */ - double d_imag(doublecomplex *); - void z_sqrt(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, k; static doublereal s; @@ -14745,9 +14666,6 @@ doublereal d__1, d__2, d__3, d__4, d__5, d__6; doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8; - /* Builtin functions */ - double d_imag(doublecomplex *); - /* Local variables */ static doublereal s; static doublecomplex h21s, h31s; @@ -14921,10 +14839,6 @@ doublereal d__1, d__2, d__3, d__4, d__5, d__6; doublecomplex z__1, z__2; - /* Builtin functions */ - double d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j; static doublecomplex s; @@ -15507,10 +15421,6 @@ doublereal d__1, d__2, d__3, d__4, d__5, d__6; doublecomplex z__1, z__2; - /* Builtin functions */ - double d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j; static doublecomplex s; @@ -16110,10 +16020,6 @@ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8; doublecomplex z__1, z__2, z__3, z__4, z__5; - /* Builtin functions */ - double d_imag(doublecomplex *); - void z_sqrt(doublecomplex *, 
doublecomplex *); - /* Local variables */ static integer i__, k; static doublereal s; @@ -16903,10 +16809,6 @@ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10; doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - double d_imag(doublecomplex *); - /* Local variables */ static integer j, k, m, i2, j2, i4, j4, k1; static doublereal h11, h12, h21, h22; @@ -18253,9 +18155,6 @@ doublereal d__1; doublecomplex z__1; - /* Builtin functions */ - double d_imag(doublecomplex *); - /* Local variables */ static integer i__, j, l; extern /* Subroutine */ int dgemm_(char *, char *, integer *, integer *, @@ -18575,9 +18474,6 @@ work_offset, i__1, i__2, i__3, i__4, i__5; doublecomplex z__1, z__2; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j; extern logical lsame_(char *, char *); @@ -19420,9 +19316,6 @@ doublereal d__1, d__2; doublecomplex z__1, z__2; - /* Builtin functions */ - double d_imag(doublecomplex *), d_sign(doublereal *, doublereal *); - /* Local variables */ static integer j, knt; static doublereal beta, alphi, alphr; @@ -19923,11 +19816,6 @@ doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10; doublecomplex z__1, z__2, z__3; - /* Builtin functions */ - double log(doublereal), pow_di(doublereal *, integer *), d_imag( - doublecomplex *), sqrt(doublereal); - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static doublereal d__; static integer i__; @@ -21272,9 +21160,6 @@ integer i__1, i__2, i__3; doublereal d__1; - /* Builtin functions */ - double d_imag(doublecomplex *); - /* Local variables */ static integer ix; static doublereal temp1; @@ -21931,10 +21816,6 @@ doublereal d__1, d__2, d__3, d__4; doublecomplex z__1, z__2, z__3, z__4; - /* Builtin functions */ - double d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex *); - /* 
Local variables */ static integer i__, j; static doublereal xj, rec, tjj; @@ -23456,9 +23337,6 @@ doublereal d__1; doublecomplex z__1, z__2; - /* Builtin functions */ - double sqrt(doublereal); - /* Local variables */ static integer j; static doublereal ajj; @@ -24133,9 +24011,6 @@ integer i__1, i__2, i__3, i__4; doublecomplex z__1, z__2, z__3, z__4; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, ix, iy; static doublecomplex stemp; @@ -24270,11 +24145,6 @@ integer z_dim1, z_offset, i__1, i__2, i__3, i__4; doublereal d__1, d__2; - /* Builtin functions */ - double log(doublereal); - integer pow_ii(integer *, integer *); - double sqrt(doublereal); - /* Local variables */ static integer i__, j, k, m; static doublereal p; @@ -24742,9 +24612,6 @@ integer z_dim1, z_offset, i__1, i__2; doublereal d__1, d__2; - /* Builtin functions */ - double sqrt(doublereal), d_sign(doublereal *, doublereal *); - /* Local variables */ static doublereal b, c__, f, g; static integer i__, j, k, l, m; @@ -25345,10 +25212,6 @@ doublereal d__1, d__2, d__3; doublecomplex z__1, z__2; - /* Builtin functions */ - double d_imag(doublecomplex *); - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, k, ii, ki, is; static doublereal ulp; @@ -25848,9 +25711,6 @@ integer q_dim1, q_offset, t_dim1, t_offset, i__1, i__2, i__3; doublecomplex z__1; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer k, m1, m2, m3; static doublereal cs; @@ -26035,9 +25895,6 @@ integer a_dim1, a_offset, i__1, i__2; doublecomplex z__1; - /* Builtin functions */ - void z_div(doublecomplex *, doublecomplex *, doublecomplex *); - /* Local variables */ static integer j; static doublecomplex ajj; @@ -26204,9 +26061,6 @@ doublecomplex z__1; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* 
Local variables */ static integer j, jb, nb, nn; extern logical lsame_(char *, char *); @@ -27059,9 +26913,6 @@ integer a_dim1, a_offset, i__1, i__2, i__3; doublecomplex z__1, z__2; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, j, l; extern /* Subroutine */ int zscal_(integer *, doublecomplex *, @@ -27757,9 +27608,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; doublecomplex z__1; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, i1, i2, i3, mi, ni, nq; static doublecomplex aii; @@ -27972,9 +27820,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; doublecomplex z__1; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq; static doublecomplex aii; @@ -28193,9 +28038,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3[2]; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, ni, nq, nw; static logical left; @@ -28534,9 +28376,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, nh, ni, nq, nw; static logical left; @@ -28761,9 +28600,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1, i__2, i__3; doublecomplex z__1; - /* Builtin functions */ - void d_cnjg(doublecomplex *, doublecomplex *); - /* Local variables */ static integer i__, i1, i2, i3, ic, jc, mi, ni, nq; static doublecomplex aii; @@ -28991,9 +28827,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static 
doublecomplex t[4160] /* was [65][64] */; @@ -29302,9 +29135,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static doublecomplex t[4160] /* was [65][64] */; @@ -29607,9 +29437,6 @@ i__5; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i__; static doublecomplex t[4160] /* was [65][64] */; @@ -29911,9 +29738,6 @@ integer a_dim1, a_offset, c_dim1, c_offset, i__1[2], i__2, i__3; char ch__1[2]; - /* Builtin functions */ - /* Subroutine */ int s_cat(char *, char **, integer *, integer *, ftnlen); - /* Local variables */ static integer i1, i2, nb, mi, ni, nq, nw; static logical left; diff -Nru python-numpy-1.13.3/numpy/linalg/lapack_lite/python_xerbla.c python-numpy-1.14.5/numpy/linalg/lapack_lite/python_xerbla.c --- python-numpy-1.13.3/numpy/linalg/lapack_lite/python_xerbla.c 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/lapack_lite/python_xerbla.c 2018-06-12 17:31:56.000000000 +0000 @@ -1,4 +1,6 @@ #include "Python.h" + +#undef c_abs #include "f2c.h" /* diff -Nru python-numpy-1.13.3/numpy/linalg/linalg.py python-numpy-1.14.5/numpy/linalg/linalg.py --- python-numpy-1.13.3/numpy/linalg/linalg.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/linalg.py 2018-06-12 18:28:52.000000000 +0000 @@ -19,12 +19,13 @@ import warnings from numpy.core import ( - array, asarray, zeros, empty, empty_like, transpose, intc, single, double, + array, asarray, zeros, empty, empty_like, intc, single, double, csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot, add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size, - finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs, - broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones - ) + finfo, 
errstate, geterrobj, longdouble, moveaxis, amin, amax, product, abs, + broadcast, atleast_2d, intp, asanyarray, object_, ones, matmul, + swapaxes, divide, count_nonzero +) from numpy.core.multiarray import normalize_axis_index from numpy.lib import triu, asfarray from numpy.linalg import lapack_lite, _umath_linalg @@ -69,12 +70,8 @@ """ pass -# Dealing with errors in _umath_linalg - -_linalg_error_extobj = None def _determine_error_states(): - global _linalg_error_extobj errobj = geterrobj() bufsize = errobj[0] @@ -82,9 +79,11 @@ divide='ignore', under='ignore'): invalid_call_errmask = geterrobj()[1] - _linalg_error_extobj = [bufsize, invalid_call_errmask, None] + return [bufsize, invalid_call_errmask, None] -_determine_error_states() +# Dealing with errors in _umath_linalg +_linalg_error_extobj = _determine_error_states() +del _determine_error_states def _raise_linalgerror_singular(err, flag): raise LinAlgError("Singular matrix") @@ -99,7 +98,7 @@ raise LinAlgError("SVD did not converge") def get_linalg_error_extobj(callback): - extobj = list(_linalg_error_extobj) + extobj = list(_linalg_error_extobj) # make a copy extobj[2] = callback return extobj @@ -225,6 +224,22 @@ if _isEmpty2d(a): raise LinAlgError("Arrays cannot be empty") +def transpose(a): + """ + Transpose each matrix in a stack of matrices. 
+ + Unlike np.transpose, this only swaps the last two axes, rather than all of + them + + Parameters + ---------- + a : (...,M,N) array_like + + Returns + ------- + aT : (...,N,M) ndarray + """ + return swapaxes(a, -1, -2) # Linear equations @@ -615,15 +630,15 @@ mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional If K = min(M, N), then - 'reduced' : returns q, r with dimensions (M, K), (K, N) (default) - 'complete' : returns q, r with dimensions (M, M), (M, N) - 'r' : returns r only with dimensions (K, N) - 'raw' : returns h, tau with dimensions (N, M), (K,) - 'full' : alias of 'reduced', deprecated - 'economic' : returns h from 'raw', deprecated. + * 'reduced' : returns q, r with dimensions (M, K), (K, N) (default) + * 'complete' : returns q, r with dimensions (M, M), (M, N) + * 'r' : returns r only with dimensions (K, N) + * 'raw' : returns h, tau with dimensions (N, M), (K,) + * 'full' : alias of 'reduced', deprecated + * 'economic' : returns h from 'raw', deprecated. The options 'reduced', 'complete, and 'raw' are new in numpy 1.8, - see the notes for more information. The default is 'reduced' and to + see the notes for more information. The default is 'reduced', and to maintain backward compatibility with earlier versions of numpy both it and the old default 'full' can be omitted. Note that array h returned in 'raw' mode is transposed for calling Fortran. The @@ -1281,35 +1296,44 @@ # Singular value decomposition -def svd(a, full_matrices=1, compute_uv=1): +def svd(a, full_matrices=True, compute_uv=True): """ Singular Value Decomposition. - Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v` - are unitary and `s` is a 1-d array of `a`'s singular values. + When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh + = (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D + array of `a`'s singular values. When `a` is higher-dimensional, SVD is + applied in stacked mode as explained below. 
Parameters ---------- a : (..., M, N) array_like - A real or complex matrix of shape (`M`, `N`) . + A real or complex array with ``a.ndim >= 2``. full_matrices : bool, optional - If True (default), `u` and `v` have the shapes (`M`, `M`) and - (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`) - and (`K`, `N`), respectively, where `K` = min(`M`, `N`). + If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and + ``(..., N, N)``, respectively. Otherwise, the shapes are + ``(..., M, K)`` and ``(..., K, N)``, respectively, where + ``K = min(M, N)``. compute_uv : bool, optional - Whether or not to compute `u` and `v` in addition to `s`. True + Whether or not to compute `u` and `vh` in addition to `s`. True by default. Returns ------- u : { (..., M, M), (..., M, K) } array - Unitary matrices. The actual shape depends on the value of - ``full_matrices``. Only returned when ``compute_uv`` is True. + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. s : (..., K) array - The singular values for every matrix, sorted in descending order. - v : { (..., N, N), (..., K, N) } array - Unitary matrices. The actual shape depends on the value of - ``full_matrices``. Only returned when ``compute_uv`` is True. + Vector(s) with the singular values, within each vector sorted in + descending order. The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. + vh : { (..., N, N), (..., K, N) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. Raises ------ @@ -1319,48 +1343,79 @@ Notes ----- - .. 
versionadded:: 1.8.0 - - Broadcasting rules apply, see the `numpy.linalg` documentation for - details. + .. versionchanged:: 1.8.0 + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The decomposition is performed using LAPACK routine ``_gesdd``. + + SVD is usually described for the factorization of a 2D matrix :math:`A`. + The higher-dimensional case will be discussed below. In the 2D case, SVD is + written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`, + :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s` + contains the singular values of `a` and `u` and `vh` are unitary. The rows + of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are + the eigenvectors of :math:`A A^H`. In both cases the corresponding + (possibly non-zero) eigenvalues are given by ``s**2``. + + If `a` has more than two dimensions, then broadcasting rules apply, as + explained in :ref:`routines.linalg-broadcasting`. This means that SVD is + working in "stacked" mode: it iterates over all indices of the first + ``a.ndim - 2`` dimensions and for each combination SVD is applied to the + last two indices. The matrix `a` can be reconstructed from the + decomposition with either ``(u * s[..., None, :]) @ vh`` or + ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the + function ``np.matmul`` for python versions below 3.5.) - The decomposition is performed using LAPACK routine _gesdd + If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are + all the return values. - The SVD is commonly written as ``a = U S V.H``. The `v` returned - by this function is ``V.H`` and ``u = U``. + Examples + -------- + >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) + >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3) - If ``U`` is a unitary matrix, it means that it - satisfies ``U.H = inv(U)``. 
+ Reconstruction based on full SVD, 2D case: - The rows of `v` are the eigenvectors of ``a.H a``. The columns - of `u` are the eigenvectors of ``a a.H``. For row ``i`` in - `v` and column ``i`` in `u`, the corresponding eigenvalue is - ``s[i]**2``. + >>> u, s, vh = np.linalg.svd(a, full_matrices=True) + >>> u.shape, s.shape, vh.shape + ((9, 9), (6,), (6, 6)) + >>> np.allclose(a, np.dot(u[:, :6] * s, vh)) + True + >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat[:6, :6] = np.diag(s) + >>> np.allclose(a, np.dot(u, np.dot(smat, vh))) + True - If `a` is a `matrix` object (as opposed to an `ndarray`), then so - are all the return values. + Reconstruction based on reduced SVD, 2D case: - Examples - -------- - >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) + >>> u, s, vh = np.linalg.svd(a, full_matrices=False) + >>> u.shape, s.shape, vh.shape + ((9, 6), (6,), (6, 6)) + >>> np.allclose(a, np.dot(u * s, vh)) + True + >>> smat = np.diag(s) + >>> np.allclose(a, np.dot(u, np.dot(smat, vh))) + True - Reconstruction based on full SVD: + Reconstruction based on full SVD, 4D case: - >>> U, s, V = np.linalg.svd(a, full_matrices=True) - >>> U.shape, V.shape, s.shape - ((9, 9), (6, 6), (6,)) - >>> S = np.zeros((9, 6), dtype=complex) - >>> S[:6, :6] = np.diag(s) - >>> np.allclose(a, np.dot(U, np.dot(S, V))) + >>> u, s, vh = np.linalg.svd(b, full_matrices=True) + >>> u.shape, s.shape, vh.shape + ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh)) + True + >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh)) True - Reconstruction based on reduced SVD: + Reconstruction based on reduced SVD, 4D case: - >>> U, s, V = np.linalg.svd(a, full_matrices=False) - >>> U.shape, V.shape, s.shape - ((9, 6), (6, 6), (6,)) - >>> S = np.diag(s) - >>> np.allclose(a, np.dot(U, np.dot(S, V))) + >>> u, s, vh = np.linalg.svd(b, full_matrices=False) + >>> u.shape, s.shape, vh.shape + ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3)) + >>> 
np.allclose(b, np.matmul(u * s[..., None, :], vh)) + True + >>> np.allclose(b, np.matmul(u, s[..., None] * vh)) True """ @@ -1386,11 +1441,11 @@ gufunc = _umath_linalg.svd_n_s signature = 'D->DdD' if isComplexType(t) else 'd->ddd' - u, s, vt = gufunc(a, signature=signature, extobj=extobj) + u, s, vh = gufunc(a, signature=signature, extobj=extobj) u = u.astype(result_t, copy=False) s = s.astype(_realType(result_t), copy=False) - vt = vt.astype(result_t, copy=False) - return wrap(u), s, wrap(vt) + vh = vh.astype(result_t, copy=False) + return wrap(u), s, wrap(vh) else: if m < n: gufunc = _umath_linalg.svd_m @@ -1402,6 +1457,7 @@ s = s.astype(_realType(result_t), copy=False) return s + def cond(x, p=None): """ Compute the condition number of a matrix. @@ -1489,22 +1545,34 @@ return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1)) -def matrix_rank(M, tol=None): +def matrix_rank(M, tol=None, hermitian=False): """ Return matrix rank of array using SVD method - Rank of the array is the number of SVD singular values of the array that are + Rank of the array is the number of singular values of the array that are greater than `tol`. + .. versionchanged:: 1.14 + Can now operate on stacks of matrices + Parameters ---------- M : {(M,), (..., M, N)} array_like input vector or stack of matrices - tol : {None, float}, optional - threshold below which SVD values are considered zero. If `tol` is - None, and ``S`` is an array with singular values for `M`, and - ``eps`` is the epsilon value for datatype of ``S``, then `tol` is - set to ``S.max() * max(M.shape) * eps``. + tol : (...) array_like, float, optional + threshold below which SVD values are considered zero. If `tol` is + None, and ``S`` is an array with singular values for `M`, and + ``eps`` is the epsilon value for datatype of ``S``, then `tol` is + set to ``S.max() * max(M.shape) * eps``. + + .. 
versionchanged:: 1.14 + Broadcasted against the stack of matrices + hermitian : bool, optional + If True, `M` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + .. versionadded:: 1.14 Notes ----- @@ -1568,10 +1636,15 @@ M = asarray(M) if M.ndim < 2: return int(not all(M==0)) - S = svd(M, compute_uv=False) + if hermitian: + S = abs(eigvalsh(M)) + else: + S = svd(M, compute_uv=False) if tol is None: tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps - return (S > tol).sum(axis=-1) + else: + tol = asarray(tol)[..., newaxis] + return count_nonzero(S > tol, axis=-1) # Generalized inverse @@ -1584,26 +1657,29 @@ singular-value decomposition (SVD) and including all *large* singular values. + .. versionchanged:: 1.14 + Can now operate on stacks of matrices + Parameters ---------- - a : (M, N) array_like - Matrix to be pseudo-inverted. - rcond : float - Cutoff for small singular values. - Singular values smaller (in modulus) than - `rcond` * largest_singular_value (again, in modulus) - are set to zero. + a : (..., M, N) array_like + Matrix or stack of matrices to be pseudo-inverted. + rcond : (...) array_like of float + Cutoff for small singular values. + Singular values smaller (in modulus) than + `rcond` * largest_singular_value (again, in modulus) + are set to zero. Broadcasts against the stack of matrices Returns ------- - B : (N, M) ndarray - The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so - is `B`. + B : (..., N, M) ndarray + The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so + is `B`. Raises ------ LinAlgError - If the SVD computation does not converge. + If the SVD computation does not converge. 
Notes ----- @@ -1640,20 +1716,20 @@ """ a, wrap = _makearray(a) + rcond = asarray(rcond) if _isEmpty2d(a): res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype) return wrap(res) a = a.conjugate() - u, s, vt = svd(a, 0) - m = u.shape[0] - n = vt.shape[1] - cutoff = rcond*maximum.reduce(s) - for i in range(min(n, m)): - if s[i] > cutoff: - s[i] = 1./s[i] - else: - s[i] = 0. - res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u))) + u, s, vt = svd(a, full_matrices=False) + + # discard small singular values + cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True) + large = s > cutoff + s = divide(1, s, where=large, out=s) + s[~large] = 0 + + res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u))) return wrap(res) # Determinant @@ -1739,14 +1815,8 @@ real_t = _realType(result_t) signature = 'D->Dd' if isComplexType(t) else 'd->dd' sign, logdet = _umath_linalg.slogdet(a, signature=signature) - if isscalar(sign): - sign = sign.astype(result_t) - else: - sign = sign.astype(result_t, copy=False) - if isscalar(logdet): - logdet = logdet.astype(real_t) - else: - logdet = logdet.astype(real_t, copy=False) + sign = sign.astype(result_t, copy=False) + logdet = logdet.astype(real_t, copy=False) return sign, logdet def det(a): @@ -1802,15 +1872,12 @@ t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' r = _umath_linalg.det(a, signature=signature) - if isscalar(r): - r = r.astype(result_t) - else: - r = r.astype(result_t, copy=False) + r = r.astype(result_t, copy=False) return r # Linear Least Squares -def lstsq(a, b, rcond=-1): +def lstsq(a, b, rcond="warn"): """ Return the least-squares solution to a linear matrix equation. @@ -1836,12 +1903,19 @@ as zero if they are smaller than `rcond` times the largest singular value of `a`. + .. versionchanged:: 1.14.0 + If not set, a FutureWarning is given. 
The previous default + of ``-1`` will use the machine precision as `rcond` parameter, + the new default will use the machine precision times `max(M, N)`. + To silence the warning and use the new default, use ``rcond=None``, + to keep using the old behavior, use ``rcond=-1``. + Returns ------- x : {(N,), (N, K)} ndarray Least-squares solution. If `b` is two-dimensional, the solutions are in the `K` columns of `x`. - residuals : {(), (1,), (K,)} ndarray + residuals : {(1,), (K,), (0,)} ndarray Sums of residuals; squared Euclidean 2-norm for each column in ``b - a*x``. If the rank of `a` is < N or M <= N, this is an empty array. @@ -1908,11 +1982,27 @@ ldb = max(n, m) if m != b.shape[0]: raise LinAlgError('Incompatible dimensions') + t, result_t = _commonType(a, b) - result_real_t = _realType(result_t) real_t = _linalgRealType(t) + result_real_t = _realType(result_t) + + # Determine default rcond value + if rcond == "warn": + # 2017-08-19, 1.14.0 + warnings.warn("`rcond` parameter will change to the default of " + "machine precision times ``max(M, N)`` where M and N " + "are the input matrix dimensions.\n" + "To use the future default and silence this warning " + "we advise to pass `rcond=None`, to keep using the old, " + "explicitly pass `rcond=-1`.", + FutureWarning, stacklevel=2) + rcond = -1 + if rcond is None: + rcond = finfo(t).eps * ldb + bstar = zeros((ldb, n_rhs), t) - bstar[:b.shape[0], :n_rhs] = b.copy() + bstar[:m, :n_rhs] = b a, bstar = _fastCopyAndTranspose(t, a, bstar) a, bstar = _to_native_byte_order(a, bstar) s = zeros((min(m, n),), real_t) @@ -1933,14 +2023,8 @@ work = zeros((lwork,), t) results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, -1, rwork, iwork, 0) - lwork = int(abs(work[0])) - rwork = zeros((lwork,), real_t) - a_real = zeros((m, n), real_t) - bstar_real = zeros((ldb, n_rhs,), real_t) - results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m, - bstar_real, ldb, s, rcond, - 0, rwork, -1, iwork, 0) lrwork = int(rwork[0]) + 
lwork = int(work[0].real) work = zeros((lwork,), t) rwork = zeros((lrwork,), real_t) results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond, @@ -1957,28 +2041,35 @@ 0, work, lwork, iwork, 0) if results['info'] > 0: raise LinAlgError('SVD did not converge in Linear Least Squares') - resids = array([], result_real_t) - if is_1d: - x = array(ravel(bstar)[:n], dtype=result_t, copy=True) - if results['rank'] == n and m > n: - if isComplexType(t): - resids = array([sum(abs(ravel(bstar)[n:])**2)], - dtype=result_real_t) - else: - resids = array([sum((ravel(bstar)[n:])**2)], - dtype=result_real_t) + + # undo transpose imposed by fortran-order arrays + b_out = bstar.T + + # b_out contains both the solution and the components of the residuals + x = b_out[:n,:] + r_parts = b_out[n:,:] + if isComplexType(t): + resids = sum(abs(r_parts)**2, axis=-2) else: - x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True) - if results['rank'] == n and m > n: - if isComplexType(t): - resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype( - result_real_t, copy=False) - else: - resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype( - result_real_t, copy=False) + resids = sum(r_parts**2, axis=-2) - st = s[:min(n, m)].astype(result_real_t, copy=True) - return wrap(x), wrap(resids), results['rank'], st + rank = results['rank'] + + # remove the axis we added + if is_1d: + x = x.squeeze(axis=-1) + # we probably should squeeze resids too, but we can't + # without breaking compatibility. + + # as documented + if rank != n or m <= n: + resids = array([], result_real_t) + + # coerce output arrays + s = s.astype(result_real_t, copy=False) + resids = resids.astype(result_real_t, copy=False) + x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed + return wrap(x), wrap(resids), rank, s def _multi_svd_norm(x, row_axis, col_axis, op): @@ -2004,9 +2095,7 @@ is `numpy.amin` or `numpy.amax` or `numpy.sum`. 
""" - if row_axis > col_axis: - row_axis -= 1 - y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1) + y = moveaxis(x, (row_axis, col_axis), (-2, -1)) result = op(svd(y, compute_uv=0), axis=-1) return result @@ -2177,7 +2266,7 @@ elif not isinstance(axis, tuple): try: axis = int(axis) - except: + except Exception: raise TypeError("'axis' must be None, an integer or a tuple of integers") axis = (axis,) @@ -2188,7 +2277,7 @@ return abs(x).min(axis=axis, keepdims=keepdims) elif ord == 0: # Zero norm - return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims) + return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims) elif ord == 1: # special case for speedup return add.reduce(abs(x), axis=axis, keepdims=keepdims) @@ -2201,20 +2290,11 @@ ord + 1 except TypeError: raise ValueError("Invalid norm order for vectors.") - if x.dtype.type is longdouble: - # Convert to a float type, so integer arrays give - # float results. Don't apply asfarray to longdouble arrays, - # because it will downcast to float64. 
- absx = abs(x) - else: - absx = x if isComplexType(x.dtype.type) else asfarray(x) - if absx.dtype is x.dtype: - absx = abs(absx) - else: - # if the type changed, we can safely overwrite absx - abs(absx, out=absx) + absx = abs(x) absx **= ord - return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord) + ret = add.reduce(absx, axis=axis, keepdims=keepdims) + ret **= (1 / ord) + return ret elif len(axis) == 2: row_axis, col_axis = axis row_axis = normalize_axis_index(row_axis, nd) diff -Nru python-numpy-1.13.3/numpy/linalg/tests/test_build.py python-numpy-1.14.5/numpy/linalg/tests/test_build.py --- python-numpy-1.13.3/numpy/linalg/tests/test_build.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/tests/test_build.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,7 +5,7 @@ import re from numpy.linalg import lapack_lite -from numpy.testing import TestCase, dec, run_module_suite +from numpy.testing import run_module_suite, assert_, dec class FindDependenciesLdd(object): @@ -40,7 +40,7 @@ return founds -class TestF77Mismatch(TestCase): +class TestF77Mismatch(object): @dec.skipif(not(sys.platform[:5] == 'linux'), "Skipping fortran compiler mismatch on non Linux platform") @@ -48,7 +48,7 @@ f = FindDependenciesLdd() deps = f.grep_dependencies(lapack_lite.__file__, [b'libg2c', b'libgfortran']) - self.assertFalse(len(deps) > 1, + assert_(len(deps) <= 1, """Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to cause random crashes and wrong results. 
See numpy INSTALL.txt for more information.""") diff -Nru python-numpy-1.13.3/numpy/linalg/tests/test_linalg.py python-numpy-1.14.5/numpy/linalg/tests/test_linalg.py --- python-numpy-1.13.3/numpy/linalg/tests/test_linalg.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/tests/test_linalg.py 2018-06-12 18:28:52.000000000 +0000 @@ -7,7 +7,8 @@ import sys import itertools import traceback -import warnings +import textwrap +import subprocess import numpy as np from numpy import array, single, double, csingle, cdouble, dot, identity @@ -712,12 +713,16 @@ assert_almost_equal(linalg.cond(A, inf), 3.) -class TestPinv(LinalgSquareTestCase, LinalgNonsquareTestCase): +class TestPinv(LinalgSquareTestCase, + LinalgNonsquareTestCase, + LinalgGeneralizedSquareTestCase, + LinalgGeneralizedNonsquareTestCase): def do(self, a, b, tags): a_ginv = linalg.pinv(a) # `a @ a_ginv == I` does not hold if a is singular - assert_almost_equal(dot(a, a_ginv).dot(a), a, single_decimal=5, double_decimal=11) + dot = dot_generalized + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix))) @@ -793,7 +798,7 @@ arr = np.asarray(a) m, n = arr.shape u, s, vt = linalg.svd(a, 0) - x, residuals, rank, sv = linalg.lstsq(a, b) + x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1) if m <= n: assert_almost_equal(b, dot(a, x)) assert_equal(rank, m) @@ -814,6 +819,23 @@ assert_(imply(isinstance(b, matrix), isinstance(x, matrix))) assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix))) + def test_future_rcond(self): + a = np.array([[0., 1., 0., 1., 2., 0.], + [0., 2., 0., 0., 1., 0.], + [1., 0., 1., 0., 0., 4.], + [0., 0., 0., 2., 3., 0.]]).T + + b = np.array([1, 0, 0, 0, 0, 0]) + with suppress_warnings() as sup: + w = sup.record(FutureWarning, "`rcond` parameter will change") + x, residuals, rank, s = linalg.lstsq(a, b) + assert_(rank == 4) + x, residuals, rank, s = 
linalg.lstsq(a, b, rcond=-1) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + assert_(rank == 3) + # Warning should be raised exactly once (first command) + assert_(len(w) == 1) class TestMatrixPower(object): R90 = array([[0, 1], [-1, 0]]) @@ -1362,6 +1384,19 @@ # works on scalar yield assert_equal, matrix_rank(1), 1 + def test_symmetric_rank(self): + yield assert_equal, 4, matrix_rank(np.eye(4), hermitian=True) + yield assert_equal, 1, matrix_rank(np.ones((4, 4)), hermitian=True) + yield assert_equal, 0, matrix_rank(np.zeros((4, 4)), hermitian=True) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + yield assert_equal, 3, matrix_rank(I, hermitian=True) + # manually supplied tolerance + I[-1, -1] = 1e-8 + yield assert_equal, 4, matrix_rank(I, hermitian=True, tol=0.99e-8) + yield assert_equal, 3, matrix_rank(I, hermitian=True, tol=1.01e-8) + def test_reduced_rank(): # Test matrices with reduced rank @@ -1473,6 +1508,30 @@ class TestCholesky(object): # TODO: are there no other tests for cholesky? 
+ def test_basic_property(self): + # Check A = L L^H + shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] + dtypes = (np.float32, np.float64, np.complex64, np.complex128) + + for shape, dtype in itertools.product(shapes, dtypes): + np.random.seed(1) + a = np.random.randn(*shape) + if np.issubdtype(dtype, np.complexfloating): + a = a + 1j*np.random.randn(*shape) + + t = list(range(len(shape))) + t[-2:] = -1, -2 + + a = np.matmul(a.transpose(t).conj(), a) + a = np.asarray(a, dtype=dtype) + + c = np.linalg.cholesky(a) + + b = np.matmul(c, c.transpose(t).conj()) + assert_allclose(b, a, + err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c), + atol=500 * a.shape[0] * np.finfo(dtype).eps) + def test_0_size(self): class ArraySubclass(np.ndarray): pass @@ -1550,7 +1609,7 @@ np.linalg.lapack_lite.xerbla() except ValueError: pass - except: + except Exception: os._exit(os.EX_CONFIG) try: @@ -1574,6 +1633,40 @@ raise SkipTest('Numpy xerbla not linked in.') +def test_sdot_bug_8577(): + # Regression test that loading certain other libraries does not + # result to wrong results in float32 linear algebra. + # + # There's a bug gh-8577 on OSX that can trigger this, and perhaps + # there are also other situations in which it occurs. + # + # Do the check in a separate process. 
+ + bad_libs = ['PyQt5.QtWidgets', 'IPython'] + + template = textwrap.dedent(""" + import sys + {before} + try: + import {bad_lib} + except ImportError: + sys.exit(0) + {after} + x = np.ones(2, dtype=np.float32) + sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1) + """) + + for bad_lib in bad_libs: + code = template.format(before="import numpy as np", after="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + # Swapped import order + code = template.format(after="import numpy as np", before="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + class TestMultiDot(object): def test_basic_function_with_three_arguments(self): @@ -1645,7 +1738,7 @@ [0, 0, 0, 3, 3, 3], [0, 0, 0, 0, 4, 5], [0, 0, 0, 0, 0, 5], - [0, 0, 0, 0, 0, 0]], dtype=np.int) + [0, 0, 0, 0, 0, 0]], dtype=int) s_expected -= 1 # Cormen uses 1-based index, python does not. s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True) diff -Nru python-numpy-1.13.3/numpy/linalg/tests/test_regression.py python-numpy-1.14.5/numpy/linalg/tests/test_regression.py --- python-numpy-1.13.3/numpy/linalg/tests/test_regression.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/tests/test_regression.py 2018-06-12 18:28:52.000000000 +0000 @@ -7,17 +7,14 @@ import numpy as np from numpy import linalg, arange, float64, array, dot, transpose from numpy.testing import ( - TestCase, run_module_suite, assert_equal, assert_array_equal, + run_module_suite, assert_, assert_raises, assert_equal, assert_array_equal, assert_array_almost_equal, assert_array_less ) -rlevel = 1 +class TestRegression(object): - -class TestRegression(TestCase): - - def test_eig_build(self, level=rlevel): + def test_eig_build(self): # Ticket #652 rva = array([1.03221168e+02 + 0.j, -1.91843603e+01 + 0.j, @@ -40,7 +37,7 @@ rva.sort() assert_array_almost_equal(va, rva) - def test_eigh_build(self, level=rlevel): + def test_eigh_build(self): # Ticket 662. 
rvals = [68.60568999, 89.57756725, 106.67185574] @@ -51,7 +48,7 @@ vals, vecs = linalg.eigh(cov) assert_array_almost_equal(vals, rvals) - def test_svd_build(self, level=rlevel): + def test_svd_build(self): # Ticket 627. a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]]) m, n = a.shape @@ -64,7 +61,7 @@ def test_norm_vector_badarg(self): # Regression for #786: Froebenius norm for vectors raises # TypeError. - self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro') + assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro') def test_lapack_endian(self): # For bug #1482 @@ -98,47 +95,59 @@ norm = linalg.norm(testvector) assert_array_equal(norm, [0, 1]) - self.assertEqual(norm.dtype, np.dtype('float64')) + assert_(norm.dtype == np.dtype('float64')) norm = linalg.norm(testvector, ord=1) assert_array_equal(norm, [0, 1]) - self.assertNotEqual(norm.dtype, np.dtype('float64')) + assert_(norm.dtype != np.dtype('float64')) norm = linalg.norm(testvector, ord=2) assert_array_equal(norm, [0, 1]) - self.assertEqual(norm.dtype, np.dtype('float64')) + assert_(norm.dtype == np.dtype('float64')) - self.assertRaises(ValueError, linalg.norm, testvector, ord='fro') - self.assertRaises(ValueError, linalg.norm, testvector, ord='nuc') - self.assertRaises(ValueError, linalg.norm, testvector, ord=np.inf) - self.assertRaises(ValueError, linalg.norm, testvector, ord=-np.inf) + assert_raises(ValueError, linalg.norm, testvector, ord='fro') + assert_raises(ValueError, linalg.norm, testvector, ord='nuc') + assert_raises(ValueError, linalg.norm, testvector, ord=np.inf) + assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf) with warnings.catch_warnings(): warnings.simplefilter("error", DeprecationWarning) - self.assertRaises((AttributeError, DeprecationWarning), + assert_raises((AttributeError, DeprecationWarning), linalg.norm, testvector, ord=0) - self.assertRaises(ValueError, linalg.norm, testvector, ord=-1) - self.assertRaises(ValueError, linalg.norm, 
testvector, ord=-2) + assert_raises(ValueError, linalg.norm, testvector, ord=-1) + assert_raises(ValueError, linalg.norm, testvector, ord=-2) testmatrix = np.array([[np.array([0, 1]), 0, 0], [0, 0, 0]], dtype=object) norm = linalg.norm(testmatrix) assert_array_equal(norm, [0, 1]) - self.assertEqual(norm.dtype, np.dtype('float64')) + assert_(norm.dtype == np.dtype('float64')) norm = linalg.norm(testmatrix, ord='fro') assert_array_equal(norm, [0, 1]) - self.assertEqual(norm.dtype, np.dtype('float64')) + assert_(norm.dtype == np.dtype('float64')) - self.assertRaises(TypeError, linalg.norm, testmatrix, ord='nuc') - self.assertRaises(ValueError, linalg.norm, testmatrix, ord=np.inf) - self.assertRaises(ValueError, linalg.norm, testmatrix, ord=-np.inf) - self.assertRaises(ValueError, linalg.norm, testmatrix, ord=0) - self.assertRaises(ValueError, linalg.norm, testmatrix, ord=1) - self.assertRaises(ValueError, linalg.norm, testmatrix, ord=-1) - self.assertRaises(TypeError, linalg.norm, testmatrix, ord=2) - self.assertRaises(TypeError, linalg.norm, testmatrix, ord=-2) - self.assertRaises(ValueError, linalg.norm, testmatrix, ord=3) + assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc') + assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf) + assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf) + assert_raises(ValueError, linalg.norm, testmatrix, ord=0) + assert_raises(ValueError, linalg.norm, testmatrix, ord=1) + assert_raises(ValueError, linalg.norm, testmatrix, ord=-1) + assert_raises(TypeError, linalg.norm, testmatrix, ord=2) + assert_raises(TypeError, linalg.norm, testmatrix, ord=-2) + assert_raises(ValueError, linalg.norm, testmatrix, ord=3) + + def test_lstsq_complex_larger_rhs(self): + # gh-9891 + size = 20 + n_rhs = 70 + G = np.random.randn(size, size) + 1j * np.random.randn(size, size) + u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs) + b = G.dot(u) + # This should work without segmentation fault. 
+ u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None) + # check results just in case + assert_array_almost_equal(u_lstsq, u) if __name__ == '__main__': diff -Nru python-numpy-1.13.3/numpy/linalg/umath_linalg.c.src python-numpy-1.14.5/numpy/linalg/umath_linalg.c.src --- python-numpy-1.13.3/numpy/linalg/umath_linalg.c.src 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/linalg/umath_linalg.c.src 2018-06-12 18:28:52.000000000 +0000 @@ -294,20 +294,20 @@ FNAME(ddot)(int *n, double *sx, int *incx, double *sy, int *incy); -extern f2c_complex -FNAME(cdotu)(int *n, +extern void +FNAME(cdotu)(f2c_complex *ret, int *n, f2c_complex *sx, int *incx, f2c_complex *sy, int *incy); -extern f2c_doublecomplex -FNAME(zdotu)(int *n, +extern void +FNAME(zdotu)(f2c_doublecomplex *ret, int *n, f2c_doublecomplex *sx, int *incx, f2c_doublecomplex *sy, int *incy); -extern f2c_complex -FNAME(cdotc)(int *n, +extern void +FNAME(cdotc)(f2c_complex *ret, int *n, f2c_complex *sx, int *incx, f2c_complex *sy, int *incy); -extern f2c_doublecomplex -FNAME(zdotc)(int *n, +extern void +FNAME(zdotc)(f2c_doublecomplex *ret, int *n, f2c_doublecomplex *sx, int *incx, f2c_doublecomplex *sy, int *incy); @@ -368,17 +368,11 @@ ***************************************************************************** */ -static NPY_INLINE void * -offset_ptr(void* ptr, ptrdiff_t offset) -{ - return (void*)((npy_uint8*)ptr + offset); -} - static NPY_INLINE int get_fp_invalid_and_clear(void) { int status; - status = npy_clear_floatstatus(); + status = npy_clear_floatstatus_barrier((char*)&status); return !!(status & NPY_FPE_INVALID); } @@ -389,7 +383,7 @@ npy_set_floatstatus_invalid(); } else { - npy_clear_floatstatus(); + npy_clear_floatstatus_barrier((char*)&error_occurred); } } @@ -553,104 +547,6 @@ params->row_strides, params->column_strides); } - -static NPY_INLINE float -FLOAT_add(float op1, float op2) -{ - return op1 + op2; -} - -static NPY_INLINE double -DOUBLE_add(double op1, double op2) -{ - 
return op1 + op2; -} - -static NPY_INLINE COMPLEX_t -CFLOAT_add(COMPLEX_t op1, COMPLEX_t op2) -{ - COMPLEX_t result; - result.array[0] = op1.array[0] + op2.array[0]; - result.array[1] = op1.array[1] + op2.array[1]; - - return result; -} - -static NPY_INLINE DOUBLECOMPLEX_t -CDOUBLE_add(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2) -{ - DOUBLECOMPLEX_t result; - result.array[0] = op1.array[0] + op2.array[0]; - result.array[1] = op1.array[1] + op2.array[1]; - - return result; -} - -static NPY_INLINE float -FLOAT_mul(float op1, float op2) -{ - return op1*op2; -} - -static NPY_INLINE double -DOUBLE_mul(double op1, double op2) -{ - return op1*op2; -} - - -static NPY_INLINE COMPLEX_t -CFLOAT_mul(COMPLEX_t op1, COMPLEX_t op2) -{ - COMPLEX_t result; - result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1]; - result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1]; - - return result; -} - -static NPY_INLINE DOUBLECOMPLEX_t -CDOUBLE_mul(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2) -{ - DOUBLECOMPLEX_t result; - result.array[0] = op1.array[0]*op2.array[0] - op1.array[1]*op2.array[1]; - result.array[1] = op1.array[1]*op2.array[0] + op1.array[0]*op2.array[1]; - - return result; -} - -static NPY_INLINE float -FLOAT_mulc(float op1, float op2) -{ - return op1*op2; -} - -static NPY_INLINE double -DOUBLE_mulc(float op1, float op2) -{ - return op1*op2; -} - -static NPY_INLINE COMPLEX_t -CFLOAT_mulc(COMPLEX_t op1, COMPLEX_t op2) -{ - COMPLEX_t result; - result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1]; - result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0]; - - return result; -} - -static NPY_INLINE DOUBLECOMPLEX_t -CDOUBLE_mulc(DOUBLECOMPLEX_t op1, DOUBLECOMPLEX_t op2) -{ - DOUBLECOMPLEX_t result; - result.array[0] = op1.array[0]*op2.array[0] + op1.array[1]*op2.array[1]; - result.array[1] = op1.array[0]*op2.array[1] - op1.array[1]*op2.array[0]; - - return result; -} - static NPY_INLINE void print_FLOAT(npy_float s) { 
diff -Nru python-numpy-1.13.3/numpy/ma/bench.py python-numpy-1.14.5/numpy/ma/bench.py --- python-numpy-1.13.3/numpy/ma/bench.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/bench.py 2018-06-12 17:31:56.000000000 +0000 @@ -1,4 +1,6 @@ #! /usr/bin/env python +# -*- coding: utf-8 -*- + from __future__ import division, print_function import timeit diff -Nru python-numpy-1.13.3/numpy/ma/core.py python-numpy-1.14.5/numpy/ma/core.py --- python-numpy-1.13.3/numpy/ma/core.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/core.py 2018-06-12 18:28:52.000000000 +0000 @@ -25,6 +25,7 @@ import sys import operator import warnings +import textwrap from functools import reduce if sys.version_info[0] >= 3: @@ -130,6 +131,12 @@ return if note is None: return initialdoc + + # FIXME: disable this function for the moment until we figure out what to + # do with it. Currently it may result in duplicate Notes sections or Notes + # sections in the wrong place + return initialdoc + newdoc = """ %s @@ -186,7 +193,7 @@ 'O': '?', 'S': b'N/A', 'u': 999999, - 'V': '???', + 'V': b'???', 'U': u'N/A' } @@ -205,6 +212,31 @@ min_filler.update([(np.float128, +np.inf)]) +def _recursive_fill_value(dtype, f): + """ + Recursively produce a fill value for `dtype`, calling f on scalar dtypes + """ + if dtype.names: + vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names) + return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + elif dtype.subdtype: + subtype, shape = dtype.subdtype + subval = _recursive_fill_value(subtype, f) + return np.full(shape, subval) + else: + return f(dtype) + + +def _get_dtype_of(obj): + """ Convert the argument for *_fill_value into a dtype """ + if isinstance(obj, np.dtype): + return obj + elif hasattr(obj, 'dtype'): + return obj.dtype + else: + return np.asanyarray(obj).dtype + + def default_fill_value(obj): """ Return the default fill value for the argument object. 
@@ -223,6 +255,11 @@ string 'N/A' ======== ======== + For structured types, a structured scalar is returned, with each field the + default fill value for its type. + + For subarray types, the fill value is an array of the same size containing + the default scalar fill value. Parameters ---------- @@ -245,39 +282,29 @@ (1e+20+0j) """ - if hasattr(obj, 'dtype'): - defval = _check_fill_value(None, obj.dtype) - elif isinstance(obj, np.dtype): - if obj.subdtype: - defval = default_filler.get(obj.subdtype[0].kind, '?') - elif obj.kind in 'Mm': - defval = default_filler.get(obj.str[1:], '?') - else: - defval = default_filler.get(obj.kind, '?') - elif isinstance(obj, float): - defval = default_filler['f'] - elif isinstance(obj, int) or isinstance(obj, long): - defval = default_filler['i'] - elif isinstance(obj, bytes): - defval = default_filler['S'] - elif isinstance(obj, unicode): - defval = default_filler['U'] - elif isinstance(obj, complex): - defval = default_filler['c'] - else: - defval = default_filler['O'] - return defval + def _scalar_fill_value(dtype): + if dtype.kind in 'Mm': + return default_filler.get(dtype.str[1:], '?') + else: + return default_filler.get(dtype.kind, '?') + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) -def _recursive_extremum_fill_value(ndtype, extremum): - names = ndtype.names - if names: - deflist = [] - for name in names: - fval = _recursive_extremum_fill_value(ndtype[name], extremum) - deflist.append(fval) - return tuple(deflist) - return extremum[ndtype] + +def _extremum_fill_value(obj, extremum, extremum_name): + + def _scalar_fill_value(dtype): + try: + return extremum[dtype] + except KeyError: + raise TypeError( + "Unsuitable type {} for calculating {}." 
+ .format(dtype, extremum_name) + ) + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) def minimum_fill_value(obj): @@ -289,7 +316,7 @@ Parameters ---------- - obj : ndarray or dtype + obj : ndarray, dtype or scalar An object that can be queried for it's numeric type. Returns @@ -328,19 +355,7 @@ inf """ - errmsg = "Unsuitable type for calculating minimum." - if hasattr(obj, 'dtype'): - return _recursive_extremum_fill_value(obj.dtype, min_filler) - elif isinstance(obj, float): - return min_filler[ntypes.typeDict['float_']] - elif isinstance(obj, int): - return min_filler[ntypes.typeDict['int_']] - elif isinstance(obj, long): - return min_filler[ntypes.typeDict['uint']] - elif isinstance(obj, np.dtype): - return min_filler[obj] - else: - raise TypeError(errmsg) + return _extremum_fill_value(obj, min_filler, "minimum") def maximum_fill_value(obj): @@ -352,7 +367,7 @@ Parameters ---------- - obj : {ndarray, dtype} + obj : ndarray, dtype or scalar An object that can be queried for it's numeric type. Returns @@ -391,48 +406,7 @@ -inf """ - errmsg = "Unsuitable type for calculating maximum." - if hasattr(obj, 'dtype'): - return _recursive_extremum_fill_value(obj.dtype, max_filler) - elif isinstance(obj, float): - return max_filler[ntypes.typeDict['float_']] - elif isinstance(obj, int): - return max_filler[ntypes.typeDict['int_']] - elif isinstance(obj, long): - return max_filler[ntypes.typeDict['uint']] - elif isinstance(obj, np.dtype): - return max_filler[obj] - else: - raise TypeError(errmsg) - - -def _recursive_set_default_fill_value(dt): - """ - Create the default fill value for a structured dtype. - - Parameters - ---------- - dt: dtype - The structured dtype for which to create the fill value. - - Returns - ------- - val: tuple - A tuple of values corresponding to the default structured fill value. 
- - """ - deflist = [] - for name in dt.names: - currenttype = dt[name] - if currenttype.subdtype: - currenttype = currenttype.subdtype[0] - - if currenttype.names: - deflist.append( - tuple(_recursive_set_default_fill_value(currenttype))) - else: - deflist.append(default_fill_value(currenttype)) - return tuple(deflist) + return _extremum_fill_value(obj, max_filler, "maximum") def _recursive_set_fill_value(fillvalue, dt): @@ -471,22 +445,16 @@ """ Private function validating the given `fill_value` for the given dtype. - If fill_value is None, it is set to the default corresponding to the dtype - if this latter is standard (no fields). If the datatype is flexible (named - fields), fill_value is set to a tuple whose elements are the default fill - values corresponding to each field. + If fill_value is None, it is set to the default corresponding to the dtype. If fill_value is not None, its value is forced to the given dtype. + The result is always a 0d array. """ ndtype = np.dtype(ndtype) fields = ndtype.fields if fill_value is None: - if fields: - fill_value = np.array(_recursive_set_default_fill_value(ndtype), - dtype=ndtype) - else: - fill_value = default_fill_value(ndtype) + fill_value = default_fill_value(ndtype) elif fields: fdtype = [(_[0], _[1]) for _ in ndtype.descr] if isinstance(fill_value, (ndarray, np.void)): @@ -823,7 +791,7 @@ ufunc_fills = {} -class _DomainCheckInterval: +class _DomainCheckInterval(object): """ Define a valid interval, so that : @@ -848,7 +816,7 @@ umath.less(x, self.a)) -class _DomainTan: +class _DomainTan(object): """ Define a valid interval for the `tan` function, so that: @@ -866,7 +834,7 @@ return umath.less(umath.absolute(umath.cos(x)), self.eps) -class _DomainSafeDivide: +class _DomainSafeDivide(object): """ Define a domain for safe division. @@ -887,7 +855,7 @@ return umath.absolute(a) * self.tolerance >= umath.absolute(b) -class _DomainGreater: +class _DomainGreater(object): """ DomainGreater(v)(x) is True where x <= v. 
@@ -903,7 +871,7 @@ return umath.less_equal(x, self.critical_value) -class _DomainGreaterEqual: +class _DomainGreaterEqual(object): """ DomainGreaterEqual(v)(x) is True where x < v. @@ -919,7 +887,17 @@ return umath.less(x, self.critical_value) -class _MaskedUnaryOperation: +class _MaskedUFunc(object): + def __init__(self, ufunc): + self.f = ufunc + self.__doc__ = ufunc.__doc__ + self.__name__ = ufunc.__name__ + + def __str__(self): + return "Masked version of {}".format(self.f) + + +class _MaskedUnaryOperation(_MaskedUFunc): """ Defines masked version of unary operations, where invalid values are pre-masked. @@ -938,11 +916,9 @@ """ def __init__(self, mufunc, fill=0, domain=None): - self.f = mufunc + super(_MaskedUnaryOperation, self).__init__(mufunc) self.fill = fill self.domain = domain - self.__doc__ = getattr(mufunc, "__doc__", str(mufunc)) - self.__name__ = getattr(mufunc, "__name__", str(mufunc)) ufunc_domain[mufunc] = domain ufunc_fills[mufunc] = fill @@ -994,11 +970,8 @@ masked_result._update_from(a) return masked_result - def __str__(self): - return "Masked version of %s. [Invalid values are masked]" % str(self.f) - -class _MaskedBinaryOperation: +class _MaskedBinaryOperation(_MaskedUFunc): """ Define masked version of binary operations, where invalid values are pre-masked. @@ -1025,11 +998,9 @@ abfunc(x, filly) = x for all x to enable reduce. 
""" - self.f = mbfunc + super(_MaskedBinaryOperation, self).__init__(mbfunc) self.fillx = fillx self.filly = filly - self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc)) - self.__name__ = getattr(mbfunc, "__name__", str(mbfunc)) ufunc_domain[mbfunc] = None ufunc_fills[mbfunc] = (fillx, filly) @@ -1068,7 +1039,7 @@ # any errors, just abort; impossible to guarantee masked values try: np.copyto(result, da, casting='unsafe', where=m) - except: + except Exception: pass # Transforms to a (subclass of) MaskedArray @@ -1146,11 +1117,9 @@ masked_result = result.view(tclass) return masked_result - def __str__(self): - return "Masked version of " + str(self.f) -class _DomainedBinaryOperation: +class _DomainedBinaryOperation(_MaskedUFunc): """ Define binary operations that have a domain, like divide. @@ -1175,12 +1144,10 @@ """abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce. """ - self.f = dbfunc + super(_DomainedBinaryOperation, self).__init__(dbfunc) self.domain = domain self.fillx = fillx self.filly = filly - self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc)) - self.__name__ = getattr(dbfunc, "__name__", str(dbfunc)) ufunc_domain[dbfunc] = domain ufunc_fills[dbfunc] = (fillx, filly) @@ -1214,7 +1181,7 @@ # only add back if it can be cast safely if np.can_cast(masked_da.dtype, result.dtype, casting='safe'): result += masked_da - except: + except Exception: pass # Transforms to a (subclass of) MaskedArray @@ -1226,9 +1193,6 @@ masked_result._update_from(b) return masked_result - def __str__(self): - return "Masked version of " + str(self.f) - # Unary ufuncs exp = _MaskedUnaryOperation(umath.exp) @@ -1329,7 +1293,7 @@ descr.append((name, _recurse(field[0], primitive_dtype))) new_dtype = np.dtype(descr) - # Is this some kind of composite a la (np.float,2) + # Is this some kind of composite a la (float,2) elif dtype.subdtype: descr = list(dtype.subdtype) descr[0] = _recurse(dtype.subdtype[0], primitive_dtype) @@ -1381,7 +1345,7 @@ 
-------- >>> import numpy.ma as ma >>> dtype = np.dtype({'names':['foo', 'bar'], - 'formats':[np.float32, np.int]}) + 'formats':[np.float32, int]}) >>> dtype dtype([('foo', '>> ma.make_mask_descr(dtype) @@ -1425,13 +1389,13 @@ fill_value=999999) >>> ma.getmask(a) array([[False, True], - [False, False]], dtype=bool) + [False, False]]) Equivalently use the `MaskedArray` `mask` attribute. >>> a.mask array([[False, True], - [False, False]], dtype=bool) + [False, False]]) Result when mask == `nomask` @@ -1489,7 +1453,7 @@ fill_value=999999) >>> ma.getmaskarray(a) array([[False, True], - [False, False]], dtype=bool) + [False, False]]) Result when mask == ``nomask`` @@ -1503,7 +1467,7 @@ fill_value=999999) >>> >ma.getmaskarray(b) array([[False, False], - [False, False]], dtype=bool) + [False, False]]) """ mask = getmask(arr) @@ -1555,14 +1519,14 @@ False >>> m = np.array([False, True, False]) >>> m - array([False, True, False], dtype=bool) + array([False, True, False]) >>> ma.is_mask(m) True Arrays with complex dtypes don't return True. >>> dtype = np.dtype({'names':['monty', 'pithon'], - 'formats':[np.bool, np.bool]}) + 'formats':[bool, bool]}) >>> dtype dtype([('monty', '|b1'), ('pithon', '|b1')]) >>> m = np.array([(True, False), (False, True), (True, False)], @@ -1580,6 +1544,16 @@ return False +def _shrink_mask(m): + """ + Shrink a mask to nomask if possible + """ + if not m.dtype.names and not m.any(): + return nomask + else: + return m + + def make_mask(m, copy=False, shrink=True, dtype=MaskType): """ Create a boolean mask from an array. 
@@ -1613,13 +1587,13 @@ >>> import numpy.ma as ma >>> m = [True, False, True, True] >>> ma.make_mask(m) - array([ True, False, True, True], dtype=bool) + array([ True, False, True, True]) >>> m = [1, 0, 1, 1] >>> ma.make_mask(m) - array([ True, False, True, True], dtype=bool) + array([ True, False, True, True]) >>> m = [1, 0, 2, -3] >>> ma.make_mask(m) - array([ True, False, True, True], dtype=bool) + array([ True, False, True, True]) Effect of the `shrink` parameter. @@ -1629,7 +1603,7 @@ >>> ma.make_mask(m) False >>> ma.make_mask(m, shrink=False) - array([False, False, False, False], dtype=bool) + array([False, False, False, False]) Using a flexible `dtype`. @@ -1641,7 +1615,7 @@ >>> arr [(1, 0), (0, 1), (1, 0), (1, 0)] >>> dtype = np.dtype({'names':['man', 'mouse'], - 'formats':[np.int, np.int]}) + 'formats':[int, int]}) >>> arr = np.array(arr, dtype=dtype) >>> arr array([(1, 0), (0, 1), (1, 0), (1, 0)], @@ -1656,13 +1630,17 @@ # Make sure the input dtype is valid. dtype = make_mask_descr(dtype) + + # legacy boolean special case: "existence of fields implies true" + if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_: + return np.ones(m.shape, dtype=dtype) + # Fill the mask in case there are missing data; turn it into an ndarray. result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True) # Bas les masques ! - if shrink and (not result.dtype.names) and (not result.any()): - return nomask - else: - return result + if shrink: + result = _shrink_mask(result) + return result def make_mask_none(newshape, dtype=None): @@ -1695,12 +1673,12 @@ -------- >>> import numpy.ma as ma >>> ma.make_mask_none((3,)) - array([False, False, False], dtype=bool) + array([False, False, False]) Defining a more complex dtype. 
>>> dtype = np.dtype({'names':['foo', 'bar'], - 'formats':[np.float32, np.int]}) + 'formats':[np.float32, int]}) >>> dtype dtype([('foo', '>> ma.make_mask_none((3,), dtype=dtype) @@ -1748,7 +1726,7 @@ >>> m1 = np.ma.make_mask([0, 1, 1, 0]) >>> m2 = np.ma.make_mask([1, 0, 0, 0]) >>> np.ma.mask_or(m1, m2) - array([ True, True, True, False], dtype=bool) + array([ True, True, True, False]) """ @@ -1798,18 +1776,18 @@ Examples -------- - >>> mask = np.array([0, 0, 1], dtype=np.bool) + >>> mask = np.array([0, 0, 1]) >>> flatten_mask(mask) - array([False, False, True], dtype=bool) + array([False, False, True]) >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) >>> flatten_mask(mask) - array([False, False, False, True], dtype=bool) + array([False, False, False, True]) >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) >>> flatten_mask(mask) - array([False, False, False, False, False, True], dtype=bool) + array([False, False, False, False, False, True]) """ @@ -1949,7 +1927,7 @@ """ # Make sure that condition is a valid standard-type mask. - cond = make_mask(condition) + cond = make_mask(condition, shrink=False) a = np.array(a, copy=copy, subok=True) (cshape, ashape) = (cond.shape, a.shape) @@ -1963,7 +1941,7 @@ cls = MaskedArray result = a.view(cls) # Assign to *.mask so that structured masks are handled correctly. - result.mask = cond + result.mask = _shrink_mask(cond) return result @@ -2273,12 +2251,14 @@ Mask using floating point equality. Return a MaskedArray, masked where the data in array `x` are approximately - equal to `value`, i.e. where the following condition is True + equal to `value`, determined using `isclose`. The default tolerances for + `masked_values` are the same as those for `isclose`. - (abs(x - value) <= atol+rtol*abs(value)) + For integer types, exact equality is used, in the same way as + `masked_equal`. 
The fill_value is set to `value` and the mask is set to ``nomask`` if - possible. For integers, consider using ``masked_equal``. + possible. Parameters ---------- @@ -2286,10 +2266,8 @@ Array to mask. value : float Masking value. - rtol : float, optional - Tolerance parameter. - atol : float, optional - Tolerance parameter (1e-8). + rtol, atol : float, optional + Tolerance parameters passed on to `isclose` copy : bool, optional Whether to return a copy of `x`. shrink : bool, optional @@ -2337,17 +2315,13 @@ fill_value=999999) """ - mabs = umath.absolute xnew = filled(x, value) - if issubclass(xnew.dtype.type, np.floating): - condition = umath.less_equal( - mabs(xnew - value), atol + rtol * mabs(value)) - mask = getmask(x) + if np.issubdtype(xnew.dtype, np.floating): + mask = np.isclose(xnew, value, atol=atol, rtol=rtol) else: - condition = umath.equal(xnew, value) - mask = nomask - mask = mask_or(mask, make_mask(condition, shrink=shrink), shrink=shrink) - return masked_array(xnew, mask=mask, copy=copy, fill_value=value) + mask = umath.equal(xnew, value) + return masked_array( + xnew, mask=mask, copy=copy, fill_value=value, shrink=shrink) def masked_invalid(a, copy=True): @@ -2366,7 +2340,7 @@ Examples -------- >>> import numpy.ma as ma - >>> a = np.arange(5, dtype=np.float) + >>> a = np.arange(5, dtype=float) >>> a[2] = np.NaN >>> a[3] = np.PINF >>> a @@ -2397,7 +2371,7 @@ ############################################################################### -class _MaskedPrintOption: +class _MaskedPrintOption(object): """ Handle the string used to represent missing data in a masked array. 
@@ -2456,40 +2430,44 @@ """ names = result.dtype.names - for name in names: - (curdata, curmask) = (result[name], mask[name]) - if curdata.dtype.names: + if names: + for name in names: + curdata = result[name] + curmask = mask[name] _recursive_printoption(curdata, curmask, printopt) - else: - np.copyto(curdata, printopt, where=curmask) + else: + np.copyto(result, printopt, where=mask) return -_print_templates = dict(long_std="""\ -masked_%(name)s(data = - %(data)s, - %(nlen)s mask = - %(mask)s, - %(nlen)s fill_value = %(fill)s) -""", - short_std="""\ -masked_%(name)s(data = %(data)s, - %(nlen)s mask = %(mask)s, -%(nlen)s fill_value = %(fill)s) -""", - long_flx="""\ -masked_%(name)s(data = - %(data)s, - %(nlen)s mask = - %(mask)s, -%(nlen)s fill_value = %(fill)s, - %(nlen)s dtype = %(dtype)s) -""", - short_flx="""\ -masked_%(name)s(data = %(data)s, -%(nlen)s mask = %(mask)s, -%(nlen)s fill_value = %(fill)s, -%(nlen)s dtype = %(dtype)s) -""") +# For better or worse, these end in a newline +_legacy_print_templates = dict( + long_std=textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + long_flx=textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """), + short_std=textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + short_flx=textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """) +) ############################################################################### # MaskedArray class # @@ -2598,14 +2576,11 @@ result = result.view(type(self)) result._update_from(self) mask = self._mask - if result.ndim: - if not onmask: - result.__setmask__(mask) - elif mask is not nomask: - result.__setmask__(getattr(mask, funcname)(*args, 
**params)) - else: - if mask.ndim and (not mask.dtype.names and mask.all()): - return masked + if not onmask: + result.__setmask__(mask) + elif mask is not nomask: + # __setmask__ makes a copy, which we don't want + result._mask = getattr(mask, funcname)(*args, **params) return result methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None) if methdoc is not None: @@ -2935,7 +2910,7 @@ Copies some attributes of obj to self. """ - if obj is not None and isinstance(obj, ndarray): + if isinstance(obj, ndarray): _baseclass = type(obj) else: _baseclass = ndarray @@ -3041,18 +3016,20 @@ if context is not None: result._mask = result._mask.copy() - (func, args, _) = context - m = reduce(mask_or, [getmaskarray(arg) for arg in args]) + func, args, out_i = context + # args sometimes contains outputs (gh-10459), which we don't want + input_args = args[:func.nin] + m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) # Get the domain mask domain = ufunc_domain.get(func, None) if domain is not None: # Take the domain, and make sure it's a ndarray - if len(args) > 2: + if len(input_args) > 2: with np.errstate(divide='ignore', invalid='ignore'): - d = filled(reduce(domain, args), True) + d = filled(reduce(domain, input_args), True) else: with np.errstate(divide='ignore', invalid='ignore'): - d = filled(domain(*args), True) + d = filled(domain(*input_args), True) if d.any(): # Fill the result where the domain is wrong @@ -3191,16 +3168,16 @@ """ newtype = np.dtype(newtype) + newmasktype = make_mask_descr(newtype) + output = self._data.astype(newtype).view(type(self)) output._update_from(self) - names = output.dtype.names - if names is None: - output._mask = self._mask.astype(bool) + + if self._mask is nomask: + output._mask = nomask else: - if self._mask is nomask: - output._mask = nomask - else: - output._mask = self._mask.astype([(n, bool) for n in names]) + output._mask = self._mask.astype(newmasktype) + # Don't check _fill_value if it's None, that'll 
speed things up if self._fill_value is not None: output._fill_value = _check_fill_value(self._fill_value, newtype) @@ -3357,8 +3334,6 @@ _mask[indx] = tuple([True] * nbfields) else: _mask[indx] = True - if not self._isfield: - self._sharedmask = False return # Get the _data part of the new value @@ -3374,27 +3349,6 @@ _mask = self._mask = make_mask_none(self.shape, _dtype) _mask[indx] = mval elif not self._hardmask: - # Unshare the mask if necessary to avoid propagation - # We want to remove the unshare logic from this place in the - # future. Note that _sharedmask has lots of false positives. - if not self._isfield: - notthree = getattr(sys, 'getrefcount', False) and (sys.getrefcount(_mask) != 3) - if self._sharedmask and not ( - # If no one else holds a reference (we have two - # references (_mask and self._mask) -- add one for - # getrefcount) and the array owns its own data - # copying the mask should do nothing. - (not notthree) and _mask.flags.owndata): - # 2016.01.15 -- v1.11.0 - warnings.warn( - "setting an item on a masked array which has a shared " - "mask will not copy the mask and also change the " - "original mask array in the future.\n" - "Check the NumPy 1.11 release notes for more " - "information.", - MaskedArrayFutureWarning, stacklevel=2) - self.unshare_mask() - _mask = self._mask # Set the data, then the mask _data[indx] = dval _mask[indx] = mval @@ -3601,15 +3555,13 @@ >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) >>> x.mask array([[False, False], - [False, False]], dtype=bool) + [False, False]]) >>> x.shrink_mask() >>> x.mask False """ - m = self._mask - if m.ndim and not m.any(): - self._mask = nomask + self._mask = _shrink_mask(self._mask) return self baseclass = property(fget=lambda self: self._baseclass, @@ -3885,78 +3837,127 @@ _new._mask = _mask.compress(condition, axis=axis) return _new - def __str__(self): + def _insert_masked_print(self): """ - String representation. 
- + Replace masked values with masked_print_option, casting all innermost + dtypes to object. """ if masked_print_option.enabled(): - f = masked_print_option - if self is masked: - return str(f) - m = self._mask - if m is nomask: + mask = self._mask + if mask is nomask: res = self._data else: - if m.shape == () and m.itemsize==len(m.dtype): - if m.dtype.names: - m = m.view((bool, len(m.dtype))) - if m.any(): - return str(tuple((f if _m else _d) for _d, _m in - zip(self._data.tolist(), m))) - else: - return str(self._data) - elif m: - return str(f) - else: - return str(self._data) # convert to object array to make filled work - names = self.dtype.names - if names is None: - data = self._data - mask = m - # For big arrays, to avoid a costly conversion to the - # object dtype, extract the corners before the conversion. - print_width = (self._print_width if self.ndim > 1 - else self._print_width_1d) - for axis in range(self.ndim): - if data.shape[axis] > print_width: - ind = print_width // 2 - arr = np.split(data, (ind, -ind), axis=axis) - data = np.concatenate((arr[0], arr[2]), axis=axis) - arr = np.split(mask, (ind, -ind), axis=axis) - mask = np.concatenate((arr[0], arr[2]), axis=axis) - res = data.astype("O") - res.view(ndarray)[mask] = f - else: - rdtype = _replace_dtype_fields(self.dtype, "O") - res = self._data.astype(rdtype) - _recursive_printoption(res, m, f) + data = self._data + # For big arrays, to avoid a costly conversion to the + # object dtype, extract the corners before the conversion. 
+ print_width = (self._print_width if self.ndim > 1 + else self._print_width_1d) + for axis in range(self.ndim): + if data.shape[axis] > print_width: + ind = print_width // 2 + arr = np.split(data, (ind, -ind), axis=axis) + data = np.concatenate((arr[0], arr[2]), axis=axis) + arr = np.split(mask, (ind, -ind), axis=axis) + mask = np.concatenate((arr[0], arr[2]), axis=axis) + + rdtype = _replace_dtype_fields(self.dtype, "O") + res = data.astype(rdtype) + _recursive_printoption(res, mask, masked_print_option) else: res = self.filled(self.fill_value) - return str(res) + return res + + def __str__(self): + return str(self._insert_masked_print()) + + if sys.version_info.major < 3: + def __unicode__(self): + return unicode(self._insert_masked_print()) def __repr__(self): """ Literal string representation. """ - n = self.ndim if self._baseclass is np.ndarray: name = 'array' else: name = self._baseclass.__name__ - parameters = dict(name=name, nlen=" " * len(name), - data=str(self), mask=str(self._mask), - fill=str(self.fill_value), dtype=str(self.dtype)) - if self.dtype.names: - if n <= 1: - return _print_templates['short_flx'] % parameters - return _print_templates['long_flx'] % parameters - elif n <= 1: - return _print_templates['short_std'] % parameters - return _print_templates['long_std'] % parameters + + # 2016-11-19: Demoted to legacy format + if np.get_printoptions()['legacy'] == '1.13': + is_long = self.ndim > 1 + parameters = dict( + name=name, + nlen=" " * len(name), + data=str(self), + mask=str(self._mask), + fill=str(self.fill_value), + dtype=str(self.dtype) + ) + is_structured = bool(self.dtype.names) + key = '{}_{}'.format( + 'long' if is_long else 'short', + 'flx' if is_structured else 'std' + ) + return _legacy_print_templates[key] % parameters + + prefix = 'masked_{}('.format(name) + + dtype_needed = ( + not np.core.arrayprint.dtype_is_implied(self.dtype) or + np.all(self.mask) or + self.size == 0 + ) + + # determine which keyword args need to be shown + 
keys = ['data', 'mask', 'fill_value'] + if dtype_needed: + keys.append('dtype') + + # array has only one row (non-column) + is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) + + # choose what to indent each keyword with + min_indent = 2 + if is_one_row: + # first key on the same line as the type, remaining keys + # aligned by equals + indents = {} + indents[keys[0]] = prefix + for k in keys[1:]: + n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) + indents[k] = ' ' * n + prefix = '' # absorbed into the first indent + else: + # each key on its own line, indented by two spaces + indents = {k: ' ' * min_indent for k in keys} + prefix = prefix + '\n' # first key on the next line + + # format the field values + reprs = {} + reprs['data'] = np.array2string( + self._insert_masked_print(), + separator=", ", + prefix=indents['data'] + 'data=', + suffix=',') + reprs['mask'] = np.array2string( + self._mask, + separator=", ", + prefix=indents['mask'] + 'mask=', + suffix=',') + reprs['fill_value'] = repr(self.fill_value) + if dtype_needed: + reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype) + + # join keys with values and indentations + result = ',\n'.join( + '{}{}={}'.format(indents[k], k, reprs[k]) + for k in keys + ) + return prefix + result + ')' def _delegate_binop(self, other): # This emulates the logic in @@ -4022,6 +4023,7 @@ mask = np.broadcast_to(mask, check.shape).copy() check = check.view(type(self)) + check._update_from(self) check._mask = mask return check @@ -4305,6 +4307,18 @@ elif self._mask: raise MaskError('Cannot convert masked element to a Python int.') return int(self.item()) + + def __long__(self): + """ + Convert to long. 
+ """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be conveted " + "to Python scalars") + elif self._mask: + raise MaskError('Cannot convert masked element to a Python long.') + return long(self.item()) + def get_imag(self): """ @@ -4475,8 +4489,6 @@ return (~m).sum(axis=axis, dtype=np.intp, **kwargs) - flatten = _arraymethod('flatten') - def ravel(self, order='C'): """ Returns a 1D version of self, as a view. @@ -4522,8 +4534,6 @@ r._mask = nomask return r - repeat = _arraymethod('repeat') - def reshape(self, *s, **kwargs): """ @@ -4659,7 +4669,7 @@ if self._mask is nomask and getmask(values) is nomask: return - m = getmaskarray(self).copy() + m = getmaskarray(self) if getmask(values) is nomask: m.put(indices, False, mode=mode) @@ -4717,6 +4727,7 @@ OWNDATA : False WRITEABLE : True ALIGNED : True + WRITEBACKIFCOPY : False UPDATEIFCOPY : False """ @@ -4905,7 +4916,7 @@ return result.astype(dtype) else: D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) - return D.astype(dtype).filled(0).sum(axis=None, out=out) + return D.astype(dtype).filled(0).sum(axis=-1, out=out) trace.__doc__ = ndarray.trace.__doc__ def dot(self, b, out=None, strict=False): @@ -5810,14 +5821,15 @@ return out[()] # Array methods - copy = _arraymethod('copy') - diagonal = _arraymethod('diagonal') - transpose = _arraymethod('transpose') - T = property(fget=lambda self: self.transpose()) - swapaxes = _arraymethod('swapaxes') clip = _arraymethod('clip', onmask=False) copy = _arraymethod('copy') + diagonal = _arraymethod('diagonal') + flatten = _arraymethod('flatten') + repeat = _arraymethod('repeat') squeeze = _arraymethod('squeeze') + swapaxes = _arraymethod('swapaxes') + T = property(fget=lambda self: self.transpose()) + transpose = _arraymethod('transpose') def tolist(self, fill_value=None): """ @@ -6070,7 +6082,7 @@ def _get_data(self): # Make sure that the _data part is a np.void - return self.view(ndarray)[()] + return super(mvoid, self)._data[()] _data = 
property(fget=_get_data) @@ -6106,19 +6118,13 @@ def __str__(self): m = self._mask if m is nomask: - return self._data.__str__() - printopt = masked_print_option - rdtype = _replace_dtype_fields(self._data.dtype, "O") - - # temporary hack to fix gh-7493. A more permanent fix - # is proposed in gh-6053, after which the next two - # lines should be changed to - # res = np.array([self._data], dtype=rdtype) - res = np.empty(1, rdtype) - res[:1] = self._data + return str(self._data) - _recursive_printoption(res, self._mask, printopt) - return str(res[0]) + rdtype = _replace_dtype_fields(self._data.dtype, "O") + data_arr = super(mvoid, self)._data + res = data_arr.astype(rdtype) + _recursive_printoption(res, self._mask, masked_print_option) + return str(res) __repr__ = __str__ @@ -6246,17 +6252,46 @@ class MaskedConstant(MaskedArray): - # We define the masked singleton as a float for higher precedence. - # Note that it can be tricky sometimes w/ type comparison - _data = data = np.array(0.) - _mask = mask = np.array(True) - _baseclass = ndarray + # the lone np.ma.masked instance + __singleton = None + + @classmethod + def __has_singleton(cls): + # second case ensures `cls.__singleton` is not just a view on the + # superclass singleton + return cls.__singleton is not None and type(cls.__singleton) is cls + + def __new__(cls): + if not cls.__has_singleton(): + # We define the masked singleton as a float for higher precedence. + # Note that it can be tricky sometimes w/ type comparison + data = np.array(0.) 
+ mask = np.array(True) + + # prevent any modifications + data.flags.writeable = False + mask.flags.writeable = False + + # don't fall back on MaskedArray.__new__(MaskedConstant), since + # that might confuse it - this way, the construction is entirely + # within our control + cls.__singleton = MaskedArray(data, mask=mask).view(cls) - def __new__(self): - return self._data.view(self) + return cls.__singleton def __array_finalize__(self, obj): - return + if not self.__has_singleton(): + # this handles the `.view` in __new__, which we want to copy across + # properties normally + return super(MaskedConstant, self).__array_finalize__(obj) + elif self is self.__singleton: + # not clear how this can happen, play it safe + pass + else: + # everywhere else, we want to downcast to MaskedArray, to prevent a + # duplicate maskedconstant. + self.__class__ = MaskedArray + MaskedArray.__array_finalize__(self, obj) def __array_prepare__(self, obj, context=None): return self.view(MaskedArray).__array_prepare__(obj, context) @@ -6267,17 +6302,41 @@ def __str__(self): return str(masked_print_option._display) - def __repr__(self): - return 'masked' + if sys.version_info.major < 3: + def __unicode__(self): + return unicode(masked_print_option._display) - def flatten(self): - return masked_array([self._data], dtype=float, mask=[True]) + def __repr__(self): + if self is MaskedConstant.__singleton: + return 'masked' + else: + # it's a subclass, or something is wrong, make it obvious + return object.__repr__(self) def __reduce__(self): """Override of MaskedArray's __reduce__. """ return (self.__class__, ()) + # inplace operations have no effect. 
We have to override them to avoid + # trying to modify the readonly data and mask arrays + def __iop__(self, other): + return self + __iadd__ = \ + __isub__ = \ + __imul__ = \ + __ifloordiv__ = \ + __itruediv__ = \ + __ipow__ = \ + __iop__ + del __iop__ # don't leave this around + + def copy(self, *args, **kwargs): + """ Copy is a no-op on the maskedconstant, as it is a scalar """ + # maskedconstant is a scalar, so copy doesn't need to copy. There's + # precedent for this with `np.bool_` scalars. + return self + masked = masked_singleton = MaskedConstant() masked_array = MaskedArray @@ -6358,7 +6417,7 @@ ############################################################################## -class _extrema_operation(object): +class _extrema_operation(_MaskedUFunc): """ Generic class for maximum/minimum functions. @@ -6368,11 +6427,9 @@ """ def __init__(self, ufunc, compare, fill_value): - self.ufunc = ufunc + super(_extrema_operation, self).__init__(ufunc) self.compare = compare self.fill_value_func = fill_value - self.__doc__ = ufunc.__doc__ - self.__name__ = ufunc.__name__ def __call__(self, a, b=None): "Executes the call behavior." 
@@ -6407,11 +6464,11 @@ kwargs = dict() if m is nomask: - t = self.ufunc.reduce(target, **kwargs) + t = self.f.reduce(target, **kwargs) else: target = target.filled( self.fill_value_func(target)).view(type(target)) - t = self.ufunc.reduce(target, **kwargs) + t = self.f.reduce(target, **kwargs) m = umath.logical_and.reduce(m, **kwargs) if hasattr(t, '_mask'): t._mask = m @@ -6429,7 +6486,7 @@ ma = getmaskarray(a) mb = getmaskarray(b) m = logical_or.outer(ma, mb) - result = self.ufunc.outer(filled(a), filled(b)) + result = self.f.outer(filled(a), filled(b)) if not isinstance(result, MaskedArray): result = result.view(MaskedArray) result._mask = m @@ -6479,7 +6536,7 @@ ############################################################################## -class _frommethod: +class _frommethod(object): """ Define functions from existing MaskedArray methods. @@ -6709,12 +6766,11 @@ return data # OK, so we have to concatenate the masks dm = np.concatenate([getmaskarray(a) for a in arrays], axis) + dm = dm.reshape(d.shape) + # If we decide to keep a '_shrinkmask' option, we want to check that # all of them are True, and then check for dm.any() - if not dm.dtype.fields and not dm.any(): - data._mask = nomask - else: - data._mask = dm.reshape(d.shape) + data._mask = _shrink_mask(dm) return data @@ -7132,8 +7188,7 @@ mask = np.where(cm, np.ones((), dtype=mask.dtype), mask) # collapse the mask, for backwards compatibility - if mask.dtype == np.bool_ and not mask.any(): - mask = nomask + mask = _shrink_mask(mask) return masked_array(data, mask=mask) @@ -7295,7 +7350,7 @@ Examples -------- >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=np.int) + >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -7432,11 +7487,7 @@ Returns the inner product of a and b for arrays of floating point types. Like the generic NumPy equivalent the product sum is over the last dimension - of a and b. - - Notes - ----- - The first argument is not conjugated. + of a and b. 
The first argument is not conjugated. """ fa = filled(a, 0) @@ -7476,8 +7527,8 @@ if propagate_mask: # results which are contributed to by either item in any pair being invalid mask = ( - f(getmaskarray(a), np.ones(np.shape(v), dtype=np.bool), mode=mode) - | f(np.ones(np.shape(a), dtype=np.bool), getmaskarray(v), mode=mode) + f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode) + | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode) ) data = f(getdata(a), getdata(v), mode=mode) else: @@ -7823,8 +7874,10 @@ """ if not hasattr(F, 'readline'): - F = open(F, 'w') - return pickle.dump(a, F) + with open(F, 'w') as F: + pickle.dump(a, F) + else: + pickle.dump(a, F) def dumps(a): @@ -7864,8 +7917,10 @@ """ if not hasattr(F, 'readline'): - F = open(F, 'r') - return pickle.load(F) + with open(F, 'r') as F: + return pickle.load(F) + else: + return pickle.load(F) def loads(strg): @@ -7957,7 +8012,7 @@ return masked_array(fxarray['_data'], mask=fxarray['_mask']) -class _convert2ma: +class _convert2ma(object): """ Convert functions from numpy to numpy.ma. diff -Nru python-numpy-1.13.3/numpy/ma/extras.py python-numpy-1.14.5/numpy/ma/extras.py --- python-numpy-1.13.3/numpy/ma/extras.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/extras.py 2018-06-12 18:28:52.000000000 +0000 @@ -215,7 +215,7 @@ #####-------------------------------------------------------------------------- #---- --- Standard functions --- #####-------------------------------------------------------------------------- -class _fromnxfunction: +class _fromnxfunction(object): """ Defines a wrapper to adapt NumPy functions to masked arrays. 
@@ -778,7 +778,7 @@ # not necessary for scalar True/False masks try: np.copyto(low.mask, high.mask, where=odd) - except: + except Exception: pass if np.issubdtype(asorted.dtype, np.inexact): @@ -939,7 +939,7 @@ Examples -------- >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=np.int) + >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -984,7 +984,7 @@ Examples -------- >>> import numpy.ma as ma - >>> a = np.zeros((3, 3), dtype=np.int) + >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a array([[0, 0, 0], @@ -1208,7 +1208,7 @@ numpy.union1d : Equivalent function for ndarrays. """ - return unique(ma.concatenate((ar1, ar2))) + return unique(ma.concatenate((ar1, ar2), axis=None)) def setdiff1d(ar1, ar2, assume_unique=False): diff -Nru python-numpy-1.13.3/numpy/ma/__init__.py python-numpy-1.14.5/numpy/ma/__init__.py --- python-numpy-1.13.3/numpy/ma/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -51,6 +51,6 @@ __all__ += core.__all__ __all__ += extras.__all__ -from numpy.testing.nosetester import _numpy_tester +from numpy.testing import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench diff -Nru python-numpy-1.13.3/numpy/ma/mrecords.py python-numpy-1.14.5/numpy/ma/mrecords.py --- python-numpy-1.13.3/numpy/ma/mrecords.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/mrecords.py 2018-06-12 17:31:56.000000000 +0000 @@ -243,7 +243,7 @@ except IndexError: # Couldn't find a mask: use the default (nomask) pass - hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any() + hasmasked = _mask.view((bool, (len(_mask.dtype) or 1))).any() if (obj.shape or hasmasked): obj = obj.view(MaskedArray) obj._baseclass = ndarray @@ -276,7 +276,7 @@ try: # Is attr a generic attribute ? 
ret = object.__setattr__(self, attr, val) - except: + except Exception: # Not a generic attribute: exit if it's not a valid field fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} optinfo = ndarray.__getattribute__(self, '_optinfo') or {} @@ -294,7 +294,7 @@ # internal attribute. try: object.__delattr__(self, attr) - except: + except Exception: return ret # Let's try to set the field try: diff -Nru python-numpy-1.13.3/numpy/ma/tests/test_core.py python-numpy-1.14.5/numpy/ma/tests/test_core.py --- python-numpy-1.13.3/numpy/ma/tests/test_core.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/tests/test_core.py 2018-06-12 18:28:52.000000000 +0000 @@ -8,10 +8,13 @@ __author__ = "Pierre GF Gerard-Marchant" +import sys import warnings import pickle import operator import itertools +import sys +import textwrap from functools import reduce @@ -20,7 +23,8 @@ import numpy.core.fromnumeric as fromnumeric import numpy.core.umath as umath from numpy.testing import ( - TestCase, run_module_suite, assert_raises, assert_warns, suppress_warnings) + run_module_suite, assert_raises, assert_warns, suppress_warnings, dec + ) from numpy import ndarray from numpy.compat import asbytes, asbytes_nested from numpy.ma.testutils import ( @@ -45,6 +49,7 @@ ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, ) +from numpy.testing import dec pi = np.pi @@ -55,10 +60,10 @@ "setting an item on a masked array which has a shared mask will not copy") -class TestMaskedArray(TestCase): +class TestMaskedArray(object): # Base test class for MaskedArrays. - def setUp(self): + def setup(self): # Base data definition. 
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) @@ -93,14 +98,14 @@ x = masked_array(0, mask=False) assert_equal(str(x), '0') x = array(0, mask=1) - self.assertTrue(x.filled().dtype is x._data.dtype) + assert_(x.filled().dtype is x._data.dtype) def test_basic1d(self): # Test of basic array creation and properties in 1 dimension. (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - self.assertTrue(not isMaskedArray(x)) - self.assertTrue(isMaskedArray(xm)) - self.assertTrue((xm - ym).filled(0).any()) + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_((xm - ym).filled(0).any()) fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) s = x.shape assert_equal(np.shape(xm), s) @@ -123,8 +128,8 @@ ym.shape = s xf.shape = s - self.assertTrue(not isMaskedArray(x)) - self.assertTrue(isMaskedArray(xm)) + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) assert_equal(shape(xm), s) assert_equal(xm.shape, s) assert_equal(xm.size, reduce(lambda x, y:x * y, s)) @@ -217,7 +222,7 @@ x.mask = nomask data = array((x, x[::-1])) assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) - self.assertTrue(data.mask is nomask) + assert_(data.mask is nomask) def test_creation_from_ndarray_with_padding(self): x = np.array([('A', 0)], dtype={'names':['f0','f1'], @@ -238,18 +243,18 @@ def test_asarray_default_order(self): # See Issue #6646 m = np.eye(3).T - self.assertFalse(m.flags.c_contiguous) + assert_(not m.flags.c_contiguous) new_m = asarray(m) - self.assertTrue(new_m.flags.c_contiguous) + assert_(new_m.flags.c_contiguous) def test_asarray_enforce_order(self): # See Issue #6646 m = np.eye(3).T - self.assertFalse(m.flags.c_contiguous) + assert_(not m.flags.c_contiguous) new_m = asarray(m, order='C') - self.assertTrue(new_m.flags.c_contiguous) + assert_(new_m.flags.c_contiguous) def test_fix_invalid(self): # Checks fix_invalid. 
@@ -263,8 +268,8 @@ # Test of masked element x = arange(6) x[1] = masked - self.assertTrue(str(masked) == '--') - self.assertTrue(x[1] is masked) + assert_(str(masked) == '--') + assert_(x[1] is masked) assert_equal(filled(x[1], 0), 0) def test_set_element_as_object(self): @@ -273,12 +278,12 @@ x = (1, 2, 3, 4, 5) a[0] = x assert_equal(a[0], x) - self.assertTrue(a[0] is x) + assert_(a[0] is x) import datetime dt = datetime.datetime.now() a[0] = dt - self.assertTrue(a[0] is dt) + assert_(a[0] is dt) def test_indexing(self): # Tests conversions and indexing @@ -379,32 +384,43 @@ n = [0, 0, 1, 0, 0] m = make_mask(n) m2 = make_mask(m) - self.assertTrue(m is m2) + assert_(m is m2) m3 = make_mask(m, copy=1) - self.assertTrue(m is not m3) + assert_(m is not m3) x1 = np.arange(5) y1 = array(x1, mask=m) assert_equal(y1._data.__array_interface__, x1.__array_interface__) - self.assertTrue(allequal(x1, y1.data)) + assert_(allequal(x1, y1.data)) assert_equal(y1._mask.__array_interface__, m.__array_interface__) y1a = array(y1) - self.assertTrue(y1a._data.__array_interface__ == + assert_(y1a._data.__array_interface__ == y1._data.__array_interface__) - self.assertTrue(y1a.mask is y1.mask) + assert_(y1a.mask is y1.mask) - y2 = array(x1, mask=m) - self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__) - self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__) - self.assertTrue(y2[2] is masked) + y2 = array(x1, mask=m3) + assert_(y2._data.__array_interface__ == x1.__array_interface__) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(y2[2] is masked) y2[2] = 9 - self.assertTrue(y2[2] is not masked) - self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__) - self.assertTrue(allequal(y2.mask, 0)) + assert_(y2[2] is not masked) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._data.__array_interface__ != 
x1.__array_interface__) + #assert_( y2a.mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + #assert_( y2a.mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(allequal(y2a.mask, 0)) y3 = array(x1 * 1.0, mask=m) - self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) x4 = arange(4) x4[2] = masked @@ -433,10 +449,16 @@ assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) + def test_copy_0d(self): + # gh-9430 + x = np.ma.array(43, mask=True) + xc = x.copy() + assert_equal(xc.mask, True) + def test_copy_on_python_builtins(self): # Tests copy works on python builtins (issue#8019) - self.assertTrue(isMaskedArray(np.ma.copy([1,2,3]))) - self.assertTrue(isMaskedArray(np.ma.copy((1,2,3)))) + assert_(isMaskedArray(np.ma.copy([1,2,3]))) + assert_(isMaskedArray(np.ma.copy((1,2,3)))) def test_copy_immutable(self): # Tests that the copy method is immutable, GitHub issue #5247 @@ -466,19 +488,106 @@ def test_str_repr(self): a = array([0, 1, 2], mask=[False, True, False]) assert_equal(str(a), '[0 -- 2]') - assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' - ' mask = [False True False],\n' - ' fill_value = 999999)\n') + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999)''') + ) + # arrays with a continuation a = np.ma.arange(2000) a[1:50] = np.ma.masked assert_equal( repr(a), - 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n' - ' mask = [False True True ..., False False False],\n' - ' fill_value = 999999)\n' + textwrap.dedent('''\ + masked_array(data=[0, --, --, ..., 1997, 1998, 1999], + mask=[False, True, True, ..., False, False, False], + fill_value=999999)''') + ) + + # line-wrapped 1d arrays are correctly aligned + a = np.ma.arange(20) + 
assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19], + mask=False, + fill_value=999999)''') ) + # 2d arrays cause wrapping + a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) + a[1,1] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value=999999, + dtype=int8)''') + ) + + # but not it they're a row vector + assert_equal( + repr(a[:1]), + textwrap.dedent('''\ + masked_array(data=[[1, 2, 3]], + mask=[[False, False, False]], + fill_value=999999, + dtype=int8)''') + ) + + # dtype=int is implied, so not shown + assert_equal( + repr(a.astype(int)), + textwrap.dedent('''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value=999999)''') + ) + + + + def test_str_repr_legacy(self): + oldopts = np.get_printoptions() + np.set_printoptions(legacy='1.13') + try: + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' + ' mask = [False True False],\n' + ' fill_value = 999999)\n') + + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n' + ' mask = [False True True ..., False False False],\n' + ' fill_value = 999999)\n' + ) + finally: + np.set_printoptions(**oldopts) + + def test_0d_unicode(self): + u = u'caf\xe9' + utype = type(u) + + arr_nomask = np.ma.array(u) + arr_masked = np.ma.array(u, mask=True) + + assert_equal(utype(arr_nomask), u) + assert_equal(utype(arr_masked), u'--') + def test_pickling(self): # Tests pickling for dtype in (int, float, str, object): @@ -506,7 +615,7 @@ a_pickled = pickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) - self.assertTrue(isinstance(a_pickled._data, np.matrix)) 
+ assert_(isinstance(a_pickled._data, np.matrix)) def test_pickling_maskedconstant(self): # Test pickling MaskedConstant @@ -546,19 +655,19 @@ assert_equal(1.0, float(array(1))) assert_equal(1, int(array([[[1]]]))) assert_equal(1.0, float(array([[1]]))) - self.assertRaises(TypeError, float, array([1, 1])) + assert_raises(TypeError, float, array([1, 1])) with suppress_warnings() as sup: sup.filter(UserWarning, 'Warning: converting a masked element') assert_(np.isnan(float(array([1], mask=[1])))) a = array([1, 2, 3], mask=[1, 0, 0]) - self.assertRaises(TypeError, lambda: float(a)) + assert_raises(TypeError, lambda: float(a)) assert_equal(float(a[-1]), 3.) - self.assertTrue(np.isnan(float(a[0]))) - self.assertRaises(TypeError, int, a) + assert_(np.isnan(float(a[0]))) + assert_raises(TypeError, int, a) assert_equal(int(a[-1]), 3) - self.assertRaises(MAError, lambda:int(a[0])) + assert_raises(MAError, lambda:int(a[0])) def test_oddfeatures_1(self): # Test of other odd features @@ -667,8 +776,8 @@ a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), order='F') # this is currently ignored - self.assertTrue(a.flags['F_CONTIGUOUS']) - self.assertTrue(a.filled(0).flags['F_CONTIGUOUS']) + assert_(a.flags['F_CONTIGUOUS']) + assert_(a.filled(0).flags['F_CONTIGUOUS']) def test_optinfo_propagation(self): # Checks that _optinfo dictionary isn't back-propagated @@ -679,6 +788,25 @@ y._optinfo['info'] = '!!!' 
assert_equal(x._optinfo['info'], '???') + def test_optinfo_forward_propagation(self): + a = array([1,2,2,4]) + a._optinfo["key"] = "value" + assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) + assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"]) + assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"]) + def test_fancy_printoptions(self): # Test printing a masked array w/ fancy dtype. 
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) @@ -706,14 +834,14 @@ ndtype = [('a', int), ('b', float)] a = np.array([(1, 1), (2, 2)], dtype=ndtype) test = flatten_structured_array(a) - control = np.array([[1., 1.], [2., 2.]], dtype=np.float) + control = np.array([[1., 1.], [2., 2.]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) # On masked_array a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) test = flatten_structured_array(a) control = array([[1., 1.], [2., 2.]], - mask=[[0, 1], [1, 0]], dtype=np.float) + mask=[[0, 1], [1, 0]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) assert_equal(test.mask, control.mask) @@ -723,7 +851,7 @@ mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) test = flatten_structured_array(a) control = array([[1., 1., 1.1], [2., 2., 2.2]], - mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float) + mask=[[0, 1, 0], [1, 0, 1]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) assert_equal(test.mask, control.mask) @@ -731,7 +859,7 @@ ndtype = [('a', int), ('b', float)] a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) test = flatten_structured_array(a) - control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float) + control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) assert_equal(test, control) assert_equal(test.dtype, control.dtype) @@ -756,14 +884,14 @@ dtype=ndtype) # w/o mask f = a[0] - self.assertTrue(isinstance(f, mvoid)) + assert_(isinstance(f, mvoid)) assert_equal((f[0], f['a']), (1, 1)) assert_equal(f['b'], 2) # w/ mask f = a[1] - self.assertTrue(isinstance(f, mvoid)) - self.assertTrue(f[0] is masked) - self.assertTrue(f['a'] is masked) + assert_(isinstance(f, mvoid)) + assert_(f[0] is masked) + assert_(f['a'] is masked) assert_equal(f[1], 4) # exotic dtype @@ -850,10 +978,10 @@ assert_(mx2[0] == 0.) 
-class TestMaskedArrayArithmetic(TestCase): +class TestMaskedArrayArithmetic(object): # Base test class for MaskedArrays. - def setUp(self): + def setup(self): # Base data definition. x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) @@ -870,7 +998,7 @@ self.err_status = np.geterr() np.seterr(divide='ignore', invalid='ignore') - def tearDown(self): + def teardown(self): np.seterr(**self.err_status) def test_basic_arithmetic(self): @@ -930,8 +1058,8 @@ # Tests mixed arithmetics. na = np.array([1]) ma = array([1]) - self.assertTrue(isinstance(na + ma, MaskedArray)) - self.assertTrue(isinstance(ma + na, MaskedArray)) + assert_(isinstance(na + ma, MaskedArray)) + assert_(isinstance(ma + na, MaskedArray)) def test_limits_arithmetic(self): tiny = np.finfo(float).tiny @@ -943,11 +1071,11 @@ # Tests some scalar arithmetics on MaskedArrays. # Masked singleton should remain masked no matter what xm = array(0, mask=1) - self.assertTrue((1 / array(0)).mask) - self.assertTrue((1 + xm).mask) - self.assertTrue((-xm).mask) - self.assertTrue(maximum(xm, xm).mask) - self.assertTrue(minimum(xm, xm).mask) + assert_((1 / array(0)).mask) + assert_((1 + xm).mask) + assert_((-xm).mask) + assert_(maximum(xm, xm).mask) + assert_(minimum(xm, xm).mask) def test_masked_singleton_equality(self): # Tests (in)equality on masked singleton @@ -1019,7 +1147,7 @@ ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) res = count(ott) - self.assertTrue(res.dtype.type is np.intp) + assert_(res.dtype.type is np.intp) assert_equal(3, res) ott = ott.reshape((2, 2)) @@ -1070,19 +1198,19 @@ def test_minimummaximum_func(self): a = np.ones((2, 2)) aminimum = minimum(a, a) - self.assertTrue(isinstance(aminimum, MaskedArray)) + assert_(isinstance(aminimum, MaskedArray)) assert_equal(aminimum, np.minimum(a, a)) aminimum = minimum.outer(a, a) - self.assertTrue(isinstance(aminimum, MaskedArray)) + assert_(isinstance(aminimum, 
MaskedArray)) assert_equal(aminimum, np.minimum.outer(a, a)) amaximum = maximum(a, a) - self.assertTrue(isinstance(amaximum, MaskedArray)) + assert_(isinstance(amaximum, MaskedArray)) assert_equal(amaximum, np.maximum(a, a)) amaximum = maximum.outer(a, a) - self.assertTrue(isinstance(amaximum, MaskedArray)) + assert_(isinstance(amaximum, MaskedArray)) assert_equal(amaximum, np.maximum.outer(a, a)) def test_minmax_reduce(self): @@ -1108,33 +1236,33 @@ pass nout = np.empty((4,), dtype=float) result = npfunc(xm, axis=0, out=nout) - self.assertTrue(result is nout) + assert_(result is nout) # Use the ma version nout.fill(-999) result = mafunc(xm, axis=0, out=nout) - self.assertTrue(result is nout) + assert_(result is nout) def test_minmax_methods(self): # Additional tests on max/min (_, _, _, _, _, xm, _, _, _, _) = self.d xm.shape = (xm.size,) assert_equal(xm.max(), 10) - self.assertTrue(xm[0].max() is masked) - self.assertTrue(xm[0].max(0) is masked) - self.assertTrue(xm[0].max(-1) is masked) + assert_(xm[0].max() is masked) + assert_(xm[0].max(0) is masked) + assert_(xm[0].max(-1) is masked) assert_equal(xm.min(), -10.) - self.assertTrue(xm[0].min() is masked) - self.assertTrue(xm[0].min(0) is masked) - self.assertTrue(xm[0].min(-1) is masked) + assert_(xm[0].min() is masked) + assert_(xm[0].min(0) is masked) + assert_(xm[0].min(-1) is masked) assert_equal(xm.ptp(), 20.) - self.assertTrue(xm[0].ptp() is masked) - self.assertTrue(xm[0].ptp(0) is masked) - self.assertTrue(xm[0].ptp(-1) is masked) + assert_(xm[0].ptp() is masked) + assert_(xm[0].ptp(0) is masked) + assert_(xm[0].ptp(-1) is masked) x = array([1, 2, 3], mask=True) - self.assertTrue(x.min() is masked) - self.assertTrue(x.max() is masked) - self.assertTrue(x.ptp() is masked) + assert_(x.min() is masked) + assert_(x.max() is masked) + assert_(x.ptp() is masked) def test_addsumprod(self): # Tests add, sum, product. 
@@ -1491,7 +1619,7 @@ assert_equal(a.mask, [0, 0, 0, 0, 1]) -class TestMaskedArrayAttributes(TestCase): +class TestMaskedArrayAttributes(object): def test_keepmask(self): # Tests the keep mask flag @@ -1519,8 +1647,8 @@ assert_equal(xh._data, [0, 10, 2, 3, 4]) assert_equal(xs._data, [0, 10, 2, 3, 40]) assert_equal(xs.mask, [0, 0, 0, 1, 0]) - self.assertTrue(xh._hardmask) - self.assertTrue(not xs._hardmask) + assert_(xh._hardmask) + assert_(not xs._hardmask) xh[1:4] = [10, 20, 30] xs[1:4] = [10, 20, 30] assert_equal(xh._data, [0, 10, 20, 3, 4]) @@ -1604,13 +1732,19 @@ assert_equal(a, b) assert_equal(a.mask, nomask) + # Mask cannot be shrunk on structured types, so is a no-op + a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)]) + b = a.copy() + a.shrink_mask() + assert_equal(a.mask, b.mask) + def test_flat(self): # Test that flat can return all types of items [#4585, #4615] # test simple access test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) assert_equal(test.flat[1], 2) assert_equal(test.flat[2], masked) - self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2])) + assert_(np.all(test.flat[0:2] == test[0, 0:2])) # Test flat on masked_matrices test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) @@ -1684,7 +1818,7 @@ assert_equal(m._mask, np.ma.nomask) -class TestFillingValues(TestCase): +class TestFillingValues(object): def test_check_on_scalar(self): # Test _check_fill_value set to valid and invalid values @@ -1699,8 +1833,8 @@ assert_equal(fval, b"0") fval = _check_fill_value(None, "|S3") assert_equal(fval, default_fill_value(b"camelot!")) - self.assertRaises(TypeError, _check_fill_value, 1e+20, int) - self.assertRaises(TypeError, _check_fill_value, 'stuff', int) + assert_raises(TypeError, _check_fill_value, 1e+20, int) + assert_raises(TypeError, _check_fill_value, 'stuff', int) def test_check_on_fields(self): # Tests _check_fill_value with records @@ -1708,49 +1842,45 @@ ndtype = [('a', 
int), ('b', float), ('c', "|S3")] # A check on a list should return a single record fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) - self.assertTrue(isinstance(fval, ndarray)) + assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, b"???"]) # A check on None should output the defaults fval = _check_fill_value(None, ndtype) - self.assertTrue(isinstance(fval, ndarray)) + assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), [default_fill_value(0), default_fill_value(0.), asbytes(default_fill_value("0"))]) #.....Using a structured type as fill_value should work fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) fval = _check_fill_value(fill_val, ndtype) - self.assertTrue(isinstance(fval, ndarray)) + assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, b"???"]) #.....Using a flexible type w/ a different type shouldn't matter - # BEHAVIOR in 1.5 and earlier: match structured types by position - #fill_val = np.array((-999, -12345678.9, "???"), - # dtype=[("A", int), ("B", float), ("C", "|S3")]) - # BEHAVIOR in 1.6 and later: match structured types by name - fill_val = np.array(("???", -999, -12345678.9), - dtype=[("c", "|S3"), ("a", int), ("b", float), ]) - # suppress deprecation warning in 1.12 (remove in 1.13) - with assert_warns(FutureWarning): - fval = _check_fill_value(fill_val, ndtype) - self.assertTrue(isinstance(fval, ndarray)) + # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured + # types by position + fill_val = np.array((-999, -12345678.9, "???"), + dtype=[("A", int), ("B", float), ("C", "|S3")]) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, b"???"]) #.....Using an object-array shouldn't matter either fill_val = np.ndarray(shape=(1,), dtype=object) fill_val[0] = (-999, -12345678.9, b"???") fval = _check_fill_value(fill_val, object) - self.assertTrue(isinstance(fval, ndarray)) + 
assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), [-999, -12345678.9, b"???"]) # NOTE: This test was never run properly as "fill_value" rather than # "fill_val" was assigned. Written properly, it fails. #fill_val = np.array((-999, -12345678.9, "???")) #fval = _check_fill_value(fill_val, ndtype) - #self.assertTrue(isinstance(fval, ndarray)) + #assert_(isinstance(fval, ndarray)) #assert_equal(fval.item(), [-999, -12345678.9, b"???"]) #.....One-field-only flexible type should work as well ndtype = [("a", int)] fval = _check_fill_value(-999999999, ndtype) - self.assertTrue(isinstance(fval, ndarray)) + assert_(isinstance(fval, ndarray)) assert_equal(fval.item(), (-999999999,)) def test_fillvalue_conversion(self): @@ -1777,6 +1907,31 @@ assert_equal(b['a']._data, a._data) assert_equal(b['a'].fill_value, a.fill_value) + def test_default_fill_value(self): + # check all calling conventions + f1 = default_fill_value(1.) + f2 = default_fill_value(np.array(1.)) + f3 = default_fill_value(np.array(1.).dtype) + assert_equal(f1, f2) + assert_equal(f1, f3) + + def test_default_fill_value_structured(self): + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + + f1 = default_fill_value(fields) + f2 = default_fill_value(fields.dtype) + expected = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.)), dtype=fields.dtype) + assert_equal(f1, expected) + assert_equal(f2, expected) + + def test_default_fill_value_void(self): + dt = np.dtype([('v', 'V7')]) + f = default_fill_value(dt) + assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v'])) + def test_fillvalue(self): # Yet more fun with the fill_value data = masked_array([1, 2, 3], fill_value=-999) @@ -1841,33 +1996,47 @@ "h", "D", "W", "M", "Y"): control = numpy.datetime64("NaT", timecode) test = default_fill_value(numpy.dtype(" 0.5) mxsmall = (mx < 0.5) - self.assertFalse(mxbig.all()) - self.assertTrue(mxbig.any()) + assert_(not mxbig.all()) + 
assert_(mxbig.any()) assert_equal(mxbig.all(0), [False, False, True]) assert_equal(mxbig.all(1), [False, False, True]) assert_equal(mxbig.any(0), [False, False, True]) assert_equal(mxbig.any(1), [True, True, True]) - self.assertFalse(mxsmall.all()) - self.assertTrue(mxsmall.any()) + assert_(not mxsmall.all()) + assert_(mxsmall.any()) assert_equal(mxsmall.all(0), [True, True, False]) assert_equal(mxsmall.all(1), [False, False, False]) assert_equal(mxsmall.any(0), [True, True, False]) @@ -2736,15 +2905,15 @@ mXbig = (mX > 0.5) mXsmall = (mX < 0.5) - self.assertFalse(mXbig.all()) - self.assertTrue(mXbig.any()) + assert_(not mXbig.all()) + assert_(mXbig.any()) assert_equal(mXbig.all(0), np.matrix([False, False, True])) assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) assert_equal(mXbig.any(0), np.matrix([False, False, True])) assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) - self.assertFalse(mXsmall.all()) - self.assertTrue(mXsmall.any()) + assert_(not mXsmall.all()) + assert_(mXsmall.any()) assert_equal(mXsmall.all(0), np.matrix([True, True, False])) assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) assert_equal(mXsmall.any(0), np.matrix([True, True, False])) @@ -2755,18 +2924,18 @@ store = empty((), dtype=bool) full = array([1, 2, 3], mask=True) - self.assertTrue(full.all() is masked) + assert_(full.all() is masked) full.all(out=store) - self.assertTrue(store) - self.assertTrue(store._mask, True) - self.assertTrue(store is not masked) + assert_(store) + assert_(store._mask, True) + assert_(store is not masked) store = empty((), dtype=bool) - self.assertTrue(full.any() is masked) + assert_(full.any() is masked) full.any(out=store) - self.assertTrue(not store) - self.assertTrue(store._mask, True) - self.assertTrue(store is not masked) + assert_(not store) + assert_(store._mask, True) + assert_(store is not masked) def test_argmax_argmin(self): # Tests argmin & argmax on MaskedArrays. 
@@ -2851,7 +3020,7 @@ a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0]) b = a.compressed() assert_equal(b, a) - self.assertTrue(isinstance(b, np.matrix)) + assert_(isinstance(b, np.matrix)) a[0, 0] = masked b = a.compressed() assert_equal(b, [[2, 3, 4]]) @@ -2885,11 +3054,11 @@ n = [0, 0, 0, 1, 1] m = make_mask(n) x = array(d, mask=m) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is masked) + assert_(x[3] is masked) + assert_(x[4] is masked) x[[1, 4]] = [10, 40] - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is not masked) + assert_(x[3] is masked) + assert_(x[4] is not masked) assert_equal(x, [0, 10, 2, -1, 40]) x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) @@ -2915,12 +3084,12 @@ z = array([3., -1.], mask=[False, True]) x.put([1, 2], z) - self.assertTrue(x[0] is not masked) + assert_(x[0] is not masked) assert_equal(x[0], 0) - self.assertTrue(x[1] is not masked) + assert_(x[1] is not masked) assert_equal(x[1], 3) - self.assertTrue(x[2] is masked) - self.assertTrue(x[3] is not masked) + assert_(x[2] is masked) + assert_(x[3] is not masked) assert_equal(x[3], 0) def test_put_hardmask(self): @@ -3021,7 +3190,7 @@ x = [1, 4, 2, 3] sortedx = sort(x) - self.assertTrue(not isinstance(sorted, MaskedArray)) + assert_(not isinstance(sorted, MaskedArray)) x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8) sortedx = sort(x, endwith=False) @@ -3086,27 +3255,41 @@ assert_equal(am, an) def test_sort_flexible(self): - # Test sort on flexible dtype. + # Test sort on structured dtype. 
a = array( data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], dtype=[('A', int), ('B', int)]) - - test = sort(a) - b = array( + mask_last = array( data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], dtype=[('A', int), ('B', int)]) - assert_equal(test, b) - assert_equal(test.mask, b.mask) + mask_first = array( + data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)], + dtype=[('A', int), ('B', int)]) + + test = sort(a) + assert_equal(test, mask_last) + assert_equal(test.mask, mask_last.mask) test = sort(a, endwith=False) - b = array( - data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ], - mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ], - dtype=[('A', int), ('B', int)]) - assert_equal(test, b) - assert_equal(test.mask, b.mask) + assert_equal(test, mask_first) + assert_equal(test.mask, mask_first.mask) + + # Test sort on dtype with subarray (gh-8069) + dt = np.dtype([('v', int, 2)]) + a = a.view(dt) + mask_last = mask_last.view(dt) + mask_first = mask_first.view(dt) + + test = sort(a) + assert_equal(test, mask_last) + assert_equal(test.mask, mask_last.mask) + + test = sort(a, endwith=False) + assert_equal(test, mask_first) + assert_equal(test.mask, mask_first.mask) def test_argsort(self): # Test argsort @@ -3120,8 +3303,21 @@ data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) assert_equal(data.squeeze(), [1, 2, 3]) assert_equal(data.squeeze()._mask, [1, 1, 1]) - data = masked_array([[1]], mask=True) - self.assertTrue(data.squeeze() is masked) + + # normal ndarrays return a view + arr = np.array([[1]]) + arr_sq = arr.squeeze() + assert_equal(arr_sq, 1) + arr_sq[...] 
= 2 + assert_equal(arr[0,0], 2) + + # so maskedarrays should too + m_arr = masked_array([[1]], mask=True) + m_arr_sq = m_arr.squeeze() + assert_(m_arr_sq is not np.ma.masked) + assert_equal(m_arr_sq.mask, True) + m_arr_sq[...] = 2 + assert_equal(m_arr[0,0], 2) def test_swapaxes(self): # Tests swapaxes on MaskedArrays. @@ -3155,8 +3351,8 @@ masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) # assert_equal crashes when passed np.ma.mask - self.assertIs(x[1], np.ma.masked) - self.assertIs(x.take(1), np.ma.masked) + assert_(x[1] is np.ma.masked) + assert_(x.take(1) is np.ma.masked) x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) assert_equal(x.take([0, 2], axis=1), @@ -3200,8 +3396,8 @@ x = array(np.arange(12)) x[[1, -2]] = masked xlist = x.tolist() - self.assertTrue(xlist[1] is None) - self.assertTrue(xlist[-2] is None) + assert_(xlist[1] is None) + assert_(xlist[-2] is None) # ... on 2D x.shape = (3, 4) xlist = x.tolist() @@ -3304,10 +3500,37 @@ assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) + def test_arraymethod_0d(self): + # gh-9430 + x = np.ma.array(42, mask=True) + assert_equal(x.T.mask, x.mask) + assert_equal(x.T.data, x.data) + + def test_transpose_view(self): + x = np.ma.array([[1, 2, 3], [4, 5, 6]]) + x[0,1] = np.ma.masked + xt = x.T + + xt[1,0] = 10 + xt[0,1] = np.ma.masked + + assert_equal(x.data, xt.T.data) + assert_equal(x.mask, xt.T.mask) + + def test_diagonal_view(self): + x = np.ma.zeros((3,3)) + x[0,0] = 10 + x[1,1] = np.ma.masked + x[2,2] = 20 + xd = x.diagonal() + x[1,1] = 15 + assert_equal(xd.mask, x.diagonal().mask) + assert_equal(xd.data, x.diagonal().data) -class TestMaskedArrayMathMethods(TestCase): - def setUp(self): +class TestMaskedArrayMathMethods(object): + + def setup(self): # Base data definition. x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -3366,20 +3589,20 @@ output.fill(-9999) result = npfunc(xm, axis=0, out=output) # ... 
the result should be the given output - self.assertTrue(result is output) + assert_(result is output) assert_equal(result, xmmeth(axis=0, out=output)) output = empty((3, 4), dtype=int) result = xmmeth(axis=0, out=output) - self.assertTrue(result is output) + assert_(result is output) def test_ptp(self): # Tests ptp on MaskedArrays. (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d (n, m) = X.shape assert_equal(mx.ptp(), mx.compressed().ptp()) - rows = np.zeros(n, np.float) - cols = np.zeros(m, np.float) + rows = np.zeros(n, float) + cols = np.zeros(m, float) for k in range(m): cols[k] = mX[:, k].compressed().ptp() for k in range(n): @@ -3395,21 +3618,21 @@ def test_sum_object(self): # Test sum on object dtype - a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object) + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) assert_equal(a.sum(), 5) a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) assert_equal(a.sum(axis=0), [5, 7, 9]) def test_prod_object(self): # Test prod on object dtype - a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object) + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) assert_equal(a.prod(), 2 * 3) a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) assert_equal(a.prod(axis=0), [4, 10, 18]) def test_meananom_object(self): # Test mean/anom on object dtype - a = masked_array([1, 2, 3], dtype=np.object) + a = masked_array([1, 2, 3], dtype=object) assert_equal(a.mean(), 2) assert_equal(a.anom(), [-1, 0, 1]) @@ -3423,6 +3646,11 @@ axis=0)) assert_equal(np.trace(mX), mX.trace()) + # gh-5560 + arr = np.arange(2*4*4).reshape(2,4,4) + m_arr = np.ma.masked_array(arr, False) + assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) + def test_dot(self): # Tests dot on MaskedArrays. 
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d @@ -3494,6 +3722,8 @@ assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std()) + @dec.knownfailureif(sys.platform=='win32' and sys.version_info < (3, 6), + msg='Fails on Python < 3.6 (Issue #9671)') @suppress_copy_mask_on_assignment def test_varstd_specialcases(self): # Test a special case for var @@ -3503,31 +3733,31 @@ x = array(arange(10), mask=True) for methodname in ('var', 'std'): method = getattr(x, methodname) - self.assertTrue(method() is masked) - self.assertTrue(method(0) is masked) - self.assertTrue(method(-1) is masked) + assert_(method() is masked) + assert_(method(0) is masked) + assert_(method(-1) is masked) # Using a masked array as explicit output method(out=mout) - self.assertTrue(mout is not masked) + assert_(mout is not masked) assert_equal(mout.mask, True) # Using a ndarray as explicit output method(out=nout) - self.assertTrue(np.isnan(nout)) + assert_(np.isnan(nout)) x = array(arange(10), mask=True) x[-1] = 9 for methodname in ('var', 'std'): method = getattr(x, methodname) - self.assertTrue(method(ddof=1) is masked) - self.assertTrue(method(0, ddof=1) is masked) - self.assertTrue(method(-1, ddof=1) is masked) + assert_(method(ddof=1) is masked) + assert_(method(0, ddof=1) is masked) + assert_(method(-1, ddof=1) is masked) # Using a masked array as explicit output method(out=mout, ddof=1) - self.assertTrue(mout is not masked) + assert_(mout is not masked) assert_equal(mout.mask, True) # Using a ndarray as explicit output method(out=nout, ddof=1) - self.assertTrue(np.isnan(nout)) + assert_(np.isnan(nout)) def test_varstd_ddof(self): a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]]) @@ -3576,9 +3806,9 @@ assert_equal(a.max(1), [3, 6]) -class TestMaskedArrayMathMethodsComplex(TestCase): +class TestMaskedArrayMathMethodsComplex(object): # Test class for miscellaneous MaskedArrays methods. - def setUp(self): + def setup(self): # Base data definition. 
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, @@ -3629,10 +3859,10 @@ mX[:, k].compressed().std()) -class TestMaskedArrayFunctions(TestCase): +class TestMaskedArrayFunctions(object): # Test class for miscellaneous functions. - def setUp(self): + def setup(self): x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] @@ -3706,6 +3936,12 @@ assert_equal(am["A"], np.ma.masked_array(np.zeros(10), np.ones(10))) + def test_masked_where_mismatch(self): + # gh-4520 + x = np.arange(10) + y = np.arange(5) + assert_raises(IndexError, np.ma.masked_where, y > 6, x) + def test_masked_otherfunctions(self): assert_equal(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]) @@ -3756,12 +3992,12 @@ output.fill(-9999) result = np.round(xm, decimals=2, out=output) # ... the result should be the given output - self.assertTrue(result is output) + assert_(result is output) assert_equal(result, xm.round(decimals=2, out=output)) output = empty((3, 4), dtype=float) result = xm.round(decimals=2, out=output) - self.assertTrue(result is output) + assert_(result is output) def test_round_with_scalar(self): # Testing round with scalar/zero dimension input @@ -3790,13 +4026,13 @@ def test_identity(self): a = identity(5) - self.assertTrue(isinstance(a, MaskedArray)) + assert_(isinstance(a, MaskedArray)) assert_equal(a, np.identity(5)) def test_power(self): x = -1.1 assert_almost_equal(power(x, 2.), 1.21) - self.assertTrue(power(x, masked) is masked) + assert_(power(x, masked) is masked) x = array([-1.1, -1.1, 1.1, 1.1, 0.]) b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) y = power(x, b) @@ -4004,7 +4240,7 @@ store = empty(4, dtype=int) chosen = choose([2, 3, 1, 0], choices, out=store) assert_equal(store, array([20, 31, 12, 3])) - self.assertTrue(store is chosen) + assert_(store is chosen) # Check with some 
masked indices + out store = empty(4, dtype=int) indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1]) @@ -4025,56 +4261,56 @@ # Try the default b = a.reshape((5, 2)) assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['C']) + assert_(b.flags['C']) # Try w/ arguments as list instead of tuple b = a.reshape(5, 2) assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['C']) + assert_(b.flags['C']) # Try w/ order b = a.reshape((5, 2), order='F') assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['F']) + assert_(b.flags['F']) # Try w/ order b = a.reshape(5, 2, order='F') assert_equal(b.shape, (5, 2)) - self.assertTrue(b.flags['F']) + assert_(b.flags['F']) c = np.reshape(a, (2, 5)) - self.assertTrue(isinstance(c, MaskedArray)) + assert_(isinstance(c, MaskedArray)) assert_equal(c.shape, (2, 5)) - self.assertTrue(c[0, 0] is masked) - self.assertTrue(c.flags['C']) + assert_(c[0, 0] is masked) + assert_(c.flags['C']) def test_make_mask_descr(self): # Flexible - ntype = [('a', np.float), ('b', np.float)] + ntype = [('a', float), ('b', float)] test = make_mask_descr(ntype) - assert_equal(test, [('a', np.bool), ('b', np.bool)]) + assert_equal(test, [('a', bool), ('b', bool)]) assert_(test is make_mask_descr(test)) # Standard w/ shape - ntype = (np.float, 2) + ntype = (float, 2) test = make_mask_descr(ntype) - assert_equal(test, (np.bool, 2)) + assert_equal(test, (bool, 2)) assert_(test is make_mask_descr(test)) # Standard standard - ntype = np.float + ntype = float test = make_mask_descr(ntype) - assert_equal(test, np.dtype(np.bool)) + assert_equal(test, np.dtype(bool)) assert_(test is make_mask_descr(test)) # Nested - ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])] + ntype = [('a', float), ('b', [('ba', float), ('bb', float)])] test = make_mask_descr(ntype) control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) assert_equal(test, control) assert_(test is make_mask_descr(test)) # Named+ shape - ntype = [('a', (np.float, 2))] + ntype 
= [('a', (float, 2))] test = make_mask_descr(ntype) - assert_equal(test, np.dtype([('a', (np.bool, 2))])) + assert_equal(test, np.dtype([('a', (bool, 2))])) assert_(test is make_mask_descr(test)) # 2 names @@ -4099,25 +4335,25 @@ assert_equal(test.dtype, MaskType) assert_equal(test, [0, 1]) # w/ a ndarray as an input - mask = np.array([0, 1], dtype=np.bool) + mask = np.array([0, 1], dtype=bool) test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [0, 1]) # w/ a flexible-type ndarray as an input - use default - mdtype = [('a', np.bool), ('b', np.bool)] + mdtype = [('a', bool), ('b', bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask) assert_equal(test.dtype, MaskType) assert_equal(test, [1, 1]) # w/ a flexible-type ndarray as an input - use input dtype - mdtype = [('a', np.bool), ('b', np.bool)] + mdtype = [('a', bool), ('b', bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask, dtype=mask.dtype) assert_equal(test.dtype, mdtype) assert_equal(test, mask) # w/ a flexible-type ndarray as an input - use input dtype - mdtype = [('a', np.float), ('b', np.float)] - bdtype = [('a', np.bool), ('b', np.bool)] + mdtype = [('a', float), ('b', float)] + bdtype = [('a', bool), ('b', bool)] mask = np.array([(0, 0), (0, 1)], dtype=mdtype) test = make_mask(mask, dtype=mask.dtype) assert_equal(test.dtype, bdtype) @@ -4133,7 +4369,7 @@ assert_equal(test2, test) # test that nomask is returned when m is nomask. 
bools = [True, False] - dtypes = [MaskType, np.float] + dtypes = [MaskType, float] msgformat = 'copy=%s, shrink=%s, dtype=%s' for cpy, shr, dt in itertools.product(bools, bools, dtypes): res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) @@ -4141,7 +4377,7 @@ def test_mask_or(self): # Initialize - mtype = [('a', np.bool), ('b', np.bool)] + mtype = [('a', bool), ('b', bool)] mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) # Test using nomask as input test = mask_or(mask, nomask) @@ -4157,14 +4393,14 @@ control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) assert_equal(test, control) # Using another array w / a different dtype - othertype = [('A', np.bool), ('B', np.bool)] + othertype = [('A', bool), ('B', bool)] other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) try: test = mask_or(mask, other) except ValueError: pass # Using nested arrays - dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])] + dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) @@ -4173,7 +4409,7 @@ def test_flatten_mask(self): # Tests flatten mask # Standard dtype - mask = np.array([0, 0, 1], dtype=np.bool) + mask = np.array([0, 0, 1], dtype=bool) assert_equal(flatten_mask(mask), mask) # Flexible dtype mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) @@ -4266,9 +4502,9 @@ assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1)) -class TestMaskedFields(TestCase): +class TestMaskedFields(object): - def setUp(self): + def setup(self): ilist = [1, 2, 3, 4, 5] flist = [1.1, 2.2, 3.3, 4.4, 5.5] slist = ['one', 'two', 'three', 'four', 'five'] @@ -4367,7 +4603,7 @@ test = a.view((float, 2), np.matrix) assert_equal(test, data) - self.assertTrue(isinstance(test, np.matrix)) + assert_(isinstance(test, np.matrix)) def test_getitem(self): ndtype = 
[('a', float), ('b', float)] @@ -4432,7 +4668,7 @@ assert_equal(len(rec), len(self.data['ddtype'])) -class TestMaskedObjectArray(TestCase): +class TestMaskedObjectArray(object): def test_getitem(self): arr = np.ma.array([None, None]) @@ -4480,9 +4716,9 @@ assert_(arr[0] is np.ma.masked) -class TestMaskedView(TestCase): +class TestMaskedView(object): - def setUp(self): + def setup(self): iterator = list(zip(np.arange(10), np.random.rand(10))) data = np.array(iterator) a = array(iterator, dtype=[('a', float), ('b', float)]) @@ -4493,14 +4729,14 @@ def test_view_to_nothing(self): (data, a, controlmask) = self.data test = a.view() - self.assertTrue(isinstance(test, MaskedArray)) + assert_(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) def test_view_to_type(self): (data, a, controlmask) = self.data test = a.view(np.ndarray) - self.assertTrue(not isinstance(test, MaskedArray)) + assert_(not isinstance(test, MaskedArray)) assert_equal(test, a._data) assert_equal_records(test, data.view(a.dtype).squeeze()) @@ -4508,7 +4744,7 @@ (data, a, controlmask) = self.data # View globally test = a.view(float) - self.assertTrue(isinstance(test, MaskedArray)) + assert_(isinstance(test, MaskedArray)) assert_equal(test, data.ravel()) assert_equal(test.mask, controlmask) @@ -4521,13 +4757,13 @@ assert_equal(test['B'], a['b']) test = a[0].view([('A', float), ('B', float)]) - self.assertTrue(isinstance(test, MaskedArray)) + assert_(isinstance(test, MaskedArray)) assert_equal(test.mask.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a'][0]) assert_equal(test['B'], a['b'][0]) test = a[-1].view([('A', float), ('B', float)]) - self.assertTrue(isinstance(test, MaskedArray)) + assert_(isinstance(test, MaskedArray)) assert_equal(test.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a'][-1]) assert_equal(test['B'], a['b'][-1]) @@ -4536,17 +4772,17 @@ (data, a, controlmask) = self.data # View globally test = a.view((float, 2)) - 
self.assertTrue(isinstance(test, MaskedArray)) + assert_(isinstance(test, MaskedArray)) assert_equal(test, data) assert_equal(test.mask, controlmask.reshape(-1, 2)) # View on 1 masked element test = a[0].view((float, 2)) - self.assertTrue(isinstance(test, MaskedArray)) + assert_(isinstance(test, MaskedArray)) assert_equal(test, data[0]) assert_equal(test.mask, (1, 0)) # View on 1 unmasked element test = a[-1].view((float, 2)) - self.assertTrue(isinstance(test, MaskedArray)) + assert_(isinstance(test, MaskedArray)) assert_equal(test, data[-1]) def test_view_to_dtype_and_type(self): @@ -4554,10 +4790,10 @@ test = a.view((float, 2), np.matrix) assert_equal(test, data) - self.assertTrue(isinstance(test, np.matrix)) - self.assertTrue(not isinstance(test, MaskedArray)) + assert_(isinstance(test, np.matrix)) + assert_(not isinstance(test, MaskedArray)) -class TestOptionalArgs(TestCase): +class TestOptionalArgs(object): def test_ndarrayfuncs(self): # test axis arg behaves the same as ndarray (including multiple axes) @@ -4644,10 +4880,10 @@ assert_raises(np.AxisError, count, np.ma.array(1), axis=1) -class TestMaskedConstant(TestCase): +class TestMaskedConstant(object): def _do_add_test(self, add): # sanity check - self.assertIs(add(np.ma.masked, 1), np.ma.masked) + assert_(add(np.ma.masked, 1) is np.ma.masked) # now try with a vector vector = np.array([1, 2, 3]) @@ -4673,6 +4909,93 @@ assert_(not isinstance(m, np.ma.core.MaskedConstant)) assert_(m is not np.ma.masked) + def test_repr(self): + # copies should not exist, but if they do, it should be obvious that + # something is wrong + assert_equal(repr(np.ma.masked), 'masked') + + # create a new instance in a weird way + masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant) + assert_not_equal(repr(masked2), 'masked') + + def test_pickle(self): + from io import BytesIO + import pickle + + with BytesIO() as f: + pickle.dump(np.ma.masked, f) + f.seek(0) + res = pickle.load(f) + assert_(res is np.ma.masked) + + def 
test_copy(self): + # gh-9328 + # copy is a no-op, like it is with np.True_ + assert_equal( + np.ma.masked.copy() is np.ma.masked, + np.True_.copy() is np.True_) + + def test_immutable(self): + orig = np.ma.masked + assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) + assert_raises(ValueError,operator.setitem, orig.data, (), 1) + assert_raises(ValueError, operator.setitem, orig.mask, (), False) + + view = np.ma.masked.view(np.ma.MaskedArray) + assert_raises(ValueError, operator.setitem, view, (), 1) + assert_raises(ValueError, operator.setitem, view.data, (), 1) + assert_raises(ValueError, operator.setitem, view.mask, (), False) + + def test_coercion_int(self): + a_i = np.zeros((), int) + assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) + assert_raises(MaskError, int, np.ma.masked) + + @dec.skipif(sys.version_info.major == 3, "long doesn't exist in Python 3") + def test_coercion_long(self): + assert_raises(MaskError, long, np.ma.masked) + + def test_coercion_float(self): + a_f = np.zeros((), float) + assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + assert_(np.isnan(a_f[()])) + + @dec.knownfailureif(True, "See gh-9750") + def test_coercion_unicode(self): + a_u = np.zeros((), 'U10') + a_u[()] = np.ma.masked + assert_equal(a_u[()], u'--') + + @dec.knownfailureif(True, "See gh-9750") + def test_coercion_bytes(self): + a_b = np.zeros((), 'S10') + a_b[()] = np.ma.masked + assert_equal(a_b[()], b'--') + + def test_subclass(self): + # https://github.com/astropy/astropy/issues/6645 + class Sub(type(np.ma.masked)): pass + + a = Sub() + assert_(a is Sub()) + assert_(a is not np.ma.masked) + assert_not_equal(repr(a), 'masked') + + +class TestMaskedWhereAliases(object): + + # TODO: Test masked_object, masked_equal, ... 
+ + def test_masked_values(self): + res = masked_values(np.array([-32768.0]), np.int16(-32768)) + assert_equal(res.mask, [True]) + + res = masked_values(np.inf, np.inf) + assert_equal(res.mask, True) + + res = np.ma.masked_values(np.inf, -np.inf) + assert_equal(res.mask, False) + def test_masked_array(): a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) @@ -4730,6 +5053,37 @@ assert_(y is x) +def test_ufunc_with_out_varied(): + """ Test that masked arrays are immune to gh-10459 """ + # the mask of the output should not affect the result, however it is passed + a = array([ 1, 2, 3], mask=[1, 0, 0]) + b = array([10, 20, 30], mask=[1, 0, 0]) + out = array([ 0, 0, 0], mask=[0, 0, 1]) + expected = array([11, 22, 33], mask=[1, 0, 0]) + + out_pos = out.copy() + res_pos = np.add(a, b, out_pos) + + out_kw = out.copy() + res_kw = np.add(a, b, out=out_kw) + + out_tup = out.copy() + res_tup = np.add(a, b, out=(out_tup,)) + + assert_equal(res_kw.mask, expected.mask) + assert_equal(res_kw.data, expected.data) + assert_equal(res_tup.mask, expected.mask) + assert_equal(res_tup.data, expected.data) + assert_equal(res_pos.mask, expected.mask) + assert_equal(res_pos.data, expected.data) + + +def test_astype(): + descr = [('v', int, 3), ('x', [('y', float)])] + x = array(([1, 2, 3], (1.0,)), dtype=descr) + assert_equal(x, x.astype(descr)) + + ############################################################################### if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/ma/tests/test_deprecations.py python-numpy-1.14.5/numpy/ma/tests/test_deprecations.py --- python-numpy-1.13.3/numpy/ma/tests/test_deprecations.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/tests/test_deprecations.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,11 +4,11 @@ from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_warns +from numpy.testing import run_module_suite, 
assert_warns from numpy.ma.testutils import assert_equal from numpy.ma.core import MaskedArrayFutureWarning -class TestArgsort(TestCase): +class TestArgsort(object): """ gh-8701 """ def _test_base(self, argsort, cls): arr_0d = np.array(1).view(cls) @@ -37,7 +37,7 @@ return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray) -class TestMinimumMaximum(TestCase): +class TestMinimumMaximum(object): def test_minimum(self): assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2])) diff -Nru python-numpy-1.13.3/numpy/ma/tests/test_extras.py python-numpy-1.14.5/numpy/ma/tests/test_extras.py --- python-numpy-1.13.3/numpy/ma/tests/test_extras.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/tests/test_extras.py 2018-06-12 18:28:52.000000000 +0000 @@ -14,8 +14,7 @@ import numpy as np from numpy.testing import ( - TestCase, run_module_suite, assert_warns, suppress_warnings, - assert_raises + run_module_suite, assert_warns, suppress_warnings, assert_raises, ) from numpy.ma.testutils import ( assert_, assert_array_equal, assert_equal, assert_almost_equal @@ -35,7 +34,7 @@ import numpy.ma.extras as mae -class TestGeneric(TestCase): +class TestGeneric(object): # def test_masked_all(self): # Tests masked_all @@ -140,7 +139,7 @@ assert_equal(test, None) -class TestAverage(TestCase): +class TestAverage(object): # Several tests of average. Why so many ? Good point... def test_testAverage1(self): # Test of average. 
@@ -149,7 +148,7 @@ assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) assert_equal(2.0, result) - self.assertTrue(wts == 4.0) + assert_(wts == 4.0) ott[:] = masked assert_equal(average(ott, axis=0).mask, [True]) ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) @@ -271,7 +270,7 @@ assert_almost_equal(wav1.imag, expected1.imag) -class TestConcatenator(TestCase): +class TestConcatenator(object): # Tests for mr_, the equivalent of r_ for masked arrays. def test_1d(self): @@ -281,7 +280,7 @@ m = [1, 0, 0, 0, 0] d = masked_array(b, mask=m) c = mr_[d, 0, 0, d] - self.assertTrue(isinstance(c, MaskedArray)) + assert_(isinstance(c, MaskedArray)) assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) assert_array_equal(c.mask, mr_[m, 0, 0, m]) @@ -295,12 +294,12 @@ b_2 = masked_array(a_2, mask=m_2) # append columns d = mr_['1', b_1, b_2] - self.assertTrue(d.shape == (5, 10)) + assert_(d.shape == (5, 10)) assert_array_equal(d[:, :5], b_1) assert_array_equal(d[:, 5:], b_2) assert_array_equal(d.mask, np.r_['1', m_1, m_2]) d = mr_[b_1, b_2] - self.assertTrue(d.shape == (10, 5)) + assert_(d.shape == (10, 5)) assert_array_equal(d[:5,:], b_1) assert_array_equal(d[5:,:], b_2) assert_array_equal(d.mask, np.r_[m_1, m_2]) @@ -318,7 +317,7 @@ assert_equal(type(actual.data), type(expected.data)) -class TestNotMasked(TestCase): +class TestNotMasked(object): # Tests notmasked_edges and notmasked_contiguous. 
def test_edges(self): @@ -367,19 +366,19 @@ assert_equal(tmp[-3], slice(0, 4, None)) # tmp = notmasked_contiguous(a, 0) - self.assertTrue(len(tmp[-1]) == 1) - self.assertTrue(tmp[-2] is None) + assert_(len(tmp[-1]) == 1) + assert_(tmp[-2] is None) assert_equal(tmp[-3], tmp[-1]) - self.assertTrue(len(tmp[0]) == 2) + assert_(len(tmp[0]) == 2) # tmp = notmasked_contiguous(a, 1) assert_equal(tmp[0][-1], slice(0, 4, None)) - self.assertTrue(tmp[1] is None) + assert_(tmp[1] is None) assert_equal(tmp[2][-1], slice(7, 8, None)) assert_equal(tmp[2][-2], slice(0, 6, None)) -class TestCompressFunctions(TestCase): +class TestCompressFunctions(object): def test_compress_nd(self): # Tests compress_nd @@ -538,12 +537,12 @@ assert_equal(mask_rowcols(x, 1,).mask, [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - self.assertTrue(mask_rowcols(x).all() is masked) - self.assertTrue(mask_rowcols(x, 0).all() is masked) - self.assertTrue(mask_rowcols(x, 1).all() is masked) - self.assertTrue(mask_rowcols(x).mask.all()) - self.assertTrue(mask_rowcols(x, 0).mask.all()) - self.assertTrue(mask_rowcols(x, 1).mask.all()) + assert_(mask_rowcols(x).all() is masked) + assert_(mask_rowcols(x, 0).all() is masked) + assert_(mask_rowcols(x, 1).all() is masked) + assert_(mask_rowcols(x).mask.all()) + assert_(mask_rowcols(x, 0).mask.all()) + assert_(mask_rowcols(x, 1).mask.all()) def test_dot(self): # Tests dot product @@ -632,7 +631,7 @@ assert_equal(a, res) -class TestApplyAlongAxis(TestCase): +class TestApplyAlongAxis(object): # Tests 2D functions def test_3d(self): a = arange(12.).reshape(2, 2, 3) @@ -654,20 +653,20 @@ assert_equal(xa, [[2, 5], [8, 11]]) -class TestApplyOverAxes(TestCase): +class TestApplyOverAxes(object): # Tests apply_over_axes def test_basic(self): a = arange(24).reshape(2, 3, 4) test = apply_over_axes(np.sum, a, [0, 2]) ctrl = np.array([[[60], [92], [124]]]) assert_equal(test, ctrl) - a[(a % 2).astype(np.bool)] = masked + a[(a % 
2).astype(bool)] = masked test = apply_over_axes(np.sum, a, [0, 2]) ctrl = np.array([[[28], [44], [60]]]) assert_equal(test, ctrl) -class TestMedian(TestCase): +class TestMedian(object): def test_pytype(self): r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1) assert_equal(r, np.inf) @@ -737,7 +736,7 @@ for axis, over in args: try: np.ma.median(x, axis=axis, overwrite_input=over) - except: + except Exception: raise AssertionError(msg % (mask, ndmin, axis, over)) # Invalid axis values should raise exception @@ -886,7 +885,7 @@ def test_nan(self): with suppress_warnings() as w: w.record(RuntimeWarning) - for mask in (False, np.zeros(6, dtype=np.bool)): + for mask in (False, np.zeros(6, dtype=bool)): dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) dm.mask = mask @@ -1069,9 +1068,9 @@ assert_(type(np.ma.median(o.astype(object))), float) -class TestCov(TestCase): +class TestCov(object): - def setUp(self): + def setup(self): self.data = array(np.random.rand(12)) def test_1d_without_missing(self): @@ -1136,9 +1135,9 @@ x.shape[0] / frac)) -class TestCorrcoef(TestCase): +class TestCorrcoef(object): - def setUp(self): + def setup(self): self.data = array(np.random.rand(12)) self.data2 = array(np.random.rand(12)) @@ -1243,7 +1242,7 @@ control[:-1, :-1]) -class TestPolynomial(TestCase): +class TestPolynomial(object): # def test_polyfit(self): # Tests polyfit @@ -1301,13 +1300,13 @@ assert_almost_equal(a, a_) -class TestArraySetOps(TestCase): +class TestArraySetOps(object): def test_unique_onlist(self): # Test unique on list data = [1, 1, 1, 2, 2, 3] test = unique(data, return_index=True, return_inverse=True) - self.assertTrue(isinstance(test[0], MaskedArray)) + assert_(isinstance(test[0], MaskedArray)) assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) assert_equal(test[1], [0, 3, 5]) assert_equal(test[2], [0, 0, 0, 1, 1, 2]) @@ -1404,13 +1403,13 @@ test = ediff1d(x) control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) assert_equal(test, control) - 
self.assertTrue(isinstance(test, MaskedArray)) + assert_(isinstance(test, MaskedArray)) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) # test = ediff1d(x, to_end=masked, to_begin=masked) control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) - self.assertTrue(isinstance(test, MaskedArray)) + assert_(isinstance(test, MaskedArray)) assert_equal(test.filled(0), control.filled(0)) assert_equal(test.mask, control.mask) @@ -1502,6 +1501,14 @@ test = union1d(a, b) control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) assert_equal(test, control) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]]) + y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1]) + ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1]) + z = union1d(x, y) + assert_equal(z, ez) # assert_array_equal([], union1d([], [])) @@ -1525,7 +1532,7 @@ assert_array_equal(setdiff1d(a, b), np.array(['c'])) -class TestShapeBase(TestCase): +class TestShapeBase(object): def test_atleast_2d(self): # Test atleast_2d diff -Nru python-numpy-1.13.3/numpy/ma/tests/test_mrecords.py python-numpy-1.14.5/numpy/ma/tests/test_mrecords.py --- python-numpy-1.13.3/numpy/ma/tests/test_mrecords.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/tests/test_mrecords.py 2018-06-12 18:28:52.000000000 +0000 @@ -14,7 +14,7 @@ import numpy.ma as ma from numpy import recarray from numpy.ma import masked, nomask -from numpy.testing import TestCase, run_module_suite, temppath +from numpy.testing import run_module_suite, temppath from numpy.core.records import ( fromrecords as recfromrecords, fromarrays as recfromarrays ) @@ -28,21 +28,14 @@ ) -class TestMRecords(TestCase): - # Base test class for MaskedArrays. 
- def __init__(self, *args, **kwds): - TestCase.__init__(self, *args, **kwds) - self.setup() +class TestMRecords(object): - def setup(self): - # Generic setup - ilist = [1, 2, 3, 4, 5] - flist = [1.1, 2.2, 3.3, 4.4, 5.5] - slist = [b'one', b'two', b'three', b'four', b'five'] - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mask = [0, 1, 0, 0, 1] - self.base = ma.array(list(zip(ilist, flist, slist)), - mask=mask, dtype=ddtype) + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = [b'one', b'two', b'three', b'four', b'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mask = [0, 1, 0, 0, 1] + base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) def test_byview(self): # Test creation by view @@ -279,16 +272,16 @@ base = self.base.copy() mbase = base.view(mrecarray) mbase.harden_mask() - self.assertTrue(mbase._hardmask) + assert_(mbase._hardmask) mbase.mask = nomask assert_equal_records(mbase._mask, base._mask) mbase.soften_mask() - self.assertTrue(not mbase._hardmask) + assert_(not mbase._hardmask) mbase.mask = nomask # So, the mask of a field is no longer set to nomask... 
assert_equal_records(mbase._mask, ma.make_mask_none(base.shape, base.dtype)) - self.assertTrue(ma.make_mask(mbase['b']._mask) is nomask) + assert_(ma.make_mask(mbase['b']._mask) is nomask) assert_equal(mbase['a']._mask, mbase['b']._mask) def test_pickling(self): @@ -356,11 +349,11 @@ dtype=mult.dtype)) -class TestView(TestCase): +class TestView(object): - def setUp(self): + def setup(self): (a, b) = (np.arange(10), np.random.rand(10)) - ndtype = [('a', np.float), ('b', np.float)] + ndtype = [('a', float), ('b', float)] arr = np.array(list(zip(a, b)), dtype=ndtype) mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.)) @@ -370,48 +363,42 @@ def test_view_by_itself(self): (mrec, a, b, arr) = self.data test = mrec.view() - self.assertTrue(isinstance(test, MaskedRecords)) + assert_(isinstance(test, MaskedRecords)) assert_equal_records(test, mrec) assert_equal_records(test._mask, mrec._mask) def test_view_simple_dtype(self): (mrec, a, b, arr) = self.data - ntype = (np.float, 2) + ntype = (float, 2) test = mrec.view(ntype) - self.assertTrue(isinstance(test, ma.MaskedArray)) - assert_equal(test, np.array(list(zip(a, b)), dtype=np.float)) - self.assertTrue(test[3, 1] is ma.masked) + assert_(isinstance(test, ma.MaskedArray)) + assert_equal(test, np.array(list(zip(a, b)), dtype=float)) + assert_(test[3, 1] is ma.masked) def test_view_flexible_type(self): (mrec, a, b, arr) = self.data - alttype = [('A', np.float), ('B', np.float)] + alttype = [('A', float), ('B', float)] test = mrec.view(alttype) - self.assertTrue(isinstance(test, MaskedRecords)) + assert_(isinstance(test, MaskedRecords)) assert_equal_records(test, arr.view(alttype)) - self.assertTrue(test['B'][3] is masked) + assert_(test['B'][3] is masked) assert_equal(test.dtype, np.dtype(alttype)) - self.assertTrue(test._fill_value is None) + assert_(test._fill_value is None) ############################################################################## -class TestMRecordsImport(TestCase): - # Base test class for 
MaskedArrays. - def __init__(self, *args, **kwds): - TestCase.__init__(self, *args, **kwds) - self.setup() +class TestMRecordsImport(object): - def setup(self): - # Generic setup - _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) - _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) - _c = ma.array([b'one', b'two', b'three'], - mask=[0, 0, 1], dtype='|S8') - ddtype = [('a', int), ('b', float), ('c', '|S8')] - mrec = fromarrays([_a, _b, _c], dtype=ddtype, - fill_value=(b'99999', b'99999.', - b'N/A')) - nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype) - self.data = (mrec, nrec, ddtype) + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array([b'one', b'two', b'three'], + mask=[0, 0, 1], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(b'99999', b'99999.', + b'N/A')) + nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype) + data = (mrec, nrec, ddtype) def test_fromarrays(self): _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) @@ -485,7 +472,7 @@ with open(path, 'w') as f: f.write(fcontent) mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG') - self.assertTrue(isinstance(mrectxt, MaskedRecords)) + assert_(isinstance(mrectxt, MaskedRecords)) assert_equal(mrectxt.F, [1, 1, 1, 1]) assert_equal(mrectxt.E._mask, [1, 1, 1, 1]) assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10]) @@ -504,7 +491,7 @@ y = ma.masked_array( [(1, '2'), (3, '4')], mask=[(0, 0), (0, 1)], - dtype=[('a', int), ('b', np.object)]) + dtype=[('a', int), ('b', object)]) # getting an item used to fail y[1] diff -Nru python-numpy-1.13.3/numpy/ma/tests/test_old_ma.py python-numpy-1.14.5/numpy/ma/tests/test_old_ma.py --- python-numpy-1.13.3/numpy/ma/tests/test_old_ma.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/tests/test_old_ma.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,7 +6,8 @@ 
import numpy.core.umath as umath import numpy.core.fromnumeric as fromnumeric from numpy.testing import ( - TestCase, run_module_suite, assert_, suppress_warnings) + run_module_suite, assert_, assert_raises, assert_equal, + ) from numpy.ma.testutils import assert_array_equal from numpy.ma import ( MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue, @@ -32,9 +33,9 @@ return result -class TestMa(TestCase): +class TestMa(object): - def setUp(self): + def setup(self): x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. @@ -52,16 +53,16 @@ def test_testBasic1d(self): # Test of basic array creation and properties in 1 dimension. (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - self.assertFalse(isMaskedArray(x)) - self.assertTrue(isMaskedArray(xm)) - self.assertEqual(shape(xm), s) - self.assertEqual(xm.shape, s) - self.assertEqual(xm.dtype, x.dtype) - self.assertEqual(xm.size, reduce(lambda x, y:x * y, s)) - self.assertEqual(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) - self.assertTrue(eq(xm, xf)) - self.assertTrue(eq(filled(xm, 1.e20), xf)) - self.assertTrue(eq(x, xm)) + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_equal(shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.dtype, x.dtype) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_(eq(xm, xf)) + assert_(eq(filled(xm, 1.e20), xf)) + assert_(eq(x, xm)) def test_testBasic2d(self): # Test of basic array creation and properties in 2 dimensions. 
@@ -73,107 +74,107 @@ ym.shape = s xf.shape = s - self.assertFalse(isMaskedArray(x)) - self.assertTrue(isMaskedArray(xm)) - self.assertEqual(shape(xm), s) - self.assertEqual(xm.shape, s) - self.assertEqual(xm.size, reduce(lambda x, y:x * y, s)) - self.assertEqual(count(xm), + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_equal(shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) - self.assertTrue(eq(xm, xf)) - self.assertTrue(eq(filled(xm, 1.e20), xf)) - self.assertTrue(eq(x, xm)) - self.setUp() + assert_(eq(xm, xf)) + assert_(eq(filled(xm, 1.e20), xf)) + assert_(eq(x, xm)) + self.setup() def test_testArithmetic(self): # Test of basic arithmetic. (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d a2d = array([[1, 2], [0, 4]]) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) - self.assertTrue(eq(a2d * a2d, a2d * a2dm)) - self.assertTrue(eq(a2d + a2d, a2d + a2dm)) - self.assertTrue(eq(a2d - a2d, a2d - a2dm)) + assert_(eq(a2d * a2d, a2d * a2dm)) + assert_(eq(a2d + a2d, a2d + a2dm)) + assert_(eq(a2d - a2d, a2d - a2dm)) for s in [(12,), (4, 3), (2, 6)]: x = x.reshape(s) y = y.reshape(s) xm = xm.reshape(s) ym = ym.reshape(s) xf = xf.reshape(s) - self.assertTrue(eq(-x, -xm)) - self.assertTrue(eq(x + y, xm + ym)) - self.assertTrue(eq(x - y, xm - ym)) - self.assertTrue(eq(x * y, xm * ym)) + assert_(eq(-x, -xm)) + assert_(eq(x + y, xm + ym)) + assert_(eq(x - y, xm - ym)) + assert_(eq(x * y, xm * ym)) with np.errstate(divide='ignore', invalid='ignore'): - self.assertTrue(eq(x / y, xm / ym)) - self.assertTrue(eq(a10 + y, a10 + ym)) - self.assertTrue(eq(a10 - y, a10 - ym)) - self.assertTrue(eq(a10 * y, a10 * ym)) + assert_(eq(x / y, xm / ym)) + assert_(eq(a10 + y, a10 + ym)) + assert_(eq(a10 - y, a10 - ym)) + assert_(eq(a10 * y, a10 * ym)) with np.errstate(divide='ignore', invalid='ignore'): - self.assertTrue(eq(a10 / y, a10 / ym)) - self.assertTrue(eq(x 
+ a10, xm + a10)) - self.assertTrue(eq(x - a10, xm - a10)) - self.assertTrue(eq(x * a10, xm * a10)) - self.assertTrue(eq(x / a10, xm / a10)) - self.assertTrue(eq(x ** 2, xm ** 2)) - self.assertTrue(eq(abs(x) ** 2.5, abs(xm) ** 2.5)) - self.assertTrue(eq(x ** y, xm ** ym)) - self.assertTrue(eq(np.add(x, y), add(xm, ym))) - self.assertTrue(eq(np.subtract(x, y), subtract(xm, ym))) - self.assertTrue(eq(np.multiply(x, y), multiply(xm, ym))) + assert_(eq(a10 / y, a10 / ym)) + assert_(eq(x + a10, xm + a10)) + assert_(eq(x - a10, xm - a10)) + assert_(eq(x * a10, xm * a10)) + assert_(eq(x / a10, xm / a10)) + assert_(eq(x ** 2, xm ** 2)) + assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5)) + assert_(eq(x ** y, xm ** ym)) + assert_(eq(np.add(x, y), add(xm, ym))) + assert_(eq(np.subtract(x, y), subtract(xm, ym))) + assert_(eq(np.multiply(x, y), multiply(xm, ym))) with np.errstate(divide='ignore', invalid='ignore'): - self.assertTrue(eq(np.divide(x, y), divide(xm, ym))) + assert_(eq(np.divide(x, y), divide(xm, ym))) def test_testMixedArithmetic(self): na = np.array([1]) ma = array([1]) - self.assertTrue(isinstance(na + ma, MaskedArray)) - self.assertTrue(isinstance(ma + na, MaskedArray)) + assert_(isinstance(na + ma, MaskedArray)) + assert_(isinstance(ma + na, MaskedArray)) def test_testUfuncs1(self): # Test various functions such as sin, cos. 
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - self.assertTrue(eq(np.cos(x), cos(xm))) - self.assertTrue(eq(np.cosh(x), cosh(xm))) - self.assertTrue(eq(np.sin(x), sin(xm))) - self.assertTrue(eq(np.sinh(x), sinh(xm))) - self.assertTrue(eq(np.tan(x), tan(xm))) - self.assertTrue(eq(np.tanh(x), tanh(xm))) + assert_(eq(np.cos(x), cos(xm))) + assert_(eq(np.cosh(x), cosh(xm))) + assert_(eq(np.sin(x), sin(xm))) + assert_(eq(np.sinh(x), sinh(xm))) + assert_(eq(np.tan(x), tan(xm))) + assert_(eq(np.tanh(x), tanh(xm))) with np.errstate(divide='ignore', invalid='ignore'): - self.assertTrue(eq(np.sqrt(abs(x)), sqrt(xm))) - self.assertTrue(eq(np.log(abs(x)), log(xm))) - self.assertTrue(eq(np.log10(abs(x)), log10(xm))) - self.assertTrue(eq(np.exp(x), exp(xm))) - self.assertTrue(eq(np.arcsin(z), arcsin(zm))) - self.assertTrue(eq(np.arccos(z), arccos(zm))) - self.assertTrue(eq(np.arctan(z), arctan(zm))) - self.assertTrue(eq(np.arctan2(x, y), arctan2(xm, ym))) - self.assertTrue(eq(np.absolute(x), absolute(xm))) - self.assertTrue(eq(np.equal(x, y), equal(xm, ym))) - self.assertTrue(eq(np.not_equal(x, y), not_equal(xm, ym))) - self.assertTrue(eq(np.less(x, y), less(xm, ym))) - self.assertTrue(eq(np.greater(x, y), greater(xm, ym))) - self.assertTrue(eq(np.less_equal(x, y), less_equal(xm, ym))) - self.assertTrue(eq(np.greater_equal(x, y), greater_equal(xm, ym))) - self.assertTrue(eq(np.conjugate(x), conjugate(xm))) - self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, ym)))) - self.assertTrue(eq(np.concatenate((x, y)), concatenate((x, y)))) - self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, y)))) - self.assertTrue(eq(np.concatenate((x, y, x)), concatenate((x, ym, x)))) + assert_(eq(np.sqrt(abs(x)), sqrt(xm))) + assert_(eq(np.log(abs(x)), log(xm))) + assert_(eq(np.log10(abs(x)), log10(xm))) + assert_(eq(np.exp(x), exp(xm))) + assert_(eq(np.arcsin(z), arcsin(zm))) + assert_(eq(np.arccos(z), arccos(zm))) + assert_(eq(np.arctan(z), arctan(zm))) + 
assert_(eq(np.arctan2(x, y), arctan2(xm, ym))) + assert_(eq(np.absolute(x), absolute(xm))) + assert_(eq(np.equal(x, y), equal(xm, ym))) + assert_(eq(np.not_equal(x, y), not_equal(xm, ym))) + assert_(eq(np.less(x, y), less(xm, ym))) + assert_(eq(np.greater(x, y), greater(xm, ym))) + assert_(eq(np.less_equal(x, y), less_equal(xm, ym))) + assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym))) + assert_(eq(np.conjugate(x), conjugate(xm))) + assert_(eq(np.concatenate((x, y)), concatenate((xm, ym)))) + assert_(eq(np.concatenate((x, y)), concatenate((x, y)))) + assert_(eq(np.concatenate((x, y)), concatenate((xm, y)))) + assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x)))) def test_xtestCount(self): # Test count ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - self.assertTrue(count(ott).dtype.type is np.intp) - self.assertEqual(3, count(ott)) - self.assertEqual(1, count(1)) - self.assertTrue(eq(0, array(1, mask=[1]))) + assert_(count(ott).dtype.type is np.intp) + assert_equal(3, count(ott)) + assert_equal(1, count(1)) + assert_(eq(0, array(1, mask=[1]))) ott = ott.reshape((2, 2)) - self.assertTrue(count(ott).dtype.type is np.intp) + assert_(count(ott).dtype.type is np.intp) assert_(isinstance(count(ott, 0), np.ndarray)) - self.assertTrue(count(ott).dtype.type is np.intp) - self.assertTrue(eq(3, count(ott))) + assert_(count(ott).dtype.type is np.intp) + assert_(eq(3, count(ott))) assert_(getmask(count(ott, 0)) is nomask) - self.assertTrue(eq([1, 2], count(ott, 0))) + assert_(eq([1, 2], count(ott, 0))) def test_testMinMax(self): # Test minimum and maximum. @@ -182,29 +183,29 @@ xmr = ravel(xm) # true because of careful selection of data - self.assertTrue(eq(max(xr), maximum.reduce(xmr))) - self.assertTrue(eq(min(xr), minimum.reduce(xmr))) + assert_(eq(max(xr), maximum.reduce(xmr))) + assert_(eq(min(xr), minimum.reduce(xmr))) def test_testAddSumProd(self): # Test add, sum, product. 
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d - self.assertTrue(eq(np.add.reduce(x), add.reduce(x))) - self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x))) - self.assertTrue(eq(4, sum(array(4), axis=0))) - self.assertTrue(eq(4, sum(array(4), axis=0))) - self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0))) - self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))) - self.assertTrue(eq(np.sum(x, 0), sum(x, 0))) - self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0))) - self.assertTrue(eq(np.product(x, 0), product(x, 0))) - self.assertTrue(eq(np.product(filled(xm, 1), axis=0), + assert_(eq(np.add.reduce(x), add.reduce(x))) + assert_(eq(np.add.accumulate(x), add.accumulate(x))) + assert_(eq(4, sum(array(4), axis=0))) + assert_(eq(4, sum(array(4), axis=0))) + assert_(eq(np.sum(x, axis=0), sum(x, axis=0))) + assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))) + assert_(eq(np.sum(x, 0), sum(x, 0))) + assert_(eq(np.product(x, axis=0), product(x, axis=0))) + assert_(eq(np.product(x, 0), product(x, 0))) + assert_(eq(np.product(filled(xm, 1), axis=0), product(xm, axis=0))) if len(s) > 1: - self.assertTrue(eq(np.concatenate((x, y), 1), + assert_(eq(np.concatenate((x, y), 1), concatenate((xm, ym), 1))) - self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1))) - self.assertTrue(eq(np.sum(x, 1), sum(x, 1))) - self.assertTrue(eq(np.product(x, 1), product(x, 1))) + assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1))) + assert_(eq(np.sum(x, 1), sum(x, 1))) + assert_(eq(np.product(x, 1), product(x, 1))) def test_testCI(self): # Test of conversions and indexing @@ -251,80 +252,105 @@ x2 = np.array([1, 'hello', 2, 3], object) s1 = x1[1] s2 = x2[1] - self.assertEqual(type(s2), str) - self.assertEqual(type(s1), str) - self.assertEqual(s1, s2) + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) assert_(x1[1:1].shape == (0,)) def test_testCopySize(self): # Tests of some subtle points of copying and sizing. 
- with suppress_warnings() as sup: - sup.filter( - np.ma.core.MaskedArrayFutureWarning, - "setting an item on a masked array which has a " - "shared mask will not copy") - - n = [0, 0, 1, 0, 0] - m = make_mask(n) - m2 = make_mask(m) - self.assertTrue(m is m2) - m3 = make_mask(m, copy=1) - self.assertTrue(m is not m3) - - x1 = np.arange(5) - y1 = array(x1, mask=m) - self.assertTrue(y1._data is not x1) - self.assertTrue(allequal(x1, y1._data)) - self.assertTrue(y1.mask is m) - - y1a = array(y1, copy=0) - self.assertTrue(y1a.mask is y1.mask) - - y2 = array(x1, mask=m, copy=0) - self.assertTrue(y2.mask is m) - self.assertTrue(y2[2] is masked) - y2[2] = 9 - self.assertTrue(y2[2] is not masked) - self.assertTrue(y2.mask is not m) - self.assertTrue(allequal(y2.mask, 0)) - - y3 = array(x1 * 1.0, mask=m) - self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype) - - x4 = arange(4) - x4[2] = masked - y4 = resize(x4, (8,)) - self.assertTrue(eq(concatenate([x4, x4]), y4)) - self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) - y5 = repeat(x4, (2, 2, 2, 2), axis=0) - self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) - y6 = repeat(x4, 2, axis=0) - self.assertTrue(eq(y5, y6)) + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=1) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_(y1._data is not x1) + assert_(allequal(x1, y1._data)) + assert_(y1.mask is m) + + y1a = array(y1, copy=0) + assert_(y1a.mask is y1.mask) + + y2 = array(x1, mask=m3, copy=0) + assert_(y2.mask is m3) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2.mask is m3) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a.mask is not m) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + assert_(y2a.mask is not m) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + 
x4[2] = masked + y4 = resize(x4, (8,)) + assert_(eq(concatenate([x4, x4]), y4)) + assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) + y6 = repeat(x4, 2, axis=0) + assert_(eq(y5, y6)) def test_testPut(self): # Test of put - with suppress_warnings() as sup: - sup.filter( - np.ma.core.MaskedArrayFutureWarning, - "setting an item on a masked array which has a " - "shared mask will not copy") - d = arange(5) - n = [0, 0, 0, 1, 1] - m = make_mask(n) - x = array(d, mask=m) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is masked) - x[[1, 4]] = [10, 40] - self.assertTrue(x.mask is not m) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is not masked) - self.assertTrue(eq(x, [0, 10, 2, -1, 40])) - - x = array(d, mask=m) - x.put([0, 1, 2], [-1, 100, 200]) - self.assertTrue(eq(x, [-1, 100, 200, 0, 0])) - self.assertTrue(x[3] is masked) - self.assertTrue(x[4] is masked) + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + m2 = m.copy() + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x.mask is m) + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_(eq(x, [0, 10, 2, -1, 40])) + + x = array(d, mask=m2, copy=True) + x.put([0, 1, 2], [-1, 100, 200]) + assert_(x.mask is not m2) + assert_(x[3] is masked) + assert_(x[4] is masked) + assert_(eq(x, [-1, 100, 200, 0, 0])) + + def test_testPut2(self): + # Test of put + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is not masked) + x[2:4] = z + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + y = x[2:4] + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is not masked) + y[:] = z + assert_(y[0] is masked) + assert_(y[1] is not masked) + assert_(eq(y, 
[10, 40])) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) def test_testMaPut(self): (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d @@ -531,147 +557,147 @@ # Test of masked element xx = arange(6) xx[1] = masked - self.assertTrue(str(masked) == '--') - self.assertTrue(xx[1] is masked) - self.assertEqual(filled(xx[1], 0), 0) + assert_(str(masked) == '--') + assert_(xx[1] is masked) + assert_equal(filled(xx[1], 0), 0) def test_testAverage1(self): # Test of average. ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) - self.assertTrue(eq(2.0, average(ott, axis=0))) - self.assertTrue(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) + assert_(eq(2.0, average(ott, axis=0))) + assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) - self.assertTrue(eq(2.0, result)) - self.assertTrue(wts == 4.0) + assert_(eq(2.0, result)) + assert_(wts == 4.0) ott[:] = masked - self.assertTrue(average(ott, axis=0) is masked) + assert_(average(ott, axis=0) is masked) ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) ott = ott.reshape(2, 2) ott[:, 1] = masked - self.assertTrue(eq(average(ott, axis=0), [2.0, 0.0])) - self.assertTrue(average(ott, axis=1)[0] is masked) - self.assertTrue(eq([2., 0.], average(ott, axis=0))) + assert_(eq(average(ott, axis=0), [2.0, 0.0])) + assert_(average(ott, axis=1)[0] is masked) + assert_(eq([2., 0.], average(ott, axis=0))) result, wts = average(ott, axis=0, returned=1) - self.assertTrue(eq(wts, [1., 0.])) + assert_(eq(wts, [1., 0.])) def test_testAverage2(self): # More tests of average. 
w1 = [0, 1, 1, 1, 1, 0] w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] x = arange(6) - self.assertTrue(allclose(average(x, axis=0), 2.5)) - self.assertTrue(allclose(average(x, axis=0, weights=w1), 2.5)) + assert_(allclose(average(x, axis=0), 2.5)) + assert_(allclose(average(x, axis=0, weights=w1), 2.5)) y = array([arange(6), 2.0 * arange(6)]) - self.assertTrue(allclose(average(y, None), + assert_(allclose(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.)) - self.assertTrue(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) - self.assertTrue(allclose(average(y, axis=1), + assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) + assert_(allclose(average(y, axis=1), [average(x, axis=0), average(x, axis=0)*2.0])) - self.assertTrue(allclose(average(y, None, weights=w2), 20. / 6.)) - self.assertTrue(allclose(average(y, axis=0, weights=w2), + assert_(allclose(average(y, None, weights=w2), 20. / 6.)) + assert_(allclose(average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.])) - self.assertTrue(allclose(average(y, axis=1), + assert_(allclose(average(y, axis=1), [average(x, axis=0), average(x, axis=0)*2.0])) m1 = zeros(6) m2 = [0, 0, 1, 1, 0, 0] m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] m4 = ones(6) m5 = [0, 1, 1, 1, 1, 1] - self.assertTrue(allclose(average(masked_array(x, m1), axis=0), 2.5)) - self.assertTrue(allclose(average(masked_array(x, m2), axis=0), 2.5)) - self.assertTrue(average(masked_array(x, m4), axis=0) is masked) - self.assertEqual(average(masked_array(x, m5), axis=0), 0.0) - self.assertEqual(count(average(masked_array(x, m4), axis=0)), 0) + assert_(allclose(average(masked_array(x, m1), axis=0), 2.5)) + assert_(allclose(average(masked_array(x, m2), axis=0), 2.5)) + assert_(average(masked_array(x, m4), axis=0) is masked) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) z = masked_array(y, m3) - self.assertTrue(allclose(average(z, None), 20. 
/ 6.)) - self.assertTrue(allclose(average(z, axis=0), + assert_(allclose(average(z, None), 20. / 6.)) + assert_(allclose(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])) - self.assertTrue(allclose(average(z, axis=1), [2.5, 5.0])) - self.assertTrue(allclose(average(z, axis=0, weights=w2), + assert_(allclose(average(z, axis=1), [2.5, 5.0])) + assert_(allclose(average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0])) a = arange(6) b = arange(6) * 3 r1, w1 = average([[a, b], [b, a]], axis=1, returned=1) - self.assertEqual(shape(r1), shape(w1)) - self.assertEqual(r1.shape, w1.shape) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1) - self.assertEqual(shape(w2), shape(r2)) + assert_equal(shape(w2), shape(r2)) r2, w2 = average(ones((2, 2, 3)), returned=1) - self.assertEqual(shape(w2), shape(r2)) + assert_equal(shape(w2), shape(r2)) r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) - self.assertTrue(shape(w2) == shape(r2)) + assert_(shape(w2) == shape(r2)) a2d = array([[1, 2], [0, 4]], float) a2dm = masked_array(a2d, [[0, 0], [1, 0]]) a2da = average(a2d, axis=0) - self.assertTrue(eq(a2da, [0.5, 3.0])) + assert_(eq(a2da, [0.5, 3.0])) a2dma = average(a2dm, axis=0) - self.assertTrue(eq(a2dma, [1.0, 3.0])) + assert_(eq(a2dma, [1.0, 3.0])) a2dma = average(a2dm, axis=None) - self.assertTrue(eq(a2dma, 7. / 3.)) + assert_(eq(a2dma, 7. 
/ 3.)) a2dma = average(a2dm, axis=1) - self.assertTrue(eq(a2dma, [1.5, 4.0])) + assert_(eq(a2dma, [1.5, 4.0])) def test_testToPython(self): - self.assertEqual(1, int(array(1))) - self.assertEqual(1.0, float(array(1))) - self.assertEqual(1, int(array([[[1]]]))) - self.assertEqual(1.0, float(array([[1]]))) - self.assertRaises(TypeError, float, array([1, 1])) - self.assertRaises(ValueError, bool, array([0, 1])) - self.assertRaises(ValueError, bool, array([0, 0], mask=[0, 1])) + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + assert_raises(ValueError, bool, array([0, 1])) + assert_raises(ValueError, bool, array([0, 0], mask=[0, 1])) def test_testScalarArithmetic(self): xm = array(0, mask=1) #TODO FIXME: Find out what the following raises a warning in r8247 with np.errstate(divide='ignore'): - self.assertTrue((1 / array(0)).mask) - self.assertTrue((1 + xm).mask) - self.assertTrue((-xm).mask) - self.assertTrue((-xm).mask) - self.assertTrue(maximum(xm, xm).mask) - self.assertTrue(minimum(xm, xm).mask) - self.assertTrue(xm.filled().dtype is xm._data.dtype) + assert_((1 / array(0)).mask) + assert_((1 + xm).mask) + assert_((-xm).mask) + assert_((-xm).mask) + assert_(maximum(xm, xm).mask) + assert_(minimum(xm, xm).mask) + assert_(xm.filled().dtype is xm._data.dtype) x = array(0, mask=0) - self.assertTrue(x.filled() == x._data) - self.assertEqual(str(xm), str(masked_print_option)) + assert_(x.filled() == x._data) + assert_equal(str(xm), str(masked_print_option)) def test_testArrayMethods(self): a = array([1, 3, 2]) - self.assertTrue(eq(a.any(), a._data.any())) - self.assertTrue(eq(a.all(), a._data.all())) - self.assertTrue(eq(a.argmax(), a._data.argmax())) - self.assertTrue(eq(a.argmin(), a._data.argmin())) - self.assertTrue(eq(a.choose(0, 1, 2, 3, 4), + assert_(eq(a.any(), a._data.any())) + assert_(eq(a.all(), a._data.all())) + 
assert_(eq(a.argmax(), a._data.argmax())) + assert_(eq(a.argmin(), a._data.argmin())) + assert_(eq(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))) - self.assertTrue(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) - self.assertTrue(eq(a.conj(), a._data.conj())) - self.assertTrue(eq(a.conjugate(), a._data.conjugate())) + assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) + assert_(eq(a.conj(), a._data.conj())) + assert_(eq(a.conjugate(), a._data.conjugate())) m = array([[1, 2], [3, 4]]) - self.assertTrue(eq(m.diagonal(), m._data.diagonal())) - self.assertTrue(eq(a.sum(), a._data.sum())) - self.assertTrue(eq(a.take([1, 2]), a._data.take([1, 2]))) - self.assertTrue(eq(m.transpose(), m._data.transpose())) + assert_(eq(m.diagonal(), m._data.diagonal())) + assert_(eq(a.sum(), a._data.sum())) + assert_(eq(a.take([1, 2]), a._data.take([1, 2]))) + assert_(eq(m.transpose(), m._data.transpose())) def test_testArrayAttributes(self): a = array([1, 3, 2]) - self.assertEqual(a.ndim, 1) + assert_equal(a.ndim, 1) def test_testAPI(self): - self.assertFalse([m for m in dir(np.ndarray) - if m not in dir(MaskedArray) and - not m.startswith('_')]) + assert_(not [m for m in dir(np.ndarray) + if m not in dir(MaskedArray) and + not m.startswith('_')]) def test_testSingleElementSubscript(self): a = array([1, 3, 2]) b = array([1, 3, 2], mask=[1, 0, 1]) - self.assertEqual(a[0].shape, ()) - self.assertEqual(b[0].shape, ()) - self.assertEqual(b[1].shape, ()) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) -class TestUfuncs(TestCase): - def setUp(self): +class TestUfuncs(object): + def setup(self): self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) @@ -709,35 +735,35 @@ np.seterr(divide='ignore') ur = uf(*args) mr = mf(*args) - self.assertTrue(eq(ur.filled(0), mr.filled(0), f)) - self.assertTrue(eqmask(ur.mask, mr.mask)) + assert_(eq(ur.filled(0), 
mr.filled(0), f)) + assert_(eqmask(ur.mask, mr.mask)) def test_reduce(self): a = self.d[0] - self.assertFalse(alltrue(a, axis=0)) - self.assertTrue(sometrue(a, axis=0)) - self.assertEqual(sum(a[:3], axis=0), 0) - self.assertEqual(product(a, axis=0), 0) + assert_(not alltrue(a, axis=0)) + assert_(sometrue(a, axis=0)) + assert_equal(sum(a[:3], axis=0), 0) + assert_equal(product(a, axis=0), 0) def test_minmax(self): a = arange(1, 13).reshape(3, 4) amask = masked_where(a < 5, a) - self.assertEqual(amask.max(), a.max()) - self.assertEqual(amask.min(), 5) - self.assertTrue((amask.max(0) == a.max(0)).all()) - self.assertTrue((amask.min(0) == [5, 6, 7, 8]).all()) - self.assertTrue(amask.max(1)[0].mask) - self.assertTrue(amask.min(1)[0].mask) + assert_equal(amask.max(), a.max()) + assert_equal(amask.min(), 5) + assert_((amask.max(0) == a.max(0)).all()) + assert_((amask.min(0) == [5, 6, 7, 8]).all()) + assert_(amask.max(1)[0].mask) + assert_(amask.min(1)[0].mask) def test_nonzero(self): for t in "?bhilqpBHILQPfdgFDGO": x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) - self.assertTrue(eq(nonzero(x), [0])) + assert_(eq(nonzero(x), [0])) -class TestArrayMethods(TestCase): +class TestArrayMethods(object): - def setUp(self): + def setup(self): x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, @@ -762,63 +788,63 @@ def test_trace(self): (x, X, XX, m, mx, mX, mXX,) = self.d mXdiag = mX.diagonal() - self.assertEqual(mX.trace(), mX.diagonal().compressed().sum()) - self.assertTrue(eq(mX.trace(), + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_(eq(mX.trace(), X.trace() - sum(mXdiag.mask * X.diagonal(), axis=0))) def test_clip(self): (x, X, XX, m, mx, mX, mXX,) = self.d clipped = mx.clip(2, 8) - self.assertTrue(eq(clipped.mask, mx.mask)) - self.assertTrue(eq(clipped._data, x.clip(2, 8))) - self.assertTrue(eq(clipped._data, mx._data.clip(2, 8))) + assert_(eq(clipped.mask, mx.mask)) + 
assert_(eq(clipped._data, x.clip(2, 8))) + assert_(eq(clipped._data, mx._data.clip(2, 8))) def test_ptp(self): (x, X, XX, m, mx, mX, mXX,) = self.d (n, m) = X.shape - self.assertEqual(mx.ptp(), mx.compressed().ptp()) + assert_equal(mx.ptp(), mx.compressed().ptp()) rows = np.zeros(n, np.float_) cols = np.zeros(m, np.float_) for k in range(m): cols[k] = mX[:, k].compressed().ptp() for k in range(n): rows[k] = mX[k].compressed().ptp() - self.assertTrue(eq(mX.ptp(0), cols)) - self.assertTrue(eq(mX.ptp(1), rows)) + assert_(eq(mX.ptp(0), cols)) + assert_(eq(mX.ptp(1), rows)) def test_swapaxes(self): (x, X, XX, m, mx, mX, mXX,) = self.d mXswapped = mX.swapaxes(0, 1) - self.assertTrue(eq(mXswapped[-1], mX[:, -1])) + assert_(eq(mXswapped[-1], mX[:, -1])) mXXswapped = mXX.swapaxes(0, 2) - self.assertEqual(mXXswapped.shape, (2, 2, 3, 3)) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) def test_cumprod(self): (x, X, XX, m, mx, mX, mXX,) = self.d mXcp = mX.cumprod(0) - self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(0))) + assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) mXcp = mX.cumprod(1) - self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(1))) + assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) def test_cumsum(self): (x, X, XX, m, mx, mX, mXX,) = self.d mXcp = mX.cumsum(0) - self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(0))) + assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) mXcp = mX.cumsum(1) - self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(1))) + assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) def test_varstd(self): (x, X, XX, m, mx, mX, mXX,) = self.d - self.assertTrue(eq(mX.var(axis=None), mX.compressed().var())) - self.assertTrue(eq(mX.std(axis=None), mX.compressed().std())) - self.assertTrue(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) - self.assertTrue(eq(mX.var().shape, X.var().shape)) + assert_(eq(mX.var(axis=None), mX.compressed().var())) + assert_(eq(mX.std(axis=None), mX.compressed().std())) + assert_(eq(mXX.var(axis=3).shape, 
XX.var(axis=3).shape)) + assert_(eq(mX.var().shape, X.var().shape)) (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) for k in range(6): - self.assertTrue(eq(mXvar1[k], mX[k].compressed().var())) - self.assertTrue(eq(mXvar0[k], mX[:, k].compressed().var())) - self.assertTrue(eq(np.sqrt(mXvar0[k]), + assert_(eq(mXvar1[k], mX[k].compressed().var())) + assert_(eq(mXvar0[k], mX[:, k].compressed().var())) + assert_(eq(np.sqrt(mXvar0[k]), mX[:, k].compressed().std())) diff -Nru python-numpy-1.13.3/numpy/ma/tests/test_regression.py python-numpy-1.14.5/numpy/ma/tests/test_regression.py --- python-numpy-1.13.3/numpy/ma/tests/test_regression.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/tests/test_regression.py 2018-06-12 18:28:52.000000000 +0000 @@ -3,25 +3,24 @@ import warnings import numpy as np -from numpy.testing import (assert_, TestCase, assert_array_equal, - assert_allclose, run_module_suite, - suppress_warnings) +from numpy.testing import ( + assert_, assert_array_equal, assert_allclose, run_module_suite, + suppress_warnings + ) -rlevel = 1 - -class TestRegression(TestCase): - def test_masked_array_create(self,level=rlevel): +class TestRegression(object): + def test_masked_array_create(self): # Ticket #17 x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], mask=[0, 0, 0, 1, 1, 1, 0, 0]) assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) - def test_masked_array(self,level=rlevel): + def test_masked_array(self): # Ticket #61 np.ma.array(1, mask=[1]) - def test_mem_masked_where(self,level=rlevel): + def test_mem_masked_where(self): # Ticket #62 from numpy.ma import masked_where, MaskType a = np.zeros((1, 1)) @@ -29,7 +28,7 @@ c = masked_where(b, a) a-c - def test_masked_array_multiply(self,level=rlevel): + def test_masked_array_multiply(self): # Ticket #254 a = np.ma.zeros((4, 1)) a[2, 0] = np.ma.masked @@ -37,7 +36,7 @@ a*b b*a - def test_masked_array_repeat(self, level=rlevel): + def test_masked_array_repeat(self): # Ticket #271 
np.ma.array([1], mask=False).repeat(10) diff -Nru python-numpy-1.13.3/numpy/ma/tests/test_subclassing.py python-numpy-1.14.5/numpy/ma/tests/test_subclassing.py --- python-numpy-1.13.3/numpy/ma/tests/test_subclassing.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/tests/test_subclassing.py 2018-06-12 18:28:52.000000000 +0000 @@ -9,7 +9,7 @@ from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_raises, dec +from numpy.testing import run_module_suite, assert_, assert_raises, dec from numpy.ma.testutils import assert_equal from numpy.ma.core import ( array, arange, masked, MaskedArray, masked_array, log, add, hypot, @@ -17,6 +17,9 @@ ) # from numpy.ma.core import ( +def assert_startswith(a, b): + # produces a better error message than assert_(a.startswith(b)) + assert_equal(a[:len(b)], b) class SubArray(np.ndarray): # Defines a generic np.ndarray subclass, that stores some metadata @@ -172,10 +175,10 @@ return obj -class TestSubclassing(TestCase): +class TestSubclassing(object): # Test suite for masked subclasses of ndarray. 
- def setUp(self): + def setup(self): x = np.arange(5, dtype='float') mx = mmatrix(x, mask=[0, 1, 0, 0, 0]) self.data = (x, mx) @@ -186,41 +189,41 @@ m = [0, 0, 1, 0, 0] xsub = SubArray(x) xmsub = masked_array(xsub, mask=m) - self.assertTrue(isinstance(xmsub, MaskedArray)) + assert_(isinstance(xmsub, MaskedArray)) assert_equal(xmsub._data, xsub) - self.assertTrue(isinstance(xmsub._data, SubArray)) + assert_(isinstance(xmsub._data, SubArray)) def test_maskedarray_subclassing(self): # Tests subclassing MaskedArray (x, mx) = self.data - self.assertTrue(isinstance(mx._data, np.matrix)) + assert_(isinstance(mx._data, np.matrix)) def test_masked_unary_operations(self): # Tests masked_unary_operation (x, mx) = self.data with np.errstate(divide='ignore'): - self.assertTrue(isinstance(log(mx), mmatrix)) + assert_(isinstance(log(mx), mmatrix)) assert_equal(log(x), np.log(x)) def test_masked_binary_operations(self): # Tests masked_binary_operation (x, mx) = self.data # Result should be a mmatrix - self.assertTrue(isinstance(add(mx, mx), mmatrix)) - self.assertTrue(isinstance(add(mx, x), mmatrix)) + assert_(isinstance(add(mx, mx), mmatrix)) + assert_(isinstance(add(mx, x), mmatrix)) # Result should work assert_equal(add(mx, x), mx+x) - self.assertTrue(isinstance(add(mx, mx)._data, np.matrix)) - self.assertTrue(isinstance(add.outer(mx, mx), mmatrix)) - self.assertTrue(isinstance(hypot(mx, mx), mmatrix)) - self.assertTrue(isinstance(hypot(mx, x), mmatrix)) + assert_(isinstance(add(mx, mx)._data, np.matrix)) + assert_(isinstance(add.outer(mx, mx), mmatrix)) + assert_(isinstance(hypot(mx, mx), mmatrix)) + assert_(isinstance(hypot(mx, x), mmatrix)) def test_masked_binary_operations2(self): # Tests domained_masked_binary_operation (x, mx) = self.data xmx = masked_array(mx.data.__array__(), mask=mx.mask) - self.assertTrue(isinstance(divide(mx, mx), mmatrix)) - self.assertTrue(isinstance(divide(mx, x), mmatrix)) + assert_(isinstance(divide(mx, mx), mmatrix)) + 
assert_(isinstance(divide(mx, x), mmatrix)) assert_equal(divide(mx, mx), divide(xmx, xmx)) def test_attributepropagation(self): @@ -229,22 +232,22 @@ ym = msubarray(x) # z = (my+1) - self.assertTrue(isinstance(z, MaskedArray)) - self.assertTrue(not isinstance(z, MSubArray)) - self.assertTrue(isinstance(z._data, SubArray)) + assert_(isinstance(z, MaskedArray)) + assert_(not isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) assert_equal(z._data.info, {}) # z = (ym+1) - self.assertTrue(isinstance(z, MaskedArray)) - self.assertTrue(isinstance(z, MSubArray)) - self.assertTrue(isinstance(z._data, SubArray)) - self.assertTrue(z._data.info['added'] > 0) + assert_(isinstance(z, MaskedArray)) + assert_(isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_(z._data.info['added'] > 0) # Test that inplace methods from data get used (gh-4617) ym += 1 - self.assertTrue(isinstance(ym, MaskedArray)) - self.assertTrue(isinstance(ym, MSubArray)) - self.assertTrue(isinstance(ym._data, SubArray)) - self.assertTrue(ym._data.info['iadded'] > 0) + assert_(isinstance(ym, MaskedArray)) + assert_(isinstance(ym, MSubArray)) + assert_(isinstance(ym._data, SubArray)) + assert_(ym._data.info['iadded'] > 0) # ym._set_mask([1, 0, 0, 0, 1]) assert_equal(ym._mask, [1, 0, 0, 0, 1]) @@ -253,7 +256,7 @@ # xsub = subarray(x, info={'name':'x'}) mxsub = masked_array(xsub) - self.assertTrue(hasattr(mxsub, 'info')) + assert_(hasattr(mxsub, 'info')) assert_equal(mxsub.info, xsub.info) def test_subclasspreservation(self): @@ -264,22 +267,22 @@ xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) # mxsub = masked_array(xsub, subok=False) - self.assertTrue(not isinstance(mxsub, MSubArray)) - self.assertTrue(isinstance(mxsub, MaskedArray)) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) assert_equal(mxsub._mask, m) # mxsub = asarray(xsub) - self.assertTrue(not isinstance(mxsub, MSubArray)) - self.assertTrue(isinstance(mxsub, MaskedArray)) + 
assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) assert_equal(mxsub._mask, m) # mxsub = masked_array(xsub, subok=True) - self.assertTrue(isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MSubArray)) assert_equal(mxsub.info, xsub.info) assert_equal(mxsub._mask, xsub._mask) # mxsub = asanyarray(xsub) - self.assertTrue(isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MSubArray)) assert_equal(mxsub.info, xsub.info) assert_equal(mxsub._mask, m) @@ -290,21 +293,21 @@ mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) # getter should return a ComplicatedSubArray, even for single item # first check we wrote ComplicatedSubArray correctly - self.assertTrue(isinstance(xcsub[1], ComplicatedSubArray)) - self.assertTrue(isinstance(xcsub[1,...], ComplicatedSubArray)) - self.assertTrue(isinstance(xcsub[1:4], ComplicatedSubArray)) + assert_(isinstance(xcsub[1], ComplicatedSubArray)) + assert_(isinstance(xcsub[1,...], ComplicatedSubArray)) + assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) # now that it propagates inside the MaskedArray - self.assertTrue(isinstance(mxcsub[1], ComplicatedSubArray)) - self.assertTrue(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) - self.assertTrue(mxcsub[0] is masked) - self.assertTrue(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) - self.assertTrue(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) + assert_(mxcsub[0] is masked) + assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) # also for flattened version (which goes via MaskedIterator) - self.assertTrue(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) - self.assertTrue(mxcsub.flat[0] is masked) - self.assertTrue(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) + assert_(isinstance(mxcsub.flat[1].data, 
ComplicatedSubArray)) + assert_(mxcsub.flat[0] is masked) + assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) # setter should only work with ComplicatedSubArray input # first check we wrote ComplicatedSubArray correctly @@ -325,22 +328,22 @@ xcsub = ComplicatedSubArray(x) mxcsub_nomask = masked_array(xcsub) - self.assertTrue(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) - self.assertTrue(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) - self.assertTrue(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) - self.assertTrue(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) def test_subclass_repr(self): """test that repr uses the name of the subclass and 'array' for np.ndarray""" x = np.arange(5) mx = masked_array(x, mask=[True, False, True, False, False]) - self.assertTrue(repr(mx).startswith('masked_array')) + assert_startswith(repr(mx), 'masked_array') xsub = SubArray(x) mxsub = masked_array(xsub, mask=[True, False, True, False, False]) - self.assertTrue(repr(mxsub).startswith( - 'masked_{0}(data = [-- 1 -- 3 4]'.format(SubArray.__name__))) + assert_startswith(repr(mxsub), + 'masked_{0}(data=[--, 1, --, 3, 4]'.format(SubArray.__name__)) def test_subclass_str(self): """test str with subclass that has overridden str, setitem""" @@ -348,13 +351,13 @@ x = np.arange(5) xsub = SubArray(x) mxsub = masked_array(xsub, mask=[True, False, True, False, False]) - self.assertTrue(str(mxsub) == '[-- 1 -- 3 4]') + assert_equal(str(mxsub), '[-- 1 -- 3 4]') xcsub = ComplicatedSubArray(x) assert_raises(ValueError, xcsub.__setitem__, 0, np.ma.core.masked_print_option) mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) - self.assertTrue(str(mxcsub) == 'myprefix [-- 1 -- 3 
4] mypostfix') + assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix') def test_pure_subclass_info_preservation(self): # Test that ufuncs and methods conserve extra information consistently; @@ -362,11 +365,11 @@ arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6]) arr2 = SubMaskedArray(data=[0,1,2,3,4,5]) diff1 = np.subtract(arr1, arr2) - self.assertTrue('info' in diff1._optinfo) - self.assertTrue(diff1._optinfo['info'] == 'test') + assert_('info' in diff1._optinfo) + assert_(diff1._optinfo['info'] == 'test') diff2 = arr1 - arr2 - self.assertTrue('info' in diff2._optinfo) - self.assertTrue(diff2._optinfo['info'] == 'test') + assert_('info' in diff2._optinfo) + assert_(diff2._optinfo['info'] == 'test') ############################################################################### diff -Nru python-numpy-1.13.3/numpy/ma/testutils.py python-numpy-1.14.5/numpy/ma/testutils.py --- python-numpy-1.13.3/numpy/ma/testutils.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/testutils.py 2018-06-12 18:28:52.000000000 +0000 @@ -12,11 +12,11 @@ import numpy as np from numpy import ndarray, float_ import numpy.core.umath as umath +import numpy.testing from numpy.testing import ( TestCase, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_raises, build_err_msg, run_module_suite ) -import numpy.testing.utils as utils from .core import mask_or, getmask, masked_array, nomask, masked, filled __all__masked = [ @@ -211,11 +211,11 @@ header=header, names=('x', 'y')) raise ValueError(msg) # OK, now run the basic tests on filled versions - return utils.assert_array_compare(comparison, - x.filled(fill_value), - y.filled(fill_value), - err_msg=err_msg, - verbose=verbose, header=header) + return np.testing.assert_array_compare(comparison, + x.filled(fill_value), + y.filled(fill_value), + err_msg=err_msg, + verbose=verbose, header=header) def assert_array_equal(x, y, err_msg='', verbose=True): diff -Nru python-numpy-1.13.3/numpy/ma/timer_comparison.py 
python-numpy-1.14.5/numpy/ma/timer_comparison.py --- python-numpy-1.13.3/numpy/ma/timer_comparison.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/ma/timer_comparison.py 2018-06-12 17:31:56.000000000 +0000 @@ -7,7 +7,7 @@ from numpy import float_ import numpy.core.fromnumeric as fromnumeric -from numpy.testing.utils import build_err_msg +from numpy.testing import build_err_msg # Fixme: this does not look right. np.seterr(all='ignore') diff -Nru python-numpy-1.13.3/numpy/matlib.py python-numpy-1.14.5/numpy/matlib.py --- python-numpy-1.13.3/numpy/matlib.py 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/numpy/matlib.py 2018-06-12 17:31:56.000000000 +0000 @@ -173,7 +173,7 @@ b.flat = a return b -def eye(n,M=None, k=0, dtype=float): +def eye(n,M=None, k=0, dtype=float, order='C'): """ Return a matrix with ones on the diagonal and zeros elsewhere. @@ -189,6 +189,11 @@ and a negative value to a lower diagonal. dtype : dtype, optional Data-type of the returned matrix. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + + .. 
versionadded:: 1.14.0 Returns ------- @@ -210,7 +215,7 @@ [ 0., 0., 0.]]) """ - return asmatrix(np.eye(n, M, k, dtype)) + return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order)) def rand(*args): """ diff -Nru python-numpy-1.13.3/numpy/matrixlib/defmatrix.py python-numpy-1.14.5/numpy/matrixlib/defmatrix.py --- python-numpy-1.13.3/numpy/matrixlib/defmatrix.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/matrixlib/defmatrix.py 2018-06-12 18:28:52.000000000 +0000 @@ -137,7 +137,7 @@ M = asanyarray(M) if M.ndim != 2 or M.shape[0] != M.shape[1]: raise ValueError("input must be a square array") - if not issubdtype(type(n), int): + if not issubdtype(type(n), N.integer): raise TypeError("exponent must be an integer") from numpy.linalg import inv @@ -295,7 +295,7 @@ # Determine when we should have a column array try: n = len(index) - except: + except Exception: n = 0 if n > 1 and isscalar(index[1]): out.shape = (sh, 1) @@ -328,19 +328,6 @@ def __rpow__(self, other): return NotImplemented - def __repr__(self): - s = repr(self.__array__()).replace('array', 'matrix') - # now, 'matrix' has 6 letters, and 'array' 5, so the columns don't - # line up anymore. We need to add a space. - l = s.splitlines() - for i in range(1, len(l)): - if l[i]: - l[i] = ' ' + l[i] - return '\n'.join(l) - - def __str__(self): - return str(self.__array__()) - def _align(self, axis): """A convenience function for operations that need to preserve axis orientation. 
@@ -699,15 +686,15 @@ >>> (x == y) matrix([[ True, True, True, True], [False, False, False, False], - [False, False, False, False]], dtype=bool) + [False, False, False, False]]) >>> (x == y).all() False >>> (x == y).all(0) - matrix([[False, False, False, False]], dtype=bool) + matrix([[False, False, False, False]]) >>> (x == y).all(1) matrix([[ True], [False], - [False]], dtype=bool) + [False]]) """ return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) diff -Nru python-numpy-1.13.3/numpy/matrixlib/__init__.py python-numpy-1.14.5/numpy/matrixlib/__init__.py --- python-numpy-1.13.3/numpy/matrixlib/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/matrixlib/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -7,6 +7,6 @@ __all__ = defmatrix.__all__ -from numpy.testing.nosetester import _numpy_tester +from numpy.testing import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench diff -Nru python-numpy-1.13.3/numpy/matrixlib/tests/test_defmatrix.py python-numpy-1.14.5/numpy/matrixlib/tests/test_defmatrix.py --- python-numpy-1.13.3/numpy/matrixlib/tests/test_defmatrix.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/matrixlib/tests/test_defmatrix.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,13 +5,13 @@ import numpy as np from numpy import matrix, asmatrix, bmat from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_almost_equal, + run_module_suite, assert_, assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_raises ) from numpy.matrixlib.defmatrix import matrix_power from numpy.matrixlib import mat -class TestCtor(TestCase): +class TestCtor(object): def test_basic(self): A = np.array([[1, 2], [3, 4]]) mA = matrix(A) @@ -58,7 +58,7 @@ assert_(np.all(b2 == mixresult)) -class TestProperties(TestCase): +class TestProperties(object): def test_sum(self): """Test whether matrix.sum(axis=1) preserves orientation. 
Fails in NumPy <= 0.9.6.2127. @@ -191,7 +191,7 @@ B = matrix([[True], [True], [False]]) assert_array_equal(A, B) -class TestCasting(TestCase): +class TestCasting(object): def test_basic(self): A = np.arange(100).reshape(10, 10) mA = matrix(A) @@ -210,7 +210,7 @@ assert_(np.all(mA != mB)) -class TestAlgebra(TestCase): +class TestAlgebra(object): def test_basic(self): import numpy.linalg as linalg @@ -249,6 +249,12 @@ assert_array_almost_equal(m4, np.dot(m2, m2)) assert_array_almost_equal(np.dot(mi, m), np.eye(2)) + def test_scalar_type_pow(self): + m = matrix([[1, 2], [3, 4]]) + for scalar_t in [np.int8, np.uint8]: + two = scalar_t(2) + assert_array_almost_equal(m ** 2, m ** two) + def test_notimplemented(self): '''Check that 'not implemented' operations produce a failure.''' A = matrix([[1., 2.], @@ -271,7 +277,7 @@ self.fail("matrix.__mul__ with non-numeric object doesn't raise" "a TypeError") -class TestMatrixReturn(TestCase): +class TestMatrixReturn(object): def test_instance_methods(self): a = matrix([1.0], dtype='f8') methodargs = { @@ -313,7 +319,7 @@ assert_(type(d) is np.ndarray) -class TestIndexing(TestCase): +class TestIndexing(object): def test_basic(self): x = asmatrix(np.zeros((3, 2), float)) y = np.zeros((3, 1), float) @@ -322,9 +328,8 @@ assert_equal(x, [[0, 1], [0, 0], [0, 0]]) -class TestNewScalarIndexing(TestCase): - def setUp(self): - self.a = matrix([[1, 2], [3, 4]]) +class TestNewScalarIndexing(object): + a = matrix([[1, 2], [3, 4]]) def test_dimesions(self): a = self.a @@ -390,7 +395,7 @@ assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) -class TestPower(TestCase): +class TestPower(object): def test_returntype(self): a = np.array([[0, 1], [0, 0]]) assert_(type(matrix_power(a, 2)) is np.ndarray) @@ -401,10 +406,10 @@ assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) -class TestShape(TestCase): - def setUp(self): - self.a = np.array([[1], [2]]) - self.m = matrix([[1], [2]]) +class TestShape(object): + + a = np.array([[1], [2]]) 
+ m = matrix([[1], [2]]) def test_shape(self): assert_equal(self.a.shape, (2, 1)) diff -Nru python-numpy-1.13.3/numpy/matrixlib/tests/test_multiarray.py python-numpy-1.14.5/numpy/matrixlib/tests/test_multiarray.py --- python-numpy-1.13.3/numpy/matrixlib/tests/test_multiarray.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/matrixlib/tests/test_multiarray.py 2018-06-12 18:28:52.000000000 +0000 @@ -2,10 +2,10 @@ import numpy as np from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_equal, assert_array_equal + run_module_suite, assert_, assert_equal, assert_array_equal ) -class TestView(TestCase): +class TestView(object): def test_type(self): x = np.array([1, 2, 3]) assert_(isinstance(x.view(np.matrix), np.matrix)) diff -Nru python-numpy-1.13.3/numpy/matrixlib/tests/test_numeric.py python-numpy-1.14.5/numpy/matrixlib/tests/test_numeric.py --- python-numpy-1.13.3/numpy/matrixlib/tests/test_numeric.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/matrixlib/tests/test_numeric.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,9 +1,9 @@ from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import assert_equal, TestCase, run_module_suite +from numpy.testing import assert_equal, run_module_suite -class TestDot(TestCase): +class TestDot(object): def test_matscalar(self): b1 = np.matrix(np.ones((3, 3), dtype=complex)) assert_equal(b1*1.0, b1) diff -Nru python-numpy-1.13.3/numpy/matrixlib/tests/test_regression.py python-numpy-1.14.5/numpy/matrixlib/tests/test_regression.py --- python-numpy-1.13.3/numpy/matrixlib/tests/test_regression.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/matrixlib/tests/test_regression.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,17 +1,18 @@ from __future__ import division, absolute_import, print_function import numpy as np -from numpy.testing import TestCase, run_module_suite, assert_, assert_equal +from numpy.testing import 
( + run_module_suite, assert_, assert_equal, assert_raises + ) -rlevel = 1 -class TestRegression(TestCase): - def test_kron_matrix(self, level=rlevel): +class TestRegression(object): + def test_kron_matrix(self): # Ticket #71 x = np.matrix('[1 0; 1 0]') assert_equal(type(np.kron(x, x)), type(x)) - def test_matrix_properties(self,level=rlevel): + def test_matrix_properties(self): # Ticket #125 a = np.matrix([1.0], dtype=float) assert_(type(a.real) is np.matrix) @@ -20,18 +21,18 @@ assert_(type(c) is np.ndarray) assert_(type(d) is np.ndarray) - def test_matrix_multiply_by_1d_vector(self, level=rlevel): + def test_matrix_multiply_by_1d_vector(self): # Ticket #473 def mul(): np.mat(np.eye(2))*np.ones(2) - self.assertRaises(ValueError, mul) + assert_raises(ValueError, mul) - def test_matrix_std_argmax(self,level=rlevel): + def test_matrix_std_argmax(self): # Ticket #83 x = np.asmatrix(np.random.uniform(0, 1, (3, 3))) - self.assertEqual(x.std().shape, ()) - self.assertEqual(x.argmax().shape, ()) + assert_equal(x.std().shape, ()) + assert_equal(x.argmax().shape, ()) if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/polynomial/chebyshev.py python-numpy-1.14.5/numpy/polynomial/chebyshev.py --- python-numpy-1.13.3/numpy/polynomial/chebyshev.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/chebyshev.py 2018-06-12 17:31:56.000000000 +0000 @@ -52,6 +52,7 @@ - `chebline` -- Chebyshev series representing given straight line. - `cheb2poly` -- convert a Chebyshev series to a polynomial. - `poly2cheb` -- convert a polynomial to a Chebyshev series. +- `chebinterpolate` -- interpolate a function at the Chebyshev points. 
Classes ------- @@ -87,6 +88,7 @@ """ from __future__ import division, absolute_import, print_function +import numbers import warnings import numpy as np import numpy.linalg as la @@ -102,7 +104,7 @@ 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', - 'chebgauss', 'chebweight'] + 'chebgauss', 'chebweight', 'chebinterpolate'] chebtrim = pu.trimcoef @@ -359,10 +361,10 @@ >>> from numpy import polynomial as P >>> p = P.Polynomial(range(4)) >>> p - Polynomial([ 0., 1., 2., 3.], [-1., 1.]) + Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) >>> c = p.convert(kind=P.Chebyshev) >>> c - Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.]) + Chebyshev([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) >>> P.poly2cheb(range(4)) array([ 1. , 3.25, 1. , 0.75]) @@ -942,7 +944,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: c = c[:1]*0 @@ -958,7 +960,7 @@ der[1] = 4*c[2] der[0] = c[1] c = der - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -1010,8 +1012,8 @@ Raises ------ ValueError - If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. 
See Also -------- @@ -1060,6 +1062,10 @@ raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) @@ -1067,7 +1073,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) k = list(k) + [0]*(cnt - len(k)) for i in range(cnt): n = len(c) @@ -1086,7 +1092,7 @@ tmp[j - 1] -= c[j]/(2*(j - 1)) tmp[0] += k[i] - chebval(lbnd, tmp) c = tmp - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -1225,7 +1231,7 @@ """ try: x, y = np.array((x, y), copy=0) - except: + except Exception: raise ValueError('x, y are incompatible') c = chebval(x, c) @@ -1338,7 +1344,7 @@ """ try: x, y, z = np.array((x, y, z), copy=0) - except: + except Exception: raise ValueError('x, y, z are incompatible') c = chebval(x, c) @@ -1458,7 +1464,7 @@ v[1] = x for i in range(2, ideg + 1): v[i] = v[i-1]*x2 - v[i-2] - return np.rollaxis(v, 0, v.ndim) + return np.moveaxis(v, 0, -1) def chebvander2d(x, y, deg): @@ -1613,7 +1619,7 @@ points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. deg : int or 1-D array_like - Degree(s) of the fitting polynomials. If `deg` is a single integer + Degree(s) of the fitting polynomials. If `deg` is a single integer, all terms up to and including the `deg`'th term are included in the fit. For NumPy versions >= 1.11.0 a list of integers specifying the degrees of the terms to include may be used instead. @@ -1886,6 +1892,73 @@ return r +def chebinterpolate(func, deg, args=()): + """Interpolate a function at the Chebyshev points of the first kind. 
+ + Returns the Chebyshev series that interpolates `func` at the Chebyshev + points of the first kind in the interval [-1, 1]. The interpolating + series tends to a minmax approximation to `func` with increasing `deg` + if the function is continuous in the interval. + + .. versionadded:: 1.14.0 + + Parameters + ---------- + func : function + The function to be approximated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial + args : tuple, optional + Extra arguments to be used in the function call. Default is no extra + arguments. + + Returns + ------- + coef : ndarray, shape (deg + 1,) + Chebyshev coefficients of the interpolating series ordered from low to + high. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebfromfunction(lambda x: np.tanh(x) + 0.5, 8) + array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17, + -5.42457905e-02, -2.71387850e-16, 4.51658839e-03, + 2.46716228e-17, -3.79694221e-04, -3.26899002e-16]) + + Notes + ----- + + The Chebyshev polynomials used in the interpolation are orthogonal when + sampled at the Chebyshev points of the first kind. If it is desired to + constrain some of the coefficients they can simply be set to the desired + value after the interpolation, no new interpolation or fit is needed. This + is especially useful if it is known apriori that some of coefficients are + zero. For instance, if the function is even then the coefficients of the + terms of odd degree in the result can be set to zero. + + """ + deg = np.asarray(deg) + + # check arguments. 
+ if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int") + if deg < 0: + raise ValueError("expected deg >= 0") + + order = deg + 1 + xcheb = chebpts1(order) + yfunc = func(xcheb, *args) + m = chebvander(xcheb, deg) + c = np.dot(m.T, yfunc) + c[0] /= order + c[1:] /= 0.5*order + + return c + + def chebgauss(deg): """ Gauss-Chebyshev quadrature. @@ -2069,6 +2142,48 @@ _roots = staticmethod(chebroots) _fromroots = staticmethod(chebfromroots) + @classmethod + def interpolate(cls, func, deg, domain=None, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the series that interpolates `func` at the Chebyshev points of + the first kind scaled and shifted to the `domain`. The resulting series + tends to a minmax approximation of `func` when the function is + continuous in the domain. + + .. versionadded:: 1.14.0 + + Parameters + ---------- + func : function + The function to be interpolated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial. + domain : {None, [beg, end]}, optional + Domain over which `func` is interpolated. The default is None, in + which case the domain is [-1, 1]. + args : tuple, optional + Extra arguments to be used in the function call. Default is no + extra arguments. + + Returns + ------- + polynomial : Chebyshev instance + Interpolating Chebyshev instance. + + Notes + ----- + See `numpy.polynomial.chebfromfunction` for more details. 
+ + """ + if domain is None: + domain = cls.domain + xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args) + coef = chebinterpolate(xfunc, deg) + return cls(coef, domain=domain) + # Virtual properties nickname = 'cheb' domain = np.array(chebdomain) diff -Nru python-numpy-1.13.3/numpy/polynomial/hermite_e.py python-numpy-1.14.5/numpy/polynomial/hermite_e.py --- python-numpy-1.13.3/numpy/polynomial/hermite_e.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/hermite_e.py 2018-06-12 17:31:56.000000000 +0000 @@ -705,7 +705,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: return c[:1]*0 @@ -717,7 +717,7 @@ for j in range(n, 0, -1): der[j - 1] = j*c[j] c = der - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -769,8 +769,8 @@ Raises ------ ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. 
See Also -------- @@ -817,6 +817,10 @@ raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) @@ -824,7 +828,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) k = list(k) + [0]*(cnt - len(k)) for i in range(cnt): n = len(c) @@ -839,7 +843,7 @@ tmp[j + 1] = c[j]/(j + 1) tmp[0] += k[i] - hermeval(lbnd, tmp) c = tmp - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -986,7 +990,7 @@ """ try: x, y = np.array((x, y), copy=0) - except: + except Exception: raise ValueError('x, y are incompatible') c = hermeval(x, c) @@ -1099,7 +1103,7 @@ """ try: x, y, z = np.array((x, y, z), copy=0) - except: + except Exception: raise ValueError('x, y, z are incompatible') c = hermeval(x, c) @@ -1226,7 +1230,7 @@ v[1] = x for i in range(2, ideg + 1): v[i] = (v[i-1]*x - v[i-2]*(i - 1)) - return np.rollaxis(v, 0, v.ndim) + return np.moveaxis(v, 0, -1) def hermevander2d(x, y, deg): diff -Nru python-numpy-1.13.3/numpy/polynomial/hermite.py python-numpy-1.14.5/numpy/polynomial/hermite.py --- python-numpy-1.13.3/numpy/polynomial/hermite.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/hermite.py 2018-06-12 17:31:56.000000000 +0000 @@ -706,7 +706,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: c = c[:1]*0 @@ -718,7 +718,7 @@ for j in range(n, 0, -1): der[j - 1] = (2*j)*c[j] c = der - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -770,8 +770,8 @@ Raises ------ ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. 
+ If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. See Also -------- @@ -818,6 +818,10 @@ raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) @@ -825,7 +829,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) k = list(k) + [0]*(cnt - len(k)) for i in range(cnt): n = len(c) @@ -840,7 +844,7 @@ tmp[j + 1] = c[j]/(2*(j + 1)) tmp[0] += k[i] - hermval(lbnd, tmp) c = tmp - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -988,7 +992,7 @@ """ try: x, y = np.array((x, y), copy=0) - except: + except Exception: raise ValueError('x, y are incompatible') c = hermval(x, c) @@ -1101,7 +1105,7 @@ """ try: x, y, z = np.array((x, y, z), copy=0) - except: + except Exception: raise ValueError('x, y, z are incompatible') c = hermval(x, c) @@ -1229,7 +1233,7 @@ v[1] = x2 for i in range(2, ideg + 1): v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1))) - return np.rollaxis(v, 0, v.ndim) + return np.moveaxis(v, 0, -1) def hermvander2d(x, y, deg): diff -Nru python-numpy-1.13.3/numpy/polynomial/__init__.py python-numpy-1.14.5/numpy/polynomial/__init__.py --- python-numpy-1.13.3/numpy/polynomial/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -22,6 +22,6 @@ from .hermite_e import HermiteE from .laguerre import Laguerre -from numpy.testing.nosetester import _numpy_tester +from numpy.testing import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench diff -Nru python-numpy-1.13.3/numpy/polynomial/laguerre.py python-numpy-1.14.5/numpy/polynomial/laguerre.py --- 
python-numpy-1.13.3/numpy/polynomial/laguerre.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/laguerre.py 2018-06-12 17:31:56.000000000 +0000 @@ -703,7 +703,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: c = c[:1]*0 @@ -717,7 +717,7 @@ c[j - 1] += c[j] der[0] = -c[1] c = der - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -770,8 +770,8 @@ Raises ------ ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. See Also -------- @@ -818,6 +818,10 @@ raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) @@ -825,7 +829,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) k = list(k) + [0]*(cnt - len(k)) for i in range(cnt): n = len(c) @@ -841,7 +845,7 @@ tmp[j + 1] = -c[j] tmp[0] += k[i] - lagval(lbnd, tmp) c = tmp - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -988,7 +992,7 @@ """ try: x, y = np.array((x, y), copy=0) - except: + except Exception: raise ValueError('x, y are incompatible') c = lagval(x, c) @@ -1101,7 +1105,7 @@ """ try: x, y, z = np.array((x, y, z), copy=0) - except: + except Exception: raise ValueError('x, y, z are incompatible') c = lagval(x, c) @@ -1228,7 +1232,7 @@ v[1] = 1 - x for i in range(2, ideg + 1): v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i - return np.rollaxis(v, 0, v.ndim) + return np.moveaxis(v, 0, -1) def lagvander2d(x, y, deg): diff -Nru python-numpy-1.13.3/numpy/polynomial/legendre.py 
python-numpy-1.14.5/numpy/polynomial/legendre.py --- python-numpy-1.13.3/numpy/polynomial/legendre.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/legendre.py 2018-06-12 17:31:56.000000000 +0000 @@ -136,10 +136,10 @@ >>> from numpy import polynomial as P >>> p = P.Polynomial(np.arange(4)) >>> p - Polynomial([ 0., 1., 2., 3.], [-1., 1.]) - >>> c = P.Legendre(P.poly2leg(p.coef)) + Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) >>> c - Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.]) + Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) """ [pol] = pu.as_series([pol]) @@ -742,7 +742,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: c = c[:1]*0 @@ -758,7 +758,7 @@ der[1] = 3*c[2] der[0] = c[1] c = der - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -810,8 +810,8 @@ Raises ------ ValueError - If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or - ``np.isscalar(scl) == False``. + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. 
See Also -------- @@ -860,6 +860,10 @@ raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) @@ -867,7 +871,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) k = list(k) + [0]*(cnt - len(k)) for i in range(cnt): n = len(c) @@ -886,7 +890,7 @@ tmp[j - 1] -= t tmp[0] += k[i] - legval(lbnd, tmp) c = tmp - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -1026,7 +1030,7 @@ """ try: x, y = np.array((x, y), copy=0) - except: + except Exception: raise ValueError('x, y are incompatible') c = legval(x, c) @@ -1139,7 +1143,7 @@ """ try: x, y, z = np.array((x, y, z), copy=0) - except: + except Exception: raise ValueError('x, y, z are incompatible') c = legval(x, c) @@ -1259,7 +1263,7 @@ v[1] = x for i in range(2, ideg + 1): v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i - return np.rollaxis(v, 0, v.ndim) + return np.moveaxis(v, 0, -1) def legvander2d(x, y, deg): diff -Nru python-numpy-1.13.3/numpy/polynomial/_polybase.py python-numpy-1.14.5/numpy/polynomial/_polybase.py --- python-numpy-1.13.3/numpy/polynomial/_polybase.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/_polybase.py 2018-06-12 17:31:56.000000000 +0000 @@ -260,7 +260,7 @@ self.window = window def __repr__(self): - format = "%s(%s, %s, %s)" + format = "%s(%s, domain=%s, window=%s)" coef = repr(self.coef)[6:-1] domain = repr(self.domain)[6:-1] window = repr(self.window)[6:-1] @@ -307,32 +307,26 @@ return self def __add__(self, other): + othercoef = self._get_coefficients(other) try: - othercoef = self._get_coefficients(other) coef = self._add(self.coef, othercoef) - except TypeError as e: - raise e - 
except: + except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) def __sub__(self, other): + othercoef = self._get_coefficients(other) try: - othercoef = self._get_coefficients(other) coef = self._sub(self.coef, othercoef) - except TypeError as e: - raise e - except: + except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) def __mul__(self, other): + othercoef = self._get_coefficients(other) try: - othercoef = self._get_coefficients(other) coef = self._mul(self.coef, othercoef) - except TypeError as e: - raise e - except: + except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) @@ -362,12 +356,12 @@ return res[1] def __divmod__(self, other): + othercoef = self._get_coefficients(other) try: - othercoef = self._get_coefficients(other) quo, rem = self._div(self.coef, othercoef) - except (TypeError, ZeroDivisionError) as e: + except ZeroDivisionError as e: raise e - except: + except Exception: return NotImplemented quo = self.__class__(quo, self.domain, self.window) rem = self.__class__(rem, self.domain, self.window) @@ -381,21 +375,21 @@ def __radd__(self, other): try: coef = self._add(other, self.coef) - except: + except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) def __rsub__(self, other): try: coef = self._sub(other, self.coef) - except: + except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) def __rmul__(self, other): try: coef = self._mul(other, self.coef) - except: + except Exception: return NotImplemented return self.__class__(coef, self.domain, self.window) @@ -425,7 +419,7 @@ quo, rem = self._div(other, self.coef) except ZeroDivisionError as e: raise e - except: + except Exception: return NotImplemented quo = self.__class__(quo, self.domain, self.window) rem = self.__class__(rem, self.domain, self.window) diff -Nru 
python-numpy-1.13.3/numpy/polynomial/polynomial.py python-numpy-1.14.5/numpy/polynomial/polynomial.py --- python-numpy-1.13.3/numpy/polynomial/polynomial.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/polynomial.py 2018-06-12 18:28:52.000000000 +0000 @@ -546,7 +546,7 @@ if cnt == 0: return c - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: c = c[:1]*0 @@ -558,7 +558,7 @@ for j in range(n, 0, -1): der[j - 1] = j*c[j] c = der - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -608,7 +608,8 @@ Raises ------ ValueError - If ``m < 1``, ``len(k) > m``. + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. See Also -------- @@ -654,6 +655,10 @@ raise ValueError("The order of integration must be non-negative") if len(k) > cnt: raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") if iaxis != axis: raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) @@ -662,7 +667,7 @@ return c k = list(k) + [0]*(cnt - len(k)) - c = np.rollaxis(c, iaxis) + c = np.moveaxis(c, iaxis, 0) for i in range(cnt): n = len(c) c *= scl @@ -676,7 +681,7 @@ tmp[j + 1] = c[j]/(j + 1) tmp[0] += k[i] - polyval(lbnd, tmp) c = tmp - c = np.rollaxis(c, 0, iaxis + 1) + c = np.moveaxis(c, 0, iaxis) return c @@ -913,7 +918,7 @@ """ try: x, y = np.array((x, y), copy=0) - except: + except Exception: raise ValueError('x, y are incompatible') c = polyval(x, c) @@ -1026,7 +1031,7 @@ """ try: x, y, z = np.array((x, y, z), copy=0) - except: + except Exception: raise ValueError('x, y, z are incompatible') c = polyval(x, c) @@ -1147,7 +1152,7 @@ v[1] = x for i in range(2, ideg + 1): v[i] = v[i-1]*x - return np.rollaxis(v, 0, v.ndim) + return np.moveaxis(v, 0, -1) def polyvander2d(x, y, deg): diff -Nru 
python-numpy-1.13.3/numpy/polynomial/polyutils.py python-numpy-1.14.5/numpy/polynomial/polyutils.py --- python-numpy-1.13.3/numpy/polynomial/polyutils.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/polyutils.py 2018-06-12 17:31:56.000000000 +0000 @@ -153,14 +153,23 @@ Examples -------- - >>> from numpy import polynomial as P + >>> from numpy.polynomial import polyutils as pu >>> a = np.arange(4) - >>> P.as_series(a) + >>> pu.as_series(a) [array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])] >>> b = np.arange(6).reshape((2,3)) - >>> P.as_series(b) + >>> pu.as_series(b) [array([ 0., 1., 2.]), array([ 3., 4., 5.])] + >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16))) + [array([ 1.]), array([ 0., 1., 2.]), array([ 0., 1.])] + + >>> pu.as_series([2, [1.1, 0.]]) + [array([ 2.]), array([ 1.1])] + + >>> pu.as_series([2, [1.1, 0.]], trim=False) + [array([ 2.]), array([ 1.1, 0. ])] + """ arrays = [np.array(a, ndmin=1, copy=0) for a in alist] if min([a.size for a in arrays]) == 0: @@ -182,7 +191,7 @@ else: try: dtype = np.common_type(*arrays) - except: + except Exception: raise ValueError("Coefficient arrays have no common type") ret = [np.array(a, copy=1, dtype=dtype) for a in arrays] return ret @@ -222,13 +231,13 @@ Examples -------- - >>> from numpy import polynomial as P - >>> P.trimcoef((0,0,3,0,5,0,0)) + >>> from numpy.polynomial import polyutils as pu + >>> pu.trimcoef((0,0,3,0,5,0,0)) array([ 0., 0., 3., 0., 5.]) - >>> P.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed + >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed array([ 0.]) >>> i = complex(0,1) # works for complex - >>> P.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) + >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) array([ 0.0003+0.j , 0.0010-0.001j]) """ @@ -236,7 +245,7 @@ raise ValueError("tol must be non-negative") [c] = as_series([c]) - [ind] = np.where(np.abs(c) > tol) + [ind] = np.nonzero(np.abs(c) > tol) 
if len(ind) == 0: return c[:1]*0 else: @@ -319,13 +328,13 @@ Examples -------- - >>> from numpy import polynomial as P - >>> P.mapparms((-1,1),(-1,1)) + >>> from numpy.polynomial import polyutils as pu + >>> pu.mapparms((-1,1),(-1,1)) (0.0, 1.0) - >>> P.mapparms((1,-1),(-1,1)) + >>> pu.mapparms((1,-1),(-1,1)) (0.0, -1.0) >>> i = complex(0,1) - >>> P.mapparms((-i,-1),(1,i)) + >>> pu.mapparms((-i,-1),(1,i)) ((1+1j), (1+0j)) """ @@ -375,15 +384,15 @@ Examples -------- - >>> from numpy import polynomial as P + >>> from numpy.polynomial import polyutils as pu >>> old_domain = (-1,1) >>> new_domain = (0,2*np.pi) >>> x = np.linspace(-1,1,6); x array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) - >>> x_out = P.mapdomain(x, old_domain, new_domain); x_out + >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, 6.28318531]) - >>> x - P.mapdomain(x_out, new_domain, old_domain) + >>> x - pu.mapdomain(x_out, new_domain, old_domain) array([ 0., 0., 0., 0., 0., 0.]) Also works for complex numbers (and thus can be used to map any line in diff -Nru python-numpy-1.13.3/numpy/polynomial/tests/test_chebyshev.py python-numpy-1.14.5/numpy/polynomial/tests/test_chebyshev.py --- python-numpy-1.13.3/numpy/polynomial/tests/test_chebyshev.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/tests/test_chebyshev.py 2018-06-12 18:28:52.000000000 +0000 @@ -7,8 +7,9 @@ import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) + assert_almost_equal, assert_raises, assert_equal, assert_, + run_module_suite + ) def trim(x): @@ -28,7 +29,7 @@ Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] -class TestPrivate(TestCase): +class TestPrivate(object): def test__cseries_to_zseries(self): for i in range(5): @@ -45,7 +46,7 @@ assert_equal(res, tgt) -class 
TestConstants(TestCase): +class TestConstants(object): def test_chebdomain(self): assert_equal(cheb.chebdomain, [-1, 1]) @@ -60,7 +61,7 @@ assert_equal(cheb.chebx, [0, 1]) -class TestArithmetic(TestCase): +class TestArithmetic(object): def test_chebadd(self): for i in range(5): @@ -112,7 +113,7 @@ assert_equal(trim(res), trim(tgt), err_msg=msg) -class TestEvaluation(TestCase): +class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([2.5, 2., 1.5]) c2d = np.einsum('i,j->ij', c1d, c1d) @@ -206,13 +207,16 @@ assert_(res.shape == (2, 3)*3) -class TestIntegral(TestCase): +class TestIntegral(object): def test_chebint(self): # check exceptions assert_raises(ValueError, cheb.chebint, [0], .5) assert_raises(ValueError, cheb.chebint, [0], -1) assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) + assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) + assert_raises(ValueError, cheb.chebint, [0], scl=[0]) + assert_raises(ValueError, cheb.chebint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -305,7 +309,7 @@ assert_almost_equal(res, tgt) -class TestDerivative(TestCase): +class TestDerivative(object): def test_chebder(self): # check exceptions @@ -345,7 +349,7 @@ assert_almost_equal(res, tgt) -class TestVander(TestCase): +class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 @@ -393,7 +397,7 @@ assert_(van.shape == (1, 5, 24)) -class TestFitting(TestCase): +class TestFitting(object): def test_chebfit(self): def f(x): @@ -470,7 +474,32 @@ assert_almost_equal(coef1, coef2) -class TestCompanion(TestCase): +class TestInterpolate(object): + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, cheb.chebinterpolate, self.f, -1) + assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.) 
+ + def test_dimensions(self): + for deg in range(1, 5): + assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,)) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(-1, 1, 10) + for deg in range(0, 10): + for p in range(0, deg + 1): + c = cheb.chebinterpolate(powx, deg, (p,)) + assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) + + +class TestCompanion(object): def test_raises(self): assert_raises(ValueError, cheb.chebcompanion, []) @@ -485,7 +514,7 @@ assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) -class TestGauss(TestCase): +class TestGauss(object): def test_100(self): x, w = cheb.chebgauss(100) @@ -504,7 +533,7 @@ assert_almost_equal(w.sum(), tgt) -class TestMisc(TestCase): +class TestMisc(object): def test_chebfromroots(self): res = cheb.chebfromroots([]) diff -Nru python-numpy-1.13.3/numpy/polynomial/tests/test_classes.py python-numpy-1.14.5/numpy/polynomial/tests/test_classes.py --- python-numpy-1.13.3/numpy/polynomial/tests/test_classes.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/tests/test_classes.py 2018-06-12 18:28:52.000000000 +0000 @@ -583,5 +583,30 @@ assert_raises(TypeError, np.add, x, p) +class TestInterpolate(object): + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) + assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) 
+ + def test_dimensions(self): + for deg in range(1, 5): + assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(0, 2, 10) + for deg in range(0, 10): + for t in range(0, deg + 1): + p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) + assert_almost_equal(p(x), powx(x, t), decimal=12) + + if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/polynomial/tests/test_hermite_e.py python-numpy-1.14.5/numpy/polynomial/tests/test_hermite_e.py --- python-numpy-1.13.3/numpy/polynomial/tests/test_hermite_e.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/tests/test_hermite_e.py 2018-06-12 18:28:52.000000000 +0000 @@ -7,8 +7,9 @@ import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) + assert_almost_equal, assert_raises, assert_equal, assert_, + run_module_suite + ) He0 = np.array([1]) He1 = np.array([0, 1]) @@ -28,7 +29,7 @@ return herme.hermetrim(x, tol=1e-6) -class TestConstants(TestCase): +class TestConstants(object): def test_hermedomain(self): assert_equal(herme.hermedomain, [-1, 1]) @@ -43,7 +44,7 @@ assert_equal(herme.hermex, [0, 1]) -class TestArithmetic(TestCase): +class TestArithmetic(object): x = np.linspace(-3, 3, 100) def test_hermeadd(self): @@ -100,7 +101,7 @@ assert_equal(trim(res), trim(tgt), err_msg=msg) -class TestEvaluation(TestCase): +class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([4., 2., 3.]) c2d = np.einsum('i,j->ij', c1d, c1d) @@ -194,13 +195,16 @@ assert_(res.shape == (2, 3)*3) -class TestIntegral(TestCase): +class TestIntegral(object): def test_hermeint(self): # check exceptions assert_raises(ValueError, herme.hermeint, [0], .5) assert_raises(ValueError, herme.hermeint, [0], -1) 
assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) + assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) + assert_raises(ValueError, herme.hermeint, [0], scl=[0]) + assert_raises(ValueError, herme.hermeint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -293,7 +297,7 @@ assert_almost_equal(res, tgt) -class TestDerivative(TestCase): +class TestDerivative(object): def test_hermeder(self): # check exceptions @@ -334,7 +338,7 @@ assert_almost_equal(res, tgt) -class TestVander(TestCase): +class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 @@ -382,7 +386,7 @@ assert_(van.shape == (1, 5, 24)) -class TestFitting(TestCase): +class TestFitting(object): def test_hermefit(self): def f(x): @@ -459,7 +463,7 @@ assert_almost_equal(coef1, coef2) -class TestCompanion(TestCase): +class TestCompanion(object): def test_raises(self): assert_raises(ValueError, herme.hermecompanion, []) @@ -474,7 +478,7 @@ assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) -class TestGauss(TestCase): +class TestGauss(object): def test_100(self): x, w = herme.hermegauss(100) @@ -493,7 +497,7 @@ assert_almost_equal(w.sum(), tgt) -class TestMisc(TestCase): +class TestMisc(object): def test_hermefromroots(self): res = herme.hermefromroots([]) diff -Nru python-numpy-1.13.3/numpy/polynomial/tests/test_hermite.py python-numpy-1.14.5/numpy/polynomial/tests/test_hermite.py --- python-numpy-1.13.3/numpy/polynomial/tests/test_hermite.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/tests/test_hermite.py 2018-06-12 18:28:52.000000000 +0000 @@ -7,8 +7,9 @@ import numpy.polynomial.hermite as herm from numpy.polynomial.polynomial import polyval from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) + assert_almost_equal, assert_raises, assert_equal, assert_, + run_module_suite + ) H0 = np.array([1]) H1 = np.array([0, 2]) @@ -28,7 +29,7 @@ 
return herm.hermtrim(x, tol=1e-6) -class TestConstants(TestCase): +class TestConstants(object): def test_hermdomain(self): assert_equal(herm.hermdomain, [-1, 1]) @@ -43,7 +44,7 @@ assert_equal(herm.hermx, [0, .5]) -class TestArithmetic(TestCase): +class TestArithmetic(object): x = np.linspace(-3, 3, 100) def test_hermadd(self): @@ -100,7 +101,7 @@ assert_equal(trim(res), trim(tgt), err_msg=msg) -class TestEvaluation(TestCase): +class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([2.5, 1., .75]) c2d = np.einsum('i,j->ij', c1d, c1d) @@ -194,13 +195,16 @@ assert_(res.shape == (2, 3)*3) -class TestIntegral(TestCase): +class TestIntegral(object): def test_hermint(self): # check exceptions assert_raises(ValueError, herm.hermint, [0], .5) assert_raises(ValueError, herm.hermint, [0], -1) assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) + assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) + assert_raises(ValueError, herm.hermint, [0], scl=[0]) + assert_raises(ValueError, herm.hermint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -293,7 +297,7 @@ assert_almost_equal(res, tgt) -class TestDerivative(TestCase): +class TestDerivative(object): def test_hermder(self): # check exceptions @@ -333,7 +337,7 @@ assert_almost_equal(res, tgt) -class TestVander(TestCase): +class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 @@ -381,7 +385,7 @@ assert_(van.shape == (1, 5, 24)) -class TestFitting(TestCase): +class TestFitting(object): def test_hermfit(self): def f(x): @@ -458,7 +462,7 @@ assert_almost_equal(coef1, coef2) -class TestCompanion(TestCase): +class TestCompanion(object): def test_raises(self): assert_raises(ValueError, herm.hermcompanion, []) @@ -473,7 +477,7 @@ assert_(herm.hermcompanion([1, 2])[0, 0] == -.25) -class TestGauss(TestCase): +class TestGauss(object): def test_100(self): x, w = herm.hermgauss(100) @@ -492,7 +496,7 @@ assert_almost_equal(w.sum(), tgt) -class 
TestMisc(TestCase): +class TestMisc(object): def test_hermfromroots(self): res = herm.hermfromroots([]) diff -Nru python-numpy-1.13.3/numpy/polynomial/tests/test_laguerre.py python-numpy-1.14.5/numpy/polynomial/tests/test_laguerre.py --- python-numpy-1.13.3/numpy/polynomial/tests/test_laguerre.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/tests/test_laguerre.py 2018-06-12 18:28:52.000000000 +0000 @@ -7,8 +7,9 @@ import numpy.polynomial.laguerre as lag from numpy.polynomial.polynomial import polyval from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) + assert_almost_equal, assert_raises, assert_equal, assert_, + run_module_suite + ) L0 = np.array([1])/1 L1 = np.array([1, -1])/1 @@ -25,7 +26,7 @@ return lag.lagtrim(x, tol=1e-6) -class TestConstants(TestCase): +class TestConstants(object): def test_lagdomain(self): assert_equal(lag.lagdomain, [0, 1]) @@ -40,7 +41,7 @@ assert_equal(lag.lagx, [1, -1]) -class TestArithmetic(TestCase): +class TestArithmetic(object): x = np.linspace(-3, 3, 100) def test_lagadd(self): @@ -97,7 +98,7 @@ assert_almost_equal(trim(res), trim(tgt), err_msg=msg) -class TestEvaluation(TestCase): +class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([9., -14., 6.]) c2d = np.einsum('i,j->ij', c1d, c1d) @@ -191,13 +192,16 @@ assert_(res.shape == (2, 3)*3) -class TestIntegral(TestCase): +class TestIntegral(object): def test_lagint(self): # check exceptions assert_raises(ValueError, lag.lagint, [0], .5) assert_raises(ValueError, lag.lagint, [0], -1) assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) + assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) + assert_raises(ValueError, lag.lagint, [0], scl=[0]) + assert_raises(ValueError, lag.lagint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -290,7 +294,7 @@ assert_almost_equal(res, tgt) -class TestDerivative(TestCase): +class 
TestDerivative(object): def test_lagder(self): # check exceptions @@ -330,7 +334,7 @@ assert_almost_equal(res, tgt) -class TestVander(TestCase): +class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 @@ -378,7 +382,7 @@ assert_(van.shape == (1, 5, 24)) -class TestFitting(TestCase): +class TestFitting(object): def test_lagfit(self): def f(x): @@ -440,7 +444,7 @@ assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1]) -class TestCompanion(TestCase): +class TestCompanion(object): def test_raises(self): assert_raises(ValueError, lag.lagcompanion, []) @@ -455,7 +459,7 @@ assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5) -class TestGauss(TestCase): +class TestGauss(object): def test_100(self): x, w = lag.laggauss(100) @@ -474,7 +478,7 @@ assert_almost_equal(w.sum(), tgt) -class TestMisc(TestCase): +class TestMisc(object): def test_lagfromroots(self): res = lag.lagfromroots([]) diff -Nru python-numpy-1.13.3/numpy/polynomial/tests/test_legendre.py python-numpy-1.14.5/numpy/polynomial/tests/test_legendre.py --- python-numpy-1.13.3/numpy/polynomial/tests/test_legendre.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/tests/test_legendre.py 2018-06-12 18:28:52.000000000 +0000 @@ -7,8 +7,9 @@ import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) + assert_almost_equal, assert_raises, assert_equal, assert_, + run_module_suite + ) L0 = np.array([1]) L1 = np.array([0, 1]) @@ -28,7 +29,7 @@ return leg.legtrim(x, tol=1e-6) -class TestConstants(TestCase): +class TestConstants(object): def test_legdomain(self): assert_equal(leg.legdomain, [-1, 1]) @@ -43,7 +44,7 @@ assert_equal(leg.legx, [0, 1]) -class TestArithmetic(TestCase): +class TestArithmetic(object): x = np.linspace(-1, 1, 100) def test_legadd(self): @@ -101,7 +102,7 @@ assert_equal(trim(res), trim(tgt), 
err_msg=msg) -class TestEvaluation(TestCase): +class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([2., 2., 2.]) c2d = np.einsum('i,j->ij', c1d, c1d) @@ -195,13 +196,16 @@ assert_(res.shape == (2, 3)*3) -class TestIntegral(TestCase): +class TestIntegral(object): def test_legint(self): # check exceptions assert_raises(ValueError, leg.legint, [0], .5) assert_raises(ValueError, leg.legint, [0], -1) assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) + assert_raises(ValueError, leg.legint, [0], lbnd=[0]) + assert_raises(ValueError, leg.legint, [0], scl=[0]) + assert_raises(ValueError, leg.legint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -294,7 +298,7 @@ assert_almost_equal(res, tgt) -class TestDerivative(TestCase): +class TestDerivative(object): def test_legder(self): # check exceptions @@ -334,7 +338,7 @@ assert_almost_equal(res, tgt) -class TestVander(TestCase): +class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 @@ -382,7 +386,7 @@ assert_(van.shape == (1, 5, 24)) -class TestFitting(TestCase): +class TestFitting(object): def test_legfit(self): def f(x): @@ -459,7 +463,7 @@ assert_almost_equal(coef1, coef2) -class TestCompanion(TestCase): +class TestCompanion(object): def test_raises(self): assert_raises(ValueError, leg.legcompanion, []) @@ -474,7 +478,7 @@ assert_(leg.legcompanion([1, 2])[0, 0] == -.5) -class TestGauss(TestCase): +class TestGauss(object): def test_100(self): x, w = leg.leggauss(100) @@ -493,7 +497,7 @@ assert_almost_equal(w.sum(), tgt) -class TestMisc(TestCase): +class TestMisc(object): def test_legfromroots(self): res = leg.legfromroots([]) diff -Nru python-numpy-1.13.3/numpy/polynomial/tests/test_polynomial.py python-numpy-1.14.5/numpy/polynomial/tests/test_polynomial.py --- python-numpy-1.13.3/numpy/polynomial/tests/test_polynomial.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/tests/test_polynomial.py 
2018-06-12 18:28:52.000000000 +0000 @@ -6,8 +6,9 @@ import numpy as np import numpy.polynomial.polynomial as poly from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) + assert_almost_equal, assert_raises, assert_equal, assert_, + run_module_suite + ) def trim(x): @@ -27,7 +28,7 @@ Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] -class TestConstants(TestCase): +class TestConstants(object): def test_polydomain(self): assert_equal(poly.polydomain, [-1, 1]) @@ -42,7 +43,7 @@ assert_equal(poly.polyx, [0, 1]) -class TestArithmetic(TestCase): +class TestArithmetic(object): def test_polyadd(self): for i in range(5): @@ -103,7 +104,7 @@ assert_equal(res, tgt, err_msg=msg) -class TestEvaluation(TestCase): +class TestEvaluation(object): # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([1., 2., 3.]) c2d = np.einsum('i,j->ij', c1d, c1d) @@ -263,13 +264,16 @@ assert_(res.shape == (2, 3)*3) -class TestIntegral(TestCase): +class TestIntegral(object): def test_polyint(self): # check exceptions assert_raises(ValueError, poly.polyint, [0], .5) assert_raises(ValueError, poly.polyint, [0], -1) assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) + assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) + assert_raises(ValueError, poly.polyint, [0], scl=[0]) + assert_raises(ValueError, poly.polyint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -357,7 +361,7 @@ assert_almost_equal(res, tgt) -class TestDerivative(TestCase): +class TestDerivative(object): def test_polyder(self): # check exceptions @@ -397,7 +401,7 @@ assert_almost_equal(res, tgt) -class TestVander(TestCase): +class TestVander(object): # some random values in [-1, 1) x = np.random.random((3, 5))*2 - 1 @@ -445,7 +449,7 @@ assert_(van.shape == (1, 5, 24)) -class TestCompanion(TestCase): +class TestCompanion(object): def test_raises(self): assert_raises(ValueError, poly.polycompanion, []) @@ -460,7 +464,7 @@ 
assert_(poly.polycompanion([1, 2])[0, 0] == -.5) -class TestMisc(TestCase): +class TestMisc(object): def test_polyfromroots(self): res = poly.polyfromroots([]) diff -Nru python-numpy-1.13.3/numpy/polynomial/tests/test_polyutils.py python-numpy-1.14.5/numpy/polynomial/tests/test_polyutils.py --- python-numpy-1.13.3/numpy/polynomial/tests/test_polyutils.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/tests/test_polyutils.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,11 +6,12 @@ import numpy as np import numpy.polynomial.polyutils as pu from numpy.testing import ( - TestCase, assert_almost_equal, assert_raises, - assert_equal, assert_, run_module_suite) + assert_almost_equal, assert_raises, assert_equal, assert_, + run_module_suite + ) -class TestMisc(TestCase): +class TestMisc(object): def test_trimseq(self): for i in range(5): @@ -43,7 +44,7 @@ assert_equal(pu.trimcoef(coef, 2), [0]) -class TestDomain(TestCase): +class TestDomain(object): def test_getdomain(self): # test for real values diff -Nru python-numpy-1.13.3/numpy/polynomial/tests/test_printing.py python-numpy-1.14.5/numpy/polynomial/tests/test_printing.py --- python-numpy-1.13.3/numpy/polynomial/tests/test_printing.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/polynomial/tests/test_printing.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,71 +1,71 @@ from __future__ import division, absolute_import, print_function import numpy.polynomial as poly -from numpy.testing import TestCase, run_module_suite, assert_ +from numpy.testing import run_module_suite, assert_equal -class test_str(TestCase): +class TestStr(object): def test_polynomial_str(self): res = str(poly.Polynomial([0, 1])) - tgt = 'poly([0., 1.])' - assert_(res, tgt) + tgt = 'poly([0. 1.])' + assert_equal(res, tgt) def test_chebyshev_str(self): res = str(poly.Chebyshev([0, 1])) - tgt = 'leg([0., 1.])' - assert_(res, tgt) + tgt = 'cheb([0. 
1.])' + assert_equal(res, tgt) def test_legendre_str(self): res = str(poly.Legendre([0, 1])) - tgt = 'leg([0., 1.])' - assert_(res, tgt) + tgt = 'leg([0. 1.])' + assert_equal(res, tgt) def test_hermite_str(self): res = str(poly.Hermite([0, 1])) - tgt = 'herm([0., 1.])' - assert_(res, tgt) + tgt = 'herm([0. 1.])' + assert_equal(res, tgt) def test_hermiteE_str(self): res = str(poly.HermiteE([0, 1])) - tgt = 'herme([0., 1.])' - assert_(res, tgt) + tgt = 'herme([0. 1.])' + assert_equal(res, tgt) def test_laguerre_str(self): res = str(poly.Laguerre([0, 1])) - tgt = 'lag([0., 1.])' - assert_(res, tgt) + tgt = 'lag([0. 1.])' + assert_equal(res, tgt) -class test_repr(TestCase): +class TestRepr(object): def test_polynomial_str(self): res = repr(poly.Polynomial([0, 1])) - tgt = 'Polynomial([0., 1.])' - assert_(res, tgt) + tgt = 'Polynomial([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) def test_chebyshev_str(self): res = repr(poly.Chebyshev([0, 1])) - tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) + tgt = 'Chebyshev([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) def test_legendre_repr(self): res = repr(poly.Legendre([0, 1])) - tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) + tgt = 'Legendre([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) def test_hermite_repr(self): res = repr(poly.Hermite([0, 1])) - tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) + tgt = 'Hermite([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) def test_hermiteE_repr(self): res = repr(poly.HermiteE([0, 1])) - tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])' - assert_(res, tgt) + tgt = 'HermiteE([0., 1.], domain=[-1, 1], window=[-1, 1])' + assert_equal(res, tgt) def test_laguerre_repr(self): res = repr(poly.Laguerre([0, 1])) - tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])' - assert_(res, tgt) + tgt = 'Laguerre([0., 1.], domain=[0, 1], window=[0, 1])' + 
assert_equal(res, tgt) # diff -Nru python-numpy-1.13.3/numpy/random/__init__.py python-numpy-1.14.5/numpy/random/__init__.py --- python-numpy-1.13.3/numpy/random/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/random/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -117,6 +117,6 @@ """ return RandomState(seed=0) -from numpy.testing.nosetester import _numpy_tester +from numpy.testing import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench diff -Nru python-numpy-1.13.3/numpy/random/mtrand/distributions.c python-numpy-1.14.5/numpy/random/mtrand/distributions.c --- python-numpy-1.13.3/numpy/random/mtrand/distributions.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/random/mtrand/distributions.c 2018-06-12 17:31:56.000000000 +0000 @@ -41,10 +41,11 @@ * SOFTWARE OR ITS DOCUMENTATION. */ -#include -#include #include "distributions.h" #include +#include +#include +#include #ifndef min #define min(x,y) ((x LONG_MAX || X < 1.0) { + continue; + } + T = pow(1.0 + 1.0/X, am1); - } while (((V*X*(T-1.0)/(b-1.0)) > (T/b)) || X < 1); - return X; + if (V*X*(T - 1.0)/(b - 1.0) <= T/b) { + return (long)X; + } + } } long rk_geometric_search(rk_state *state, double p) diff -Nru python-numpy-1.13.3/numpy/random/mtrand/mtrand.c python-numpy-1.14.5/numpy/random/mtrand/mtrand.c --- python-numpy-1.13.3/numpy/random/mtrand/mtrand.c 2017-09-29 18:22:10.000000000 +0000 +++ python-numpy-1.14.5/numpy/random/mtrand/mtrand.c 2018-06-12 18:29:46.000000000 +0000 @@ -1,13 +1,14 @@ -/* Generated by Cython 0.26.1 */ +/* Generated by Cython 0.28.2 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) - #error Cython requires Python 2.6+ or Python 3.2+. 
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. #else -#define CYTHON_ABI "0_26_1" +#define CYTHON_ABI "0_28_2" +#define CYTHON_FUTURE_DIVISION 0 #include #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) @@ -31,7 +32,7 @@ #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG - #if PY_VERSION_HEX >= 0x03030000 || (PY_MAJOR_VERSION == 2 && PY_VERSION_HEX >= 0x02070000) + #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif @@ -49,8 +50,12 @@ #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS @@ -69,6 +74,10 @@ #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 @@ -102,6 +111,10 @@ #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 @@ -154,6 +167,12 @@ #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef 
CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) @@ -164,6 +183,103 @@ #undef BASE #undef MASK #endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + 
#elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif @@ -192,12 +308,12 @@ #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif -#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast @@ -209,6 +325,74 @@ #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) 
PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; // PyThread_create_key reports success always +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif // TSS (Thread Specific Storage) API +#if CYTHON_COMPILING_IN_CPYTHON || 
defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ @@ -253,18 +437,6 @@ #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_PYSTON - #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 @@ -292,8 +464,11 @@ #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type @@ -328,112 +503,26 @@ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif -#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || 
(__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR -# if defined(__cplusplus) - template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } -# else -# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif -#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int32 uint32_t; - #endif - #endif -#else - #include -#endif -#ifndef CYTHON_FALLTHROUGH - #ifdef __cplusplus - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #elif __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) || (defined(__GNUC__) && defined(__attribute__)) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif - -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #elif defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + 
} __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) @@ -461,14 +550,6 @@ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif - #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" @@ -479,6 +560,7 @@ #define __PYX_HAVE__mtrand #define __PYX_HAVE_API__mtrand +/* Early includes */ #include "string.h" #include "math.h" #include @@ -494,7 +576,7 @@ #include #endif /* _OPENMP */ -#ifdef PYREX_WITHOUT_ASSERTIONS +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif @@ -525,8 +607,8 @@ #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) && defined (_M_X64) - #define __Pyx_sst_abs(value) _abs64(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) @@ -548,6 +630,12 @@ #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) 
PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) @@ -558,16 +646,11 @@ #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -#if PY_MAJOR_VERSION < 3 -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) -{ +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } -#else -#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen -#endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode @@ -576,6 +659,8 @@ #define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS @@ -676,7 +761,7 @@ #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } -static PyObject *__pyx_m; +static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime; @@ -695,6 +780,18 @@ "numpy.pxd", "type.pxd", }; +/* NoFastGil.proto */ +#define __Pyx_PyGILState_Ensure PyGILState_Ensure +#define __Pyx_PyGILState_Release PyGILState_Release +#define __Pyx_FastGIL_Remember() +#define __Pyx_FastGIL_Forget() +#define __Pyx_FastGilFuncInit() + +/* ForceInitThreads.proto */ +#ifndef __PYX_FORCE_INIT_THREADS + #define __PYX_FORCE_INIT_THREADS 0 +#endif + /*--- Type declarations ---*/ struct __pyx_obj_6mtrand_RandomState; @@ -869,16 +966,7 @@ /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif @@ -934,20 +1022,15 @@ /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); -/* NoFastGil.proto */ -#define __Pyx_PyGILState_Ensure PyGILState_Ensure -#define __Pyx_PyGILState_Release PyGILState_Release -#define __Pyx_FastGIL_Remember() -#define __Pyx_FastGIL_Forget() -#define __Pyx_FastGilFuncInit() - /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define 
__Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* SaveResetException.proto */ @@ -979,15 +1062,25 @@ /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) 
#endif @@ -1031,6 +1124,14 @@ /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); +/* PyIntBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, int inplace); +#else +#define __Pyx_PyInt_EqObjC(op1, op2, intval, inplace)\ + PyObject_RichCompare(op1, op2, Py_EQ) + #endif + /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ @@ -1049,7 +1150,7 @@ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); @@ -1093,6 +1194,13 @@ return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); } +/* ObjectGetItem.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); +#else +#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) +#endif + /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace); @@ -1101,14 +1209,6 @@ (inplace ? 
PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) #endif -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, int inplace); -#else -#define __Pyx_PyInt_EqObjC(op1, op2, intval, inplace)\ - PyObject_RichCompare(op1, op2, Py_EQ) - #endif - /* SliceObject.proto */ #define __Pyx_PyObject_DelSlice(obj, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound)\ __Pyx_PyObject_SetSlice(obj, (PyObject*)NULL, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound) @@ -1119,24 +1219,15 @@ /* PyObjectSetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS -#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o,n,NULL) -static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_setattro)) - return tp->tp_setattro(obj, attr_name, value); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_setattr)) - return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); -#endif - return PyObject_SetAttr(obj, attr_name, value); -} +#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) +static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); #else #define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) #define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) #endif /* KeywordStringCheck.proto */ -static CYTHON_INLINE int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed); +static int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed); /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY @@ -1169,26 +1260,39 @@ #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif -/* ForceInitThreads.proto */ -#ifndef __PYX_FORCE_INIT_THREADS - #define __PYX_FORCE_INIT_THREADS 0 -#endif - /* 
SetItemInt.proto */ #define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) -static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); +static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, int wraparound, int boundscheck); +/* PyObject_GenericGetAttrNoDict.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr +#endif + +/* PyObject_GenericGetAttr.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr +#endif + /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* CLineInTraceback.proto */ -static int __Pyx_CLineForTraceback(int c_line); +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif /* CodeObjectCache.proto */ typedef struct { @@ -1287,6 +1391,19 @@ /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); @@ -1356,6 +1473,7 @@ static PyObject *__pyx_f_6mtrand_discd_array(rk_state *, __pyx_t_6mtrand_rk_discd, PyObject *, PyArrayObject *, PyObject *); /*proto*/ static double __pyx_f_6mtrand_kahan_sum(double *, npy_intp); /*proto*/ #define __Pyx_MODULE_NAME "mtrand" +extern int __pyx_module_is_main_mtrand; int __pyx_module_is_main_mtrand = 0; /* Implementation of 'mtrand' */ @@ -1440,7 +1558,6 @@ static const char __pyx_k_warn[] = "warn"; static const char __pyx_k_zipf[] = "zipf"; static const char __pyx_k_a_0_2[] = "a < 0"; -static const char __pyx_k_a_1_0[] = "a <= 1.0"; static const char __pyx_k_alpha[] = "alpha"; static const char __pyx_k_array[] = "array"; static const char __pyx_k_bytes[] = "bytes"; @@ -1503,11 +1620,11 @@ static const char __pyx_k_unique[] = "unique"; static const char __pyx_k_unsafe[] = "unsafe"; 
static const char __pyx_k_MT19937[] = "MT19937"; +static const char __pyx_k_alpha_0[] = "alpha <= 0"; static const char __pyx_k_asarray[] = "asarray"; static const char __pyx_k_casting[] = "casting"; static const char __pyx_k_dfden_0[] = "dfden <= 0"; static const char __pyx_k_dfnum_0[] = "dfnum <= 0"; -static const char __pyx_k_dfnum_1[] = "dfnum <= 1"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_greater[] = "greater"; static const char __pyx_k_integer[] = "integer"; @@ -1619,41 +1736,44 @@ static const char __pyx_k_noncentral_chisquare[] = "noncentral_chisquare"; static const char __pyx_k_standard_exponential[] = "standard_exponential"; static const char __pyx_k_lam_value_too_large_2[] = "lam value too large."; -static const char __pyx_k_RandomState_f_line_1992[] = "RandomState.f (line 1992)"; +static const char __pyx_k_Seed_array_must_be_1_d[] = "Seed array must be 1-d"; +static const char __pyx_k_Seed_must_be_non_empty[] = "Seed must be non-empty"; +static const char __pyx_k_RandomState_f_line_1997[] = "RandomState.f (line 1997)"; static const char __pyx_k_a_must_be_1_dimensional[] = "a must be 1-dimensional"; static const char __pyx_k_p_must_be_1_dimensional[] = "p must be 1-dimensional"; static const char __pyx_k_state_must_be_624_longs[] = "state must be 624 longs"; static const char __pyx_k_a_must_be_greater_than_0[] = "a must be greater than 0"; static const char __pyx_k_algorithm_must_be_MT19937[] = "algorithm must be 'MT19937'"; -static const char __pyx_k_RandomState_bytes_line_999[] = "RandomState.bytes (line 999)"; -static const char __pyx_k_RandomState_rand_line_1316[] = "RandomState.rand (line 1316)"; -static const char __pyx_k_RandomState_wald_line_3505[] = "RandomState.wald (line 3505)"; -static const char __pyx_k_RandomState_zipf_line_3991[] = "RandomState.zipf (line 3991)"; +static const char __pyx_k_RandomState_rand_line_1321[] = "RandomState.rand (line 1321)"; +static const char 
__pyx_k_RandomState_wald_line_3516[] = "RandomState.wald (line 3516)"; +static const char __pyx_k_RandomState_zipf_line_4002[] = "RandomState.zipf (line 4002)"; static const char __pyx_k_Range_exceeds_valid_bounds[] = "Range exceeds valid bounds"; static const char __pyx_k_low_is_out_of_bounds_for_s[] = "low is out of bounds for %s"; static const char __pyx_k_mean_must_be_1_dimensional[] = "mean must be 1 dimensional"; -static const char __pyx_k_RandomState_gamma_line_1896[] = "RandomState.gamma (line 1896)"; -static const char __pyx_k_RandomState_power_line_2869[] = "RandomState.power (line 2869)"; -static const char __pyx_k_RandomState_randn_line_1360[] = "RandomState.randn (line 1360)"; +static const char __pyx_k_RandomState_bytes_line_1004[] = "RandomState.bytes (line 1004)"; +static const char __pyx_k_RandomState_gamma_line_1901[] = "RandomState.gamma (line 1901)"; +static const char __pyx_k_RandomState_power_line_2880[] = "RandomState.power (line 2880)"; +static const char __pyx_k_RandomState_randn_line_1365[] = "RandomState.randn (line 1365)"; static const char __pyx_k_a_and_p_must_have_same_size[] = "a and p must have same size"; +static const char __pyx_k_a_must_be_a_valid_float_1_0[] = "'a' must be a valid float > 1.0"; static const char __pyx_k_high_is_out_of_bounds_for_s[] = "high is out of bounds for %s"; -static const char __pyx_k_RandomState_choice_line_1028[] = "RandomState.choice (line 1028)"; -static const char __pyx_k_RandomState_gumbel_line_3078[] = "RandomState.gumbel (line 3078)"; -static const char __pyx_k_RandomState_normal_line_1547[] = "RandomState.normal (line 1547)"; -static const char __pyx_k_RandomState_pareto_line_2649[] = "RandomState.pareto (line 2649)"; -static const char __pyx_k_RandomState_randint_line_905[] = "RandomState.randint (line 905)"; -static const char __pyx_k_RandomState_laplace_line_2980[] = "RandomState.laplace (line 2980)"; -static const char __pyx_k_RandomState_poisson_line_3903[] = "RandomState.poisson (line 
3903)"; -static const char __pyx_k_RandomState_shuffle_line_4759[] = "RandomState.shuffle (line 4759)"; -static const char __pyx_k_RandomState_tomaxint_line_858[] = "RandomState.tomaxint (line 858)"; -static const char __pyx_k_RandomState_uniform_line_1210[] = "RandomState.uniform (line 1210)"; -static const char __pyx_k_RandomState_weibull_line_2759[] = "RandomState.weibull (line 2759)"; +static const char __pyx_k_RandomState_choice_line_1033[] = "RandomState.choice (line 1033)"; +static const char __pyx_k_RandomState_gumbel_line_3089[] = "RandomState.gumbel (line 3089)"; +static const char __pyx_k_RandomState_normal_line_1552[] = "RandomState.normal (line 1552)"; +static const char __pyx_k_RandomState_pareto_line_2660[] = "RandomState.pareto (line 2660)"; +static const char __pyx_k_RandomState_randint_line_910[] = "RandomState.randint (line 910)"; +static const char __pyx_k_RandomState_laplace_line_2991[] = "RandomState.laplace (line 2991)"; +static const char __pyx_k_RandomState_poisson_line_3914[] = "RandomState.poisson (line 3914)"; +static const char __pyx_k_RandomState_shuffle_line_4779[] = "RandomState.shuffle (line 4779)"; +static const char __pyx_k_RandomState_tomaxint_line_863[] = "RandomState.tomaxint (line 863)"; +static const char __pyx_k_RandomState_uniform_line_1215[] = "RandomState.uniform (line 1215)"; +static const char __pyx_k_RandomState_weibull_line_2770[] = "RandomState.weibull (line 2770)"; static const char __pyx_k_probabilities_do_not_sum_to_1[] = "probabilities do not sum to 1"; -static const char __pyx_k_RandomState_binomial_line_3686[] = "RandomState.binomial (line 3686)"; -static const char __pyx_k_RandomState_logistic_line_3209[] = "RandomState.logistic (line 3209)"; -static const char __pyx_k_RandomState_rayleigh_line_3426[] = "RandomState.rayleigh (line 3426)"; -static const char __pyx_k_RandomState_vonmises_line_2551[] = "RandomState.vonmises (line 2551)"; -static const char __pyx_k_dirichlet_alpha_size_None_Draw[] = "\n 
dirichlet(alpha, size=None)\n\n Draw samples from the Dirichlet distribution.\n\n Draw `size` samples of dimension k from a Dirichlet distribution. A\n Dirichlet-distributed random variable can be seen as a multivariate\n generalization of a Beta distribution. Dirichlet pdf is the conjugate\n prior of a multinomial in Bayesian inference.\n\n Parameters\n ----------\n alpha : array\n Parameter of the distribution (k dimension for sample of\n dimension k).\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. Default is None, in which case a\n single value is returned.\n\n Returns\n -------\n samples : ndarray,\n The drawn samples, of shape (size, alpha.ndim).\n\n Notes\n -----\n .. math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}\n\n Uses the following property for computation: for each dimension,\n draw a random sample y_i from a standard gamma generator of shape\n `alpha_i`, then\n :math:`X = \\frac{1}{\\sum_{i=1}^k{y_i}} (y_1, \\ldots, y_n)` is\n Dirichlet distributed.\n\n References\n ----------\n .. [1] David McKay, \"Information Theory, Inference and Learning\n Algorithms,\" chapter 23,\n http://www.inference.phy.cam.ac.uk/mackay/\n .. 
[2] Wikipedia, \"Dirichlet distribution\",\n http://en.wikipedia.org/wiki/Dirichlet_distribution\n\n Examples\n --------\n Taking an example cited in Wikipedia, this distribution can be used if\n one wanted to cut strings (each of initial length 1.0) into K pieces\n with different lengths, where each piece had, on average, a designated\n average length, but allowing some variation in the relative sizes of\n th""e pieces.\n\n >>> s = np.random.dirichlet((10, 5, 3), 20).transpose()\n\n >>> plt.barh(range(20), s[0])\n >>> plt.barh(range(20), s[1], left=s[0], color='g')\n >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')\n >>> plt.title(\"Lengths of Strings\")\n\n "; +static const char __pyx_k_RandomState_binomial_line_3697[] = "RandomState.binomial (line 3697)"; +static const char __pyx_k_RandomState_logistic_line_3220[] = "RandomState.logistic (line 3220)"; +static const char __pyx_k_RandomState_rayleigh_line_3437[] = "RandomState.rayleigh (line 3437)"; +static const char __pyx_k_RandomState_vonmises_line_2562[] = "RandomState.vonmises (line 2562)"; +static const char __pyx_k_dirichlet_alpha_size_None_Draw[] = "\n dirichlet(alpha, size=None)\n\n Draw samples from the Dirichlet distribution.\n\n Draw `size` samples of dimension k from a Dirichlet distribution. A\n Dirichlet-distributed random variable can be seen as a multivariate\n generalization of a Beta distribution. Dirichlet pdf is the conjugate\n prior of a multinomial in Bayesian inference.\n\n Parameters\n ----------\n alpha : array\n Parameter of the distribution (k dimension for sample of\n dimension k).\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. 
Default is None, in which case a\n single value is returned.\n\n Returns\n -------\n samples : ndarray,\n The drawn samples, of shape (size, alpha.ndim).\n\n Raises\n -------\n ValueError\n If any value in alpha is less than or equal to zero\n\n Notes\n -----\n .. math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}\n\n Uses the following property for computation: for each dimension,\n draw a random sample y_i from a standard gamma generator of shape\n `alpha_i`, then\n :math:`X = \\frac{1}{\\sum_{i=1}^k{y_i}} (y_1, \\ldots, y_n)` is\n Dirichlet distributed.\n\n References\n ----------\n .. [1] David McKay, \"Information Theory, Inference and Learning\n Algorithms,\" chapter 23,\n http://www.inference.phy.cam.ac.uk/mackay/\n .. [2] Wikipedia, \"Dirichlet distribution\",\n http://en.wikipedia.org/wiki/Dirichlet_distribution\n\n Examples\n --------\n Taking an example cited in Wikipedia, this distribution can be used if\n one wanted to cut strings (each of initial length 1.0) into K pieces\n with different lengths, where each piece"" had, on average, a designated\n average length, but allowing some variation in the relative sizes of\n the pieces.\n\n >>> s = np.random.dirichlet((10, 5, 3), 20).transpose()\n\n >>> plt.barh(range(20), s[0])\n >>> plt.barh(range(20), s[1], left=s[0], color='g')\n >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')\n >>> plt.title(\"Lengths of Strings\")\n\n "; static const char __pyx_k_laplace_loc_0_0_scale_1_0_size[] = "\n laplace(loc=0.0, scale=1.0, size=None)\n\n Draw samples from the Laplace or double exponential distribution with\n specified location (or mean) and scale (decay).\n\n The Laplace distribution is similar to the Gaussian/normal distribution,\n but is sharper at the peak and has fatter tails. 
It represents the\n difference between two independent, identically distributed exponential\n random variables.\n\n Parameters\n ----------\n loc : float or array_like of floats, optional\n The position, :math:`\\mu`, of the distribution peak. Default is 0.\n scale : float or array_like of floats, optional\n :math:`\\lambda`, the exponential decay. Default is 1.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``loc`` and ``scale`` are both scalars.\n Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Laplace distribution.\n\n Notes\n -----\n It has the probability density function\n\n .. math:: f(x; \\mu, \\lambda) = \\frac{1}{2\\lambda}\n \\exp\\left(-\\frac{|x - \\mu|}{\\lambda}\\right).\n\n The first law of Laplace, from 1774, states that the frequency\n of an error can be expressed as an exponential function of the\n absolute magnitude of the error, which leads to the Laplace\n distribution. For many problems in economics and health\n sciences, this distribution seems to model the data better\n than the standard Gaussian distribution.\n\n References\n ----------\n .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). \"Han""dbook of\n Mathematical Functions with Formulas, Graphs, and Mathematical\n Tables, 9th printing,\" New York: Dover, 1972.\n .. [2] Kotz, Samuel, et. al. \"The Laplace Distribution and\n Generalizations, \" Birkhauser, 2001.\n .. [3] Weisstein, Eric W. \"Laplace Distribution.\"\n From MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/LaplaceDistribution.html\n .. 
[4] Wikipedia, \"Laplace distribution\",\n http://en.wikipedia.org/wiki/Laplace_distribution\n\n Examples\n --------\n Draw samples from the distribution\n\n >>> loc, scale = 0., 1.\n >>> s = np.random.laplace(loc, scale, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 30, normed=True)\n >>> x = np.arange(-8., 8., .01)\n >>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)\n >>> plt.plot(x, pdf)\n\n Plot Gaussian for comparison:\n\n >>> g = (1/(scale * np.sqrt(2 * np.pi)) *\n ... np.exp(-(x - loc)**2 / (2 * scale**2)))\n >>> plt.plot(x,g)\n\n "; static const char __pyx_k_permutation_x_Randomly_permute[] = "\n permutation(x)\n\n Randomly permute a sequence, or return a permuted range.\n\n If `x` is a multi-dimensional array, it is only shuffled along its\n first index.\n\n Parameters\n ----------\n x : int or array_like\n If `x` is an integer, randomly permute ``np.arange(x)``.\n If `x` is an array, make a copy and shuffle the elements\n randomly.\n\n Returns\n -------\n out : ndarray\n Permuted sequence or array range.\n\n Examples\n --------\n >>> np.random.permutation(10)\n array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6])\n\n >>> np.random.permutation([1, 4, 9, 12, 15])\n array([15, 1, 9, 4, 12])\n\n >>> arr = np.arange(9).reshape((3, 3))\n >>> np.random.permutation(arr)\n array([[6, 7, 8],\n [0, 1, 2],\n [3, 4, 5]])\n\n "; static const char __pyx_k_poisson_lam_1_0_size_None_Draw[] = "\n poisson(lam=1.0, size=None)\n\n Draw samples from a Poisson distribution.\n\n The Poisson distribution is the limit of the binomial distribution\n for large N.\n\n Parameters\n ----------\n lam : float or array_like of floats\n Expectation of interval, should be >= 0. A sequence of expectation\n intervals must be broadcastable over the requested size.\n size : int or tuple of ints, optional\n Output shape. 
If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``lam`` is a scalar. Otherwise,\n ``np.array(lam).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Poisson distribution.\n\n Notes\n -----\n The Poisson distribution\n\n .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!}\n\n For events with an expected separation :math:`\\lambda` the Poisson\n distribution :math:`f(k; \\lambda)` describes the probability of\n :math:`k` events occurring within the observed\n interval :math:`\\lambda`.\n\n Because the output is limited to the range of the C long type, a\n ValueError is raised when `lam` is within 10 sigma of the maximum\n representable value.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Poisson Distribution.\"\n From MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/PoissonDistribution.html\n .. [2] Wikipedia, \"Poisson distribution\",\n http://en.wikipedia.org/wiki/Poisson_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> import numpy as np\n >>> s = np.random.poisson(5, 10000)\n\n Display histo""gram of the sample:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 14, normed=True)\n >>> plt.show()\n\n Draw each 100 values for lambda 100 and 500:\n\n >>> s = np.random.poisson(lam=(100., 500.), size=(100, 2))\n\n "; @@ -1666,21 +1786,22 @@ static const char __pyx_k_standard_gamma_shape_size_None[] = "\n standard_gamma(shape, size=None)\n\n Draw samples from a standard Gamma distribution.\n\n Samples are drawn from a Gamma distribution with specified parameters,\n shape (sometimes designated \"k\") and scale=1.\n\n Parameters\n ----------\n shape : float or array_like of floats\n Parameter, should be > 0.\n size : int or tuple of ints, optional\n Output shape. 
If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``shape`` is a scalar. Otherwise,\n ``np.array(shape).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized standard gamma distribution.\n\n See Also\n --------\n scipy.stats.gamma : probability density function, distribution or\n cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Gamma distribution is\n\n .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},\n\n where :math:`k` is the shape and :math:`\\theta` the scale,\n and :math:`\\Gamma` is the Gamma function.\n\n The Gamma distribution is often used to model the times to failure of\n electronic components, and arises naturally in processes for which the\n waiting times between Poisson distributed events are relevant.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Gamma Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/GammaDistribution.html\n .. [2] Wikipedia, \"Gamma distribution\",\n http://en.wikipedia.org/wiki/Gamma_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> shape, scale = 2""., 1. # mean and width\n >>> s = np.random.standard_gamma(shape, 1000000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> import scipy.special as sps\n >>> count, bins, ignored = plt.hist(s, 50, normed=True)\n >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ \\\n ... 
(sps.gamma(shape) * scale**shape))\n >>> plt.plot(bins, y, linewidth=2, color='r')\n >>> plt.show()\n\n "; static const char __pyx_k_standard_normal_size_None_Draw[] = "\n standard_normal(size=None)\n\n Draw samples from a standard Normal distribution (mean=0, stdev=1).\n\n Parameters\n ----------\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. Default is None, in which case a\n single value is returned.\n\n Returns\n -------\n out : float or ndarray\n Drawn samples.\n\n Examples\n --------\n >>> s = np.random.standard_normal(8000)\n >>> s\n array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, #random\n -0.38672696, -0.4685006 ]) #random\n >>> s.shape\n (8000,)\n >>> s = np.random.standard_normal(size=(3, 4, 2))\n >>> s.shape\n (3, 4, 2)\n\n "; static const char __pyx_k_wald_mean_scale_size_None_Draw[] = "\n wald(mean, scale, size=None)\n\n Draw samples from a Wald, or inverse Gaussian, distribution.\n\n As the scale approaches infinity, the distribution becomes more like a\n Gaussian. Some references claim that the Wald is an inverse Gaussian\n with mean equal to 1, but this is by no means universal.\n\n The inverse Gaussian distribution was first studied in relationship to\n Brownian motion. In 1956 M.C.K. Tweedie used the name inverse Gaussian\n because there is an inverse relationship between the time to cover a\n unit distance and distance covered in unit time.\n\n Parameters\n ----------\n mean : float or array_like of floats\n Distribution mean, should be > 0.\n scale : float or array_like of floats\n Scale parameter, should be >= 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. 
If size is ``None`` (default),\n a single value is returned if ``mean`` and ``scale`` are both scalars.\n Otherwise, ``np.broadcast(mean, scale).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Wald distribution.\n\n Notes\n -----\n The probability density function for the Wald distribution is\n\n .. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^\n \\frac{-scale(x-mean)^2}{2\\cdotp mean^2x}\n\n As noted above the inverse Gaussian distribution first arise\n from attempts to model Brownian motion. It is also a\n competitor to the Weibull for use in reliability modeling and\n modeling stock returns and interest rate processes.\n\n References\n ----------\n .. [1] Brighton Webs Ltd., Wald Distribution,\n "" http://www.brighton-webs.co.uk/distributions/wald.asp\n .. [2] Chhikara, Raj S., and Folks, J. Leroy, \"The Inverse Gaussian\n Distribution: Theory : Methodology, and Applications\", CRC Press,\n 1988.\n .. [3] Wikipedia, \"Wald distribution\"\n http://en.wikipedia.org/wiki/Wald_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram:\n\n >>> import matplotlib.pyplot as plt\n >>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, normed=True)\n >>> plt.show()\n\n "; -static const char __pyx_k_RandomState_chisquare_line_2196[] = "RandomState.chisquare (line 2196)"; -static const char __pyx_k_RandomState_dirichlet_line_4643[] = "RandomState.dirichlet (line 4643)"; -static const char __pyx_k_RandomState_geometric_line_4082[] = "RandomState.geometric (line 4082)"; -static const char __pyx_k_RandomState_hypergeometric_line[] = "RandomState.hypergeometric (line 4150)"; -static const char __pyx_k_RandomState_lognormal_line_3302[] = "RandomState.lognormal (line 3302)"; -static const char __pyx_k_RandomState_logseries_line_4272[] = "RandomState.logseries (line 4272)"; -static const char __pyx_k_RandomState_multivariate_normal[] = 
"RandomState.multivariate_normal (line 4369)"; -static const char __pyx_k_RandomState_standard_gamma_line[] = "RandomState.standard_gamma (line 1810)"; +static const char __pyx_k_RandomState_chisquare_line_2205[] = "RandomState.chisquare (line 2205)"; +static const char __pyx_k_RandomState_dirichlet_line_4656[] = "RandomState.dirichlet (line 4656)"; +static const char __pyx_k_RandomState_geometric_line_4095[] = "RandomState.geometric (line 4095)"; +static const char __pyx_k_RandomState_hypergeometric_line[] = "RandomState.hypergeometric (line 4163)"; +static const char __pyx_k_RandomState_lognormal_line_3313[] = "RandomState.lognormal (line 3313)"; +static const char __pyx_k_RandomState_logseries_line_4285[] = "RandomState.logseries (line 4285)"; +static const char __pyx_k_RandomState_multivariate_normal[] = "RandomState.multivariate_normal (line 4382)"; +static const char __pyx_k_RandomState_standard_gamma_line[] = "RandomState.standard_gamma (line 1815)"; static const char __pyx_k_Seed_must_be_between_0_and_2_32[] = "Seed must be between 0 and 2**32 - 1"; static const char __pyx_k_Unsupported_dtype_s_for_randint[] = "Unsupported dtype \"%s\" for randint"; +static const char __pyx_k_a_must_contain_valid_floats_1_0[] = "'a' must contain valid floats > 1.0"; static const char __pyx_k_binomial_n_p_size_None_Draw_sam[] = "\n binomial(n, p, size=None)\n\n Draw samples from a binomial distribution.\n\n Samples are drawn from a binomial distribution with specified\n parameters, n trials and p probability of success where\n n an integer >= 0 and p is in the interval [0,1]. (n may be\n input as a float, but it is truncated to an integer in use)\n\n Parameters\n ----------\n n : int or array_like of ints\n Parameter of the distribution, >= 0. Floats are also accepted,\n but they will be truncated to integers.\n p : float or array_like of floats\n Parameter of the distribution, >= 0 and <=1.\n size : int or tuple of ints, optional\n Output shape. 
If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``n`` and ``p`` are both scalars.\n Otherwise, ``np.broadcast(n, p).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized binomial distribution, where\n each sample is equal to the number of successes over the n trials.\n\n See Also\n --------\n scipy.stats.binom : probability density function, distribution or\n cumulative density function, etc.\n\n Notes\n -----\n The probability density for the binomial distribution is\n\n .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N},\n\n where :math:`n` is the number of trials, :math:`p` is the probability\n of success, and :math:`N` is the number of successes.\n\n When estimating the standard error of a proportion in a population by\n using a random sample, the normal distribution works well unless the\n product p*n <=5, where p = population proportion estimate, and n =\n number of samples, in which case the binom""ial distribution is used\n instead. For example, a sample of 15 people shows 4 who are left\n handed, and 11 who are right handed. Then p = 4/15 = 27%. 0.27*15 = 4,\n so the binomial distribution should be used in this case.\n\n References\n ----------\n .. [1] Dalgaard, Peter, \"Introductory Statistics with R\",\n Springer-Verlag, 2002.\n .. [2] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.\n .. [3] Lentner, Marvin, \"Elementary Applied Statistics\", Bogden\n and Quigley, 1972.\n .. [4] Weisstein, Eric W. \"Binomial Distribution.\" From MathWorld--A\n Wolfram Web Resource.\n http://mathworld.wolfram.com/BinomialDistribution.html\n .. 
[5] Wikipedia, \"Binomial distribution\",\n http://en.wikipedia.org/wiki/Binomial_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> n, p = 10, .5 # number of trials, probability of each trial\n >>> s = np.random.binomial(n, p, 1000)\n # result of flipping a coin 10 times, tested 1000 times.\n\n A real world example. A company drills 9 wild-cat oil exploration\n wells, each with an estimated probability of success of 0.1. All nine\n wells fail. What is the probability of that happening?\n\n Let's do 20,000 trials of the model, and count the number that\n generate zero positive results.\n\n >>> sum(np.random.binomial(9, 0.1, 20000) == 0)/20000.\n # answer = 0.38885, or 38%.\n\n "; static const char __pyx_k_bytes_length_Return_random_byte[] = "\n bytes(length)\n\n Return random bytes.\n\n Parameters\n ----------\n length : int\n Number of random bytes.\n\n Returns\n -------\n out : str\n String of length `length`.\n\n Examples\n --------\n >>> np.random.bytes(10)\n ' eh\\x85\\x022SZ\\xbf\\xa4' #random\n\n "; -static const char __pyx_k_chisquare_df_size_None_Draw_sam[] = "\n chisquare(df, size=None)\n\n Draw samples from a chi-square distribution.\n\n When `df` independent random variables, each with standard normal\n distributions (mean 0, variance 1), are squared and summed, the\n resulting distribution is chi-square (see Notes). This distribution\n is often used in hypothesis testing.\n\n Parameters\n ----------\n df : int or array_like of ints\n Number of degrees of freedom.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``df`` is a scalar. 
Otherwise,\n ``np.array(df).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized chi-square distribution.\n\n Raises\n ------\n ValueError\n When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)\n is given.\n\n Notes\n -----\n The variable obtained by summing the squares of `df` independent,\n standard normally distributed random variables:\n\n .. math:: Q = \\sum_{i=0}^{\\mathtt{df}} X^2_i\n\n is chi-square distributed, denoted\n\n .. math:: Q \\sim \\chi^2_k.\n\n The probability density function of the chi-squared distribution is\n\n .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}\n x^{k/2 - 1} e^{-x/2},\n\n where :math:`\\Gamma` is the gamma function,\n\n .. math:: \\Gamma(x) = \\int_0^{-\\infty} t^{x - 1} e^{-t} dt.\n\n References\n ----------\n .. [1] NIST \"Engineering Statistics Handbook\"\n http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n\n Examples\n --------\n >>> np.random.chisquare(""2,4)\n array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272])\n\n "; +static const char __pyx_k_chisquare_df_size_None_Draw_sam[] = "\n chisquare(df, size=None)\n\n Draw samples from a chi-square distribution.\n\n When `df` independent random variables, each with standard normal\n distributions (mean 0, variance 1), are squared and summed, the\n resulting distribution is chi-square (see Notes). This distribution\n is often used in hypothesis testing.\n\n Parameters\n ----------\n df : float or array_like of floats\n Number of degrees of freedom, should be > 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``df`` is a scalar. 
Otherwise,\n ``np.array(df).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized chi-square distribution.\n\n Raises\n ------\n ValueError\n When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)\n is given.\n\n Notes\n -----\n The variable obtained by summing the squares of `df` independent,\n standard normally distributed random variables:\n\n .. math:: Q = \\sum_{i=0}^{\\mathtt{df}} X^2_i\n\n is chi-square distributed, denoted\n\n .. math:: Q \\sim \\chi^2_k.\n\n The probability density function of the chi-squared distribution is\n\n .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}\n x^{k/2 - 1} e^{-x/2},\n\n where :math:`\\Gamma` is the gamma function,\n\n .. math:: \\Gamma(x) = \\int_0^{-\\infty} t^{x - 1} e^{-t} dt.\n\n References\n ----------\n .. [1] NIST \"Engineering Statistics Handbook\"\n http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n\n Examples\n --------\n >>> n""p.random.chisquare(2,4)\n array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272])\n\n "; static const char __pyx_k_choice_a_size_None_replace_True[] = "\n choice(a, size=None, replace=True, p=None)\n\n Generates a random sample from a given 1-D array\n\n .. versionadded:: 1.7.0\n\n Parameters\n -----------\n a : 1-D array-like or int\n If an ndarray, a random sample is generated from its elements.\n If an int, the random sample is generated as if a were np.arange(a)\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. 
Default is None, in which case a\n single value is returned.\n replace : boolean, optional\n Whether the sample is with or without replacement\n p : 1-D array-like, optional\n The probabilities associated with each entry in a.\n If not given the sample assumes a uniform distribution over all\n entries in a.\n\n Returns\n --------\n samples : single item or ndarray\n The generated random samples\n\n Raises\n -------\n ValueError\n If a is an int and less than zero, if a or p are not 1-dimensional,\n if a is an array-like of size 0, if p is not a vector of\n probabilities, if a and p have different lengths, or if\n replace=False and the sample size is greater than the population\n size\n\n See Also\n ---------\n randint, shuffle, permutation\n\n Examples\n ---------\n Generate a uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3)\n array([0, 3, 4])\n >>> #This is equivalent to np.random.randint(0,5,3)\n\n Generate a non-uniform random sample from np.arange(5) of size 3:\n\n >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])\n array([3, 3, 0])\n\n Generate a uniform random sample from np.arange(5) of size 3 without\n ""replacement:\n\n >>> np.random.choice(5, 3, replace=False)\n array([3,1,0])\n >>> #This is equivalent to np.random.permutation(np.arange(5))[:3]\n\n Generate a non-uniform random sample from np.arange(5) of size\n 3 without replacement:\n\n >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])\n array([2, 3, 0])\n\n Any of the above can be repeated with an arbitrary array-like\n instead of just integers. 
For instance:\n\n >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']\n >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])\n array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],\n dtype='|S11')\n\n "; -static const char __pyx_k_f_dfnum_dfden_size_None_Draw_sa[] = "\n f(dfnum, dfden, size=None)\n\n Draw samples from an F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of\n freedom in denominator), where both parameters should be greater than\n zero.\n\n The random variate of the F distribution (also known as the\n Fisher distribution) is a continuous probability distribution\n that arises in ANOVA tests, and is the ratio of two chi-square\n variates.\n\n Parameters\n ----------\n dfnum : int or array_like of ints\n Degrees of freedom in numerator. Should be greater than zero.\n dfden : int or array_like of ints\n Degrees of freedom in denominator. Should be greater than zero.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``dfnum`` and ``dfden`` are both scalars.\n Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Fisher distribution.\n\n See Also\n --------\n scipy.stats.f : probability density function, distribution or\n cumulative density function, etc.\n\n Notes\n -----\n The F statistic is used to compare in-group variances to between-group\n variances. Calculating the distribution depends on the sampling, and\n so it is a function of the respective degrees of freedom in the\n problem. 
The variable `dfnum` is the number of samples minus one, the\n between-groups degrees of freedom, while `dfden` is the within-groups\n degrees of freedom, the sum of the number of samples in each ""group\n minus the number of groups.\n\n References\n ----------\n .. [1] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.\n .. [2] Wikipedia, \"F-distribution\",\n http://en.wikipedia.org/wiki/F-distribution\n\n Examples\n --------\n An example from Glantz[1], pp 47-40:\n\n Two groups, children of diabetics (25 people) and children from people\n without diabetes (25 controls). Fasting blood glucose was measured,\n case group had a mean value of 86.1, controls had a mean value of\n 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these\n data consistent with the null hypothesis that the parents diabetic\n status does not affect their children's blood glucose levels?\n Calculating the F statistic from the data gives a value of 36.01.\n\n Draw samples from the distribution:\n\n >>> dfnum = 1. # between group degrees of freedom\n >>> dfden = 48. 
# within groups degrees of freedom\n >>> s = np.random.f(dfnum, dfden, 1000)\n\n The lower bound for the top 1% of the samples is :\n\n >>> sort(s)[-10]\n 7.61988120985\n\n So there is about a 1% chance that the F statistic will exceed 7.62,\n the measured value is 36, so the null hypothesis is rejected at the 1%\n level.\n\n "; +static const char __pyx_k_f_dfnum_dfden_size_None_Draw_sa[] = "\n f(dfnum, dfden, size=None)\n\n Draw samples from an F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of\n freedom in denominator), where both parameters should be greater than\n zero.\n\n The random variate of the F distribution (also known as the\n Fisher distribution) is a continuous probability distribution\n that arises in ANOVA tests, and is the ratio of two chi-square\n variates.\n\n Parameters\n ----------\n dfnum : float or array_like of floats\n Degrees of freedom in numerator, should be > 0.\n dfden : float or array_like of float\n Degrees of freedom in denominator, should be > 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``dfnum`` and ``dfden`` are both scalars.\n Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Fisher distribution.\n\n See Also\n --------\n scipy.stats.f : probability density function, distribution or\n cumulative density function, etc.\n\n Notes\n -----\n The F statistic is used to compare in-group variances to between-group\n variances. Calculating the distribution depends on the sampling, and\n so it is a function of the respective degrees of freedom in the\n problem. 
The variable `dfnum` is the number of samples minus one, the\n between-groups degrees of freedom, while `dfden` is the within-groups\n degrees of freedom, the sum of the number of samples in each group\n minus ""the number of groups.\n\n References\n ----------\n .. [1] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.\n .. [2] Wikipedia, \"F-distribution\",\n http://en.wikipedia.org/wiki/F-distribution\n\n Examples\n --------\n An example from Glantz[1], pp 47-40:\n\n Two groups, children of diabetics (25 people) and children from people\n without diabetes (25 controls). Fasting blood glucose was measured,\n case group had a mean value of 86.1, controls had a mean value of\n 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these\n data consistent with the null hypothesis that the parents diabetic\n status does not affect their children's blood glucose levels?\n Calculating the F statistic from the data gives a value of 36.01.\n\n Draw samples from the distribution:\n\n >>> dfnum = 1. # between group degrees of freedom\n >>> dfden = 48. # within groups degrees of freedom\n >>> s = np.random.f(dfnum, dfden, 1000)\n\n The lower bound for the top 1% of the samples is :\n\n >>> sort(s)[-10]\n 7.61988120985\n\n So there is about a 1% chance that the F statistic will exceed 7.62,\n the measured value is 36, so the null hypothesis is rejected at the 1%\n level.\n\n "; static const char __pyx_k_gamma_shape_scale_1_0_size_None[] = "\n gamma(shape, scale=1.0, size=None)\n\n Draw samples from a Gamma distribution.\n\n Samples are drawn from a Gamma distribution with specified parameters,\n `shape` (sometimes designated \"k\") and `scale` (sometimes designated\n \"theta\"), where both parameters are > 0.\n\n Parameters\n ----------\n shape : float or array_like of floats\n The shape of the gamma distribution. Should be greater than zero.\n scale : float or array_like of floats, optional\n The scale of the gamma distribution. 
Should be greater than zero.\n Default is equal to 1.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``shape`` and ``scale`` are both scalars.\n Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized gamma distribution.\n\n See Also\n --------\n scipy.stats.gamma : probability density function, distribution or\n cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Gamma distribution is\n\n .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},\n\n where :math:`k` is the shape and :math:`\\theta` the scale,\n and :math:`\\Gamma` is the Gamma function.\n\n The Gamma distribution is often used to model the times to failure of\n electronic components, and arises naturally in processes for which the\n waiting times between Poisson distributed events are relevant.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Gamma Distribution.\" From MathWorld--A\n Wolfram Web Resourc""e.\n http://mathworld.wolfram.com/GammaDistribution.html\n .. [2] Wikipedia, \"Gamma distribution\",\n http://en.wikipedia.org/wiki/Gamma_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)\n >>> s = np.random.gamma(shape, scale, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> import scipy.special as sps\n >>> count, bins, ignored = plt.hist(s, 50, normed=True)\n >>> y = bins**(shape-1)*(np.exp(-bins/scale) /\n ... 
(sps.gamma(shape)*scale**shape))\n >>> plt.plot(bins, y, linewidth=2, color='r')\n >>> plt.show()\n\n "; static const char __pyx_k_geometric_p_size_None_Draw_samp[] = "\n geometric(p, size=None)\n\n Draw samples from the geometric distribution.\n\n Bernoulli trials are experiments with one of two outcomes:\n success or failure (an example of such an experiment is flipping\n a coin). The geometric distribution models the number of trials\n that must be run in order to achieve success. It is therefore\n supported on the positive integers, ``k = 1, 2, ...``.\n\n The probability mass function of the geometric distribution is\n\n .. math:: f(k) = (1 - p)^{k - 1} p\n\n where `p` is the probability of success of an individual trial.\n\n Parameters\n ----------\n p : float or array_like of floats\n The probability of success of an individual trial.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``p`` is a scalar. Otherwise,\n ``np.array(p).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized geometric distribution.\n\n Examples\n --------\n Draw ten thousand values from the geometric distribution,\n with the probability of an individual success equal to 0.35:\n\n >>> z = np.random.geometric(p=0.35, size=10000)\n\n How many trials succeeded after a single run?\n\n >>> (z == 1).sum() / 10000.\n 0.34889999999999999 #random\n\n "; static const char __pyx_k_gumbel_loc_0_0_scale_1_0_size_N[] = "\n gumbel(loc=0.0, scale=1.0, size=None)\n\n Draw samples from a Gumbel distribution.\n\n Draw samples from a Gumbel distribution with specified location and\n scale. For more information on the Gumbel distribution, see\n Notes and References below.\n\n Parameters\n ----------\n loc : float or array_like of floats, optional\n The location of the mode of the distribution. 
Default is 0.\n scale : float or array_like of floats, optional\n The scale parameter of the distribution. Default is 1.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``loc`` and ``scale`` are both scalars.\n Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Gumbel distribution.\n\n See Also\n --------\n scipy.stats.gumbel_l\n scipy.stats.gumbel_r\n scipy.stats.genextreme\n weibull\n\n Notes\n -----\n The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme\n Value Type I) distribution is one of a class of Generalized Extreme\n Value (GEV) distributions used in modeling extreme value problems.\n The Gumbel is a special case of the Extreme Value Type I distribution\n for maximums from distributions with \"exponential-like\" tails.\n\n The probability density for the Gumbel distribution is\n\n .. math:: p(x) = \\frac{e^{-(x - \\mu)/ \\beta}}{\\beta} e^{ -e^{-(x - \\mu)/\n \\beta}},\n\n where :math:`\\mu` is the mode, a location parameter, and\n :math:`\\beta` is the scale parameter.\n\n The Gumbel (named for German mathematician ""Emil Julius Gumbel) was used\n very early in the hydrology literature, for modeling the occurrence of\n flood events. It is also used for modeling maximum wind speed and\n rainfall rates. It is a \"fat-tailed\" distribution - the probability of\n an event in the tail of the distribution is larger than if one used a\n Gaussian, hence the surprisingly frequent occurrence of 100-year\n floods. 
Floods were initially modeled as a Gaussian process, which\n underestimated the frequency of extreme events.\n\n It is one of a class of extreme value distributions, the Generalized\n Extreme Value (GEV) distributions, which also includes the Weibull and\n Frechet.\n\n The function has a mean of :math:`\\mu + 0.57721\\beta` and a variance\n of :math:`\\frac{\\pi^2}{6}\\beta^2`.\n\n References\n ----------\n .. [1] Gumbel, E. J., \"Statistics of Extremes,\"\n New York: Columbia University Press, 1958.\n .. [2] Reiss, R.-D. and Thomas, M., \"Statistical Analysis of Extreme\n Values from Insurance, Finance, Hydrology and Other Fields,\"\n Basel: Birkhauser Verlag, 2001.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> mu, beta = 0, 0.1 # location and scale\n >>> s = np.random.gumbel(mu, beta, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 30, normed=True)\n >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)\n ... * np.exp( -np.exp( -(bins - mu) /beta) ),\n ... linewidth=2, color='r')\n >>> plt.show()\n\n Show how an extreme value distribution can arise from a Gaussian process\n and compare to a Gaussian:\n\n >>> means = []\n >>> maxima = []\n "" >>> for i in range(0,1000) :\n ... a = np.random.normal(mu, beta, 1000)\n ... means.append(a.mean())\n ... maxima.append(a.max())\n >>> count, bins, ignored = plt.hist(maxima, 30, normed=True)\n >>> beta = np.std(maxima) * np.sqrt(6) / np.pi\n >>> mu = np.mean(maxima) - 0.57721*beta\n >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)\n ... * np.exp(-np.exp(-(bins - mu)/beta)),\n ... linewidth=2, color='r')\n >>> plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi))\n ... * np.exp(-(bins - mu)**2 / (2 * beta**2)),\n ... 
linewidth=2, color='g')\n >>> plt.show()\n\n "; @@ -1691,8 +1812,8 @@ static const char __pyx_k_multinomial_n_pvals_size_None_D[] = "\n multinomial(n, pvals, size=None)\n\n Draw samples from a multinomial distribution.\n\n The multinomial distribution is a multivariate generalisation of the\n binomial distribution. Take an experiment with one of ``p``\n possible outcomes. An example of such an experiment is throwing a dice,\n where the outcome can be 1 through 6. Each sample drawn from the\n distribution represents `n` such experiments. Its values,\n ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the\n outcome was ``i``.\n\n Parameters\n ----------\n n : int\n Number of experiments.\n pvals : sequence of floats, length p\n Probabilities of each of the ``p`` different outcomes. These\n should sum to 1 (however, the last element is always assumed to\n account for the remaining probability, as long as\n ``sum(pvals[:-1]) <= 1)``.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. Default is None, in which case a\n single value is returned.\n\n Returns\n -------\n out : ndarray\n The drawn samples, of shape *size*, if that was provided. If not,\n the shape is ``(N,)``.\n\n In other words, each entry ``out[i,j,...,:]`` is an N-dimensional\n value drawn from the distribution.\n\n Examples\n --------\n Throw a dice 20 times:\n\n >>> np.random.multinomial(20, [1/6.]*6, size=1)\n array([[4, 1, 7, 5, 2, 1]])\n\n It landed 4 times on 1, once on 2, etc.\n\n Now, throw the dice 20 times, and 20 times again:\n\n >>> np.random.multinomial(20, [1/6.]*6, size=2)\n array([[3, 4, 3, 3, 4, 3],\n [2, 4, 3, 4, 0, 7]])\n\n For the first run, we threw 3 times 1, 4 times 2, etc. 
Fo""r the second,\n we threw 2 times 1, 4 times 2, etc.\n\n A loaded die is more likely to land on number 6:\n\n >>> np.random.multinomial(100, [1/7.]*5 + [2/7.])\n array([11, 16, 14, 17, 16, 26])\n\n The probability inputs should be normalized. As an implementation\n detail, the value of the last entry is ignored and assumed to take\n up any leftover probability mass, but this should not be relied on.\n A biased coin which has twice as much weight on one side as on the\n other should be sampled like so:\n\n >>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT\n array([38, 62])\n\n not like:\n\n >>> np.random.multinomial(100, [1.0, 2.0]) # WRONG\n array([100, 0])\n\n "; static const char __pyx_k_multivariate_normal_mean_cov_si[] = "\n multivariate_normal(mean, cov[, size, check_valid, tol])\n\n Draw random samples from a multivariate normal distribution.\n\n The multivariate normal, multinormal or Gaussian distribution is a\n generalization of the one-dimensional normal distribution to higher\n dimensions. Such a distribution is specified by its mean and\n covariance matrix. These parameters are analogous to the mean\n (average or \"center\") and variance (standard deviation, or \"width,\"\n squared) of the one-dimensional normal distribution.\n\n Parameters\n ----------\n mean : 1-D array_like, of length N\n Mean of the N-dimensional distribution.\n cov : 2-D array_like, of shape (N, N)\n Covariance matrix of the distribution. It must be symmetric and\n positive-semidefinite for proper sampling.\n size : int or tuple of ints, optional\n Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are\n generated, and packed in an `m`-by-`n`-by-`k` arrangement. 
Because\n each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.\n If no shape is specified, a single (`N`-D) sample is returned.\n check_valid : { 'warn', 'raise', 'ignore' }, optional\n Behavior when the covariance matrix is not positive semidefinite.\n tol : float, optional\n Tolerance when checking the singular values in covariance matrix.\n\n Returns\n -------\n out : ndarray\n The drawn samples, of shape *size*, if that was provided. If not,\n the shape is ``(N,)``.\n\n In other words, each entry ``out[i,j,...,:]`` is an N-dimensional\n value drawn from the distribution.\n\n Notes\n -----\n The mean is a coordinate in N-dimensional space, which represents the\n location where samples are most likely to be generated. This ""is\n analogous to the peak of the bell curve for the one-dimensional or\n univariate normal distribution.\n\n Covariance indicates the level to which two variables vary together.\n From the multivariate normal distribution, we draw N-dimensional\n samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix\n element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.\n The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its\n \"spread\").\n\n Instead of specifying the full covariance matrix, popular\n approximations include:\n\n - Spherical covariance (`cov` is a multiple of the identity matrix)\n - Diagonal covariance (`cov` has non-negative elements, and only on\n the diagonal)\n\n This geometrical property can be seen in two dimensions by plotting\n generated data-points:\n\n >>> mean = [0, 0]\n >>> cov = [[1, 0], [0, 100]] # diagonal covariance\n\n Diagonal covariance means that points are oriented along x or y-axis:\n\n >>> import matplotlib.pyplot as plt\n >>> x, y = np.random.multivariate_normal(mean, cov, 5000).T\n >>> plt.plot(x, y, 'x')\n >>> plt.axis('equal')\n >>> plt.show()\n\n Note that the covariance matrix must be positive semidefinite (a.k.a.\n nonnegative-definite). 
Otherwise, the behavior of this method is\n undefined and backwards compatibility is not guaranteed.\n\n References\n ----------\n .. [1] Papoulis, A., \"Probability, Random Variables, and Stochastic\n Processes,\" 3rd ed., New York: McGraw-Hill, 1991.\n .. [2] Duda, R. O., Hart, P. E., and Stork, D. G., \"Pattern\n Classification,\" 2nd ed., New York: Wiley, 2001.\n\n Examples\n --------\n >>> mean = (1, 2)\n >>> cov = [[1, 0], [0, 1]]\n >>> x = np.random.multivariate_normal(mean"", cov, (3, 3))\n >>> x.shape\n (3, 3, 2)\n\n The following is probably true, given that 0.6 is roughly twice the\n standard deviation:\n\n >>> list((x[0,0,:] - mean) < 0.6)\n [True, True]\n\n "; static const char __pyx_k_negative_binomial_n_p_size_None[] = "\n negative_binomial(n, p, size=None)\n\n Draw samples from a negative binomial distribution.\n\n Samples are drawn from a negative binomial distribution with specified\n parameters, `n` trials and `p` probability of success where `n` is an\n integer > 0 and `p` is in the interval [0, 1].\n\n Parameters\n ----------\n n : int or array_like of ints\n Parameter of the distribution, > 0. Floats are also accepted,\n but they will be truncated to integers.\n p : float or array_like of floats\n Parameter of the distribution, >= 0 and <=1.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``n`` and ``p`` are both scalars.\n Otherwise, ``np.broadcast(n, p).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized negative binomial distribution,\n where each sample is equal to N, the number of trials it took to\n achieve n - 1 successes, N - (n - 1) failures, and a success on the,\n (N + n)th trial.\n\n Notes\n -----\n The probability density for the negative binomial distribution is\n\n .. 
math:: P(N;n,p) = \\binom{N+n-1}{n-1}p^{n}(1-p)^{N},\n\n where :math:`n-1` is the number of successes, :math:`p` is the\n probability of success, and :math:`N+n-1` is the number of trials.\n The negative binomial distribution gives the probability of n-1\n successes and N failures in N+n-1 trials, and success on the (N+n)th\n trial.\n\n If one throws a die repeatedly until the third time a \"1\" appears,\n then the probability distribution of the number of non-\"1\"s that\n appear before the ""third \"1\" is a negative binomial distribution.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Negative Binomial Distribution.\" From\n MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/NegativeBinomialDistribution.html\n .. [2] Wikipedia, \"Negative binomial distribution\",\n http://en.wikipedia.org/wiki/Negative_binomial_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n A real world example. A company drills wild-cat oil\n exploration wells, each with an estimated probability of\n success of 0.1. What is the probability of having one success\n for each successive well, that is what is the probability of a\n single success after drilling 5 wells, after 6 wells, etc.?\n\n >>> s = np.random.negative_binomial(1, 0.1, 100000)\n >>> for i in range(1, 11):\n ... probability = sum(s>> import matplotlib.pyplot as plt\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... bins=200, normed=True)\n >>> plt.show()\n\n Draw values from a noncentral chisquare with very small noncentrality,\n and compare to a chisquare.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000),\n ... bins=np.arange(0., 25, .1), normed=True)\n >>> values2 = plt.hist(np.random.chisquare(3, 100000),\n ... 
bins=np.arange(0., 25, .1), normed=True)\n >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')\n >>> plt.show()\n\n Demonstrate how large values of non-centrality lead to a more symmetric\n distribution.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... bins=200, normed=True)\n >>> plt.show()\n\n "; -static const char __pyx_k_noncentral_f_dfnum_dfden_nonc_s[] = "\n noncentral_f(dfnum, dfden, nonc, size=None)\n\n Draw samples from the noncentral F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of\n freedom in denominator), where both parameters > 1.\n `nonc` is the non-centrality parameter.\n\n Parameters\n ----------\n dfnum : int or array_like of ints\n Parameter, should be > 1.\n dfden : int or array_like of ints\n Parameter, should be > 1.\n nonc : float or array_like of floats\n Parameter, should be >= 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``dfnum``, ``dfden``, and ``nonc``\n are all scalars. Otherwise, ``np.broadcast(dfnum, dfden, nonc).size``\n samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized noncentral Fisher distribution.\n\n Notes\n -----\n When calculating the power of an experiment (power = probability of\n rejecting the null hypothesis when a specific alternative is true) the\n non-central F statistic becomes important. When the null hypothesis is\n true, the F statistic follows a central F distribution. When the null\n hypothesis is not true, then it follows a non-central F statistic.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Noncentral F-Distribution.\"\n From MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/NoncentralF-Distribution.html\n .. 
[2] Wikipedia, \"Noncentral F-distribution\",\n http://en.wikipedia.org/wiki/Noncentral_F-dis""tribution\n\n Examples\n --------\n In a study, testing for a specific alternative to the null hypothesis\n requires use of the Noncentral F distribution. We need to calculate the\n area in the tail of the distribution that exceeds the value of the F\n distribution for the null hypothesis. We'll plot the two probability\n distributions for comparison.\n\n >>> dfnum = 3 # between group deg of freedom\n >>> dfden = 20 # within groups degrees of freedom\n >>> nonc = 3.0\n >>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000)\n >>> NF = np.histogram(nc_vals, bins=50, normed=True)\n >>> c_vals = np.random.f(dfnum, dfden, 1000000)\n >>> F = np.histogram(c_vals, bins=50, normed=True)\n >>> plt.plot(F[1][1:], F[0])\n >>> plt.plot(NF[1][1:], NF[0])\n >>> plt.show()\n\n "; +static const char __pyx_k_noncentral_chisquare_df_nonc_si[] = "\n noncentral_chisquare(df, nonc, size=None)\n\n Draw samples from a noncentral chi-square distribution.\n\n The noncentral :math:`\\chi^2` distribution is a generalisation of\n the :math:`\\chi^2` distribution.\n\n Parameters\n ----------\n df : float or array_like of floats\n Degrees of freedom, should be > 0.\n\n .. versionchanged:: 1.10.0\n Earlier NumPy versions required dfnum > 1.\n nonc : float or array_like of floats\n Non-centrality, should be non-negative.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``df`` and ``nonc`` are both scalars.\n Otherwise, ``np.broadcast(df, nonc).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized noncentral chi-square distribution.\n\n Notes\n -----\n The probability density function for the noncentral Chi-square\n distribution is\n\n .. 
math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}\n \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}\n \\P_{Y_{df+2i}}(x),\n\n where :math:`Y_{q}` is the Chi-square with q degrees of freedom.\n\n In Delhi (2007), it is noted that the noncentral chi-square is\n useful in bombing and coverage problems, the probability of\n killing the point target given by the noncentral chi-squared\n distribution.\n\n References\n ----------\n .. [1] Delhi, M.S. Holla, \"On a noncentral chi-square distribution in\n the analysis of weapon systems effectiveness\", Metrika,\n Volume 15, Number 1 / December, 1970.\n .. [2] Wikipedia, \"Noncentral chi-s""quare distribution\"\n http://en.wikipedia.org/wiki/Noncentral_chi-square_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram\n\n >>> import matplotlib.pyplot as plt\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... bins=200, normed=True)\n >>> plt.show()\n\n Draw values from a noncentral chisquare with very small noncentrality,\n and compare to a chisquare.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000),\n ... bins=np.arange(0., 25, .1), normed=True)\n >>> values2 = plt.hist(np.random.chisquare(3, 100000),\n ... bins=np.arange(0., 25, .1), normed=True)\n >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')\n >>> plt.show()\n\n Demonstrate how large values of non-centrality lead to a more symmetric\n distribution.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... 
bins=200, normed=True)\n >>> plt.show()\n\n "; +static const char __pyx_k_noncentral_f_dfnum_dfden_nonc_s[] = "\n noncentral_f(dfnum, dfden, nonc, size=None)\n\n Draw samples from the noncentral F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of\n freedom in denominator), where both parameters > 1.\n `nonc` is the non-centrality parameter.\n\n Parameters\n ----------\n dfnum : float or array_like of floats\n Numerator degrees of freedom, should be > 0.\n\n .. versionchanged:: 1.14.0\n Earlier NumPy versions required dfnum > 1.\n dfden : float or array_like of floats\n Denominator degrees of freedom, should be > 0.\n nonc : float or array_like of floats\n Non-centrality parameter, the sum of the squares of the numerator\n means, should be >= 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``dfnum``, ``dfden``, and ``nonc``\n are all scalars. Otherwise, ``np.broadcast(dfnum, dfden, nonc).size``\n samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized noncentral Fisher distribution.\n\n Notes\n -----\n When calculating the power of an experiment (power = probability of\n rejecting the null hypothesis when a specific alternative is true) the\n non-central F statistic becomes important. When the null hypothesis is\n true, the F statistic follows a central F distribution. When the null\n hypothesis is not true, then it follows a non-central F statistic.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Noncentral F-Distribution.\"\n From MathW""orld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/NoncentralF-Distribution.html\n .. 
[2] Wikipedia, \"Noncentral F-distribution\",\n http://en.wikipedia.org/wiki/Noncentral_F-distribution\n\n Examples\n --------\n In a study, testing for a specific alternative to the null hypothesis\n requires use of the Noncentral F distribution. We need to calculate the\n area in the tail of the distribution that exceeds the value of the F\n distribution for the null hypothesis. We'll plot the two probability\n distributions for comparison.\n\n >>> dfnum = 3 # between group deg of freedom\n >>> dfden = 20 # within groups degrees of freedom\n >>> nonc = 3.0\n >>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000)\n >>> NF = np.histogram(nc_vals, bins=50, normed=True)\n >>> c_vals = np.random.f(dfnum, dfden, 1000000)\n >>> F = np.histogram(c_vals, bins=50, normed=True)\n >>> plt.plot(F[1][1:], F[0])\n >>> plt.plot(NF[1][1:], NF[0])\n >>> plt.show()\n\n "; static const char __pyx_k_normal_loc_0_0_scale_1_0_size_N[] = "\n normal(loc=0.0, scale=1.0, size=None)\n\n Draw random samples from a normal (Gaussian) distribution.\n\n The probability density function of the normal distribution, first\n derived by De Moivre and 200 years later by both Gauss and Laplace\n independently [2]_, is often called the bell curve because of\n its characteristic shape (see the example below).\n\n The normal distributions occurs often in nature. For example, it\n describes the commonly occurring distribution of samples influenced\n by a large number of tiny, random disturbances, each with its own\n unique distribution [2]_.\n\n Parameters\n ----------\n loc : float or array_like of floats\n Mean (\"centre\") of the distribution.\n scale : float or array_like of floats\n Standard deviation (spread or \"width\") of the distribution.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. 
If size is ``None`` (default),\n a single value is returned if ``loc`` and ``scale`` are both scalars.\n Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized normal distribution.\n\n See Also\n --------\n scipy.stats.norm : probability density function, distribution or\n cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Gaussian distribution is\n\n .. math:: p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }}\n e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} },\n\n where :math:`\\mu` is the mean and :math:`\\sigma` the standard\n deviation. The square of the standard deviation, :math:`\\sigma^2`,\n is called the v""ariance.\n\n The function has its peak at the mean, and its \"spread\" increases with\n the standard deviation (the function reaches 0.607 times its maximum at\n :math:`x + \\sigma` and :math:`x - \\sigma` [2]_). This implies that\n `numpy.random.normal` is more likely to return samples lying close to\n the mean, rather than those far away.\n\n References\n ----------\n .. [1] Wikipedia, \"Normal distribution\",\n http://en.wikipedia.org/wiki/Normal_distribution\n .. [2] P. R. Peebles Jr., \"Central Limit Theorem\" in \"Probability,\n Random Variables and Random Signal Principles\", 4th ed., 2001,\n pp. 51, 51, 125.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> mu, sigma = 0, 0.1 # mean and standard deviation\n >>> s = np.random.normal(mu, sigma, 1000)\n\n Verify the mean and the variance:\n\n >>> abs(mu - np.mean(s)) < 0.01\n True\n\n >>> abs(sigma - np.std(s, ddof=1)) < 0.01\n True\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 30, normed=True)\n >>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *\n ... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),\n ... 
linewidth=2, color='r')\n >>> plt.show()\n\n "; static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; static const char __pyx_k_pareto_a_size_None_Draw_samples[] = "\n pareto(a, size=None)\n\n Draw samples from a Pareto II or Lomax distribution with\n specified shape.\n\n The Lomax or Pareto II distribution is a shifted Pareto\n distribution. The classical Pareto distribution can be\n obtained from the Lomax distribution by adding 1 and\n multiplying by the scale parameter ``m`` (see Notes). The\n smallest value of the Lomax distribution is zero while for the\n classical Pareto distribution it is ``mu``, where the standard\n Pareto distribution has location ``mu = 1``. Lomax can also\n be considered as a simplified version of the Generalized\n Pareto distribution (available in SciPy), with the scale set\n to one and the location set to zero.\n\n The Pareto distribution must be greater than zero, and is\n unbounded above. It is also known as the \"80-20 rule\". In\n this distribution, 80 percent of the weights are in the lowest\n 20 percent of the range, while the other 20 percent fill the\n remaining 80 percent of the range.\n\n Parameters\n ----------\n a : float or array_like of floats\n Shape of the distribution. Should be greater than zero.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``a`` is a scalar. 
Otherwise,\n ``np.array(a).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Pareto distribution.\n\n See Also\n --------\n scipy.stats.lomax : probability density function, distribution or\n cumulative density function, etc.\n scipy.stats.genpareto : probability density function, distribution or\n cumulative densit""y function, etc.\n\n Notes\n -----\n The probability density for the Pareto distribution is\n\n .. math:: p(x) = \\frac{am^a}{x^{a+1}}\n\n where :math:`a` is the shape and :math:`m` the scale.\n\n The Pareto distribution, named after the Italian economist\n Vilfredo Pareto, is a power law probability distribution\n useful in many real world problems. Outside the field of\n economics it is generally referred to as the Bradford\n distribution. Pareto developed the distribution to describe\n the distribution of wealth in an economy. It has also found\n use in insurance, web page access statistics, oil field sizes,\n and many other problems, including the download frequency for\n projects in Sourceforge [1]_. It is one of the so-called\n \"fat-tailed\" distributions.\n\n\n References\n ----------\n .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of\n Sourceforge projects.\n .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.\n .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme\n Values, Birkhauser Verlag, Basel, pp 23-30.\n .. [4] Wikipedia, \"Pareto distribution\",\n http://en.wikipedia.org/wiki/Pareto_distribution\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> a, m = 3., 2. 
# shape and mode\n >>> s = (np.random.pareto(a, 1000) + 1) * m\n\n Display the histogram of the samples, along with the probability\n density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, _ = plt.hist(s, 100, normed=True)\n >>> fit = a*m**a / bins**(a+1)\n >>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')\n >>> plt.show()\n\n "; @@ -1700,8 +1821,8 @@ static const char __pyx_k_randint_low_high_None_size_None[] = "\n randint(low, high=None, size=None, dtype='l')\n\n Return random integers from `low` (inclusive) to `high` (exclusive).\n\n Return random integers from the \"discrete uniform\" distribution of\n the specified dtype in the \"half-open\" interval [`low`, `high`). If\n `high` is None (the default), then results are from [0, `low`).\n\n Parameters\n ----------\n low : int\n Lowest (signed) integer to be drawn from the distribution (unless\n ``high=None``, in which case this parameter is one above the\n *highest* such integer).\n high : int, optional\n If provided, one above the largest (signed) integer to be drawn\n from the distribution (see above for behavior if ``high=None``).\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. Default is None, in which case a\n single value is returned.\n dtype : dtype, optional\n Desired dtype of the result. All dtypes are determined by their\n name, i.e., 'int64', 'int', etc, so byteorder is not available\n and a specific precision may have different C types depending\n on the platform. The default value is 'np.int'.\n\n .. versionadded:: 1.11.0\n\n Returns\n -------\n out : int or ndarray of ints\n `size`-shaped array of random integers from the appropriate\n distribution, or a single such random int if `size` not provided.\n\n See Also\n --------\n random.random_integers : similar to `randint`, only for the closed\n interval [`low`, `high`], and 1 is the lowest value if `high` is\n omitted. 
In particular, this other one is the one to use to generate\n uniformly distributed discrete non-integers.\n\n Examples\n ---""-----\n >>> np.random.randint(2, size=10)\n array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])\n >>> np.random.randint(1, size=10)\n array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n\n Generate a 2 x 4 array of ints between 0 and 4, inclusive:\n\n >>> np.random.randint(5, size=(2, 4))\n array([[4, 0, 2, 1],\n [3, 2, 2, 0]])\n\n "; static const char __pyx_k_random_integers_low_high_None_s[] = "\n random_integers(low, high=None, size=None)\n\n Random integers of type np.int between `low` and `high`, inclusive.\n\n Return random integers of type np.int from the \"discrete uniform\"\n distribution in the closed interval [`low`, `high`]. If `high` is\n None (the default), then results are from [1, `low`]. The np.int\n type translates to the C long type used by Python 2 for \"short\"\n integers and its precision is platform dependent.\n\n This function has been deprecated. Use randint instead.\n\n .. deprecated:: 1.11.0\n\n Parameters\n ----------\n low : int\n Lowest (signed) integer to be drawn from the distribution (unless\n ``high=None``, in which case this parameter is the *highest* such\n integer).\n high : int, optional\n If provided, the largest (signed) integer to be drawn from the\n distribution (see above for behavior if ``high=None``).\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. 
Default is None, in which case a\n single value is returned.\n\n Returns\n -------\n out : int or ndarray of ints\n `size`-shaped array of random integers from the appropriate\n distribution, or a single such random int if `size` not provided.\n\n See Also\n --------\n random.randint : Similar to `random_integers`, only for the half-open\n interval [`low`, `high`), and 0 is the lowest value if `high` is\n omitted.\n\n Notes\n -----\n To sample from N evenly spaced floating-point numbers between a and b,\n use::\n\n a + (b - a) * (np.random.random_integers(N) - 1) / (N - 1.)\n\n Examples\n --------\n >>> np.random.random_integers(5)\n 4\n >"">> type(np.random.random_integers(5))\n \n >>> np.random.random_integers(5, size=(3,2))\n array([[5, 4],\n [3, 3],\n [4, 5]])\n\n Choose five random numbers from the set of five evenly-spaced\n numbers between 0 and 2.5, inclusive (*i.e.*, from the set\n :math:`{0, 5/8, 10/8, 15/8, 20/8}`):\n\n >>> 2.5 * (np.random.random_integers(5, size=(5,)) - 1) / 4.\n array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ])\n\n Roll two six sided dice 1000 times and sum the results:\n\n >>> d1 = np.random.random_integers(1, 6, 1000)\n >>> d2 = np.random.random_integers(1, 6, 1000)\n >>> dsums = d1 + d2\n\n Display results as a histogram:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(dsums, 11, normed=True)\n >>> plt.show()\n\n "; static const char __pyx_k_rayleigh_scale_1_0_size_None_Dr[] = "\n rayleigh(scale=1.0, size=None)\n\n Draw samples from a Rayleigh distribution.\n\n The :math:`\\chi` and Weibull distributions are generalizations of the\n Rayleigh.\n\n Parameters\n ----------\n scale : float or array_like of floats, optional\n Scale, also equals the mode. Should be >= 0. Default is 1.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``scale`` is a scalar. 
Otherwise,\n ``np.array(scale).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Rayleigh distribution.\n\n Notes\n -----\n The probability density function for the Rayleigh distribution is\n\n .. math:: P(x;scale) = \\frac{x}{scale^2}e^{\\frac{-x^2}{2 \\cdotp scale^2}}\n\n The Rayleigh distribution would arise, for example, if the East\n and North components of the wind velocity had identical zero-mean\n Gaussian distributions. Then the wind speed would have a Rayleigh\n distribution.\n\n References\n ----------\n .. [1] Brighton Webs Ltd., \"Rayleigh Distribution,\"\n http://www.brighton-webs.co.uk/distributions/rayleigh.asp\n .. [2] Wikipedia, \"Rayleigh distribution\"\n http://en.wikipedia.org/wiki/Rayleigh_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram\n\n >>> values = hist(np.random.rayleigh(3, 100000), bins=200, normed=True)\n\n Wave heights tend to follow a Rayleigh distribution. If the mean wave\n height is 1 meter, what fraction of waves are likely to be larger than 3\n meters?\n\n >>> meanvalue = 1\n >>> mo""devalue = np.sqrt(2 / np.pi) * meanvalue\n >>> s = np.random.rayleigh(modevalue, 1000000)\n\n The percentage of waves larger than 3 meters is:\n\n >>> 100.*sum(s>3)/1000000.\n 0.087300000000000003\n\n "; -static const char __pyx_k_standard_t_df_size_None_Draw_sa[] = "\n standard_t(df, size=None)\n\n Draw samples from a standard Student's t distribution with `df` degrees\n of freedom.\n\n A special case of the hyperbolic distribution. As `df` gets\n large, the result resembles that of the standard normal\n distribution (`standard_normal`).\n\n Parameters\n ----------\n df : int or array_like of ints\n Degrees of freedom, should be > 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. 
If size is ``None`` (default),\n a single value is returned if ``df`` is a scalar. Otherwise,\n ``np.array(df).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized standard Student's t distribution.\n\n Notes\n -----\n The probability density function for the t distribution is\n\n .. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df}\n \\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2}\n\n The t test is based on an assumption that the data come from a\n Normal distribution. The t test provides a way to test whether\n the sample mean (that is the mean calculated from the data) is\n a good estimate of the true mean.\n\n The derivation of the t-distribution was first published in\n 1908 by William Gosset while working for the Guinness Brewery\n in Dublin. Due to proprietary issues, he had to publish under\n a pseudonym, and so he used the name Student.\n\n References\n ----------\n .. [1] Dalgaard, Peter, \"Introductory Statistics With R\",\n Springer, 2002.\n .. [2] Wikipedia, \"Student's t-distribution\"\n http://en.wikipedia.org/wiki/Student's_t-distributio""n\n\n Examples\n --------\n From Dalgaard page 83 [1]_, suppose the daily energy intake for 11\n women in Kj is:\n\n >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\\n ... 
7515, 8230, 8770])\n\n Does their energy intake deviate systematically from the recommended\n value of 7725 kJ?\n\n We have 10 degrees of freedom, so is the sample mean within 95% of the\n recommended value?\n\n >>> s = np.random.standard_t(10, size=100000)\n >>> np.mean(intake)\n 6753.636363636364\n >>> intake.std(ddof=1)\n 1142.1232221373727\n\n Calculate the t statistic, setting the ddof parameter to the unbiased\n value so the divisor in the standard deviation will be degrees of\n freedom, N-1.\n\n >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))\n >>> import matplotlib.pyplot as plt\n >>> h = plt.hist(s, bins=100, normed=True)\n\n For a one-sided t-test, how far out in the distribution does the t\n statistic appear?\n\n >>> np.sum(s>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\\n ... 7515, 8230, 8770])\n\n Does their energy intake deviate systematically from the recommended\n value of 7725 kJ?\n\n We have 10 degrees of freedom, so is the sample mean within 95% of the\n recommended value?\n\n >>> s = np.random.standard_t(10, size=100000)\n >>> np.mean(intake)\n 6753.636363636364\n >>> intake.std(ddof=1)\n 1142.1232221373727\n\n Calculate the t statistic, setting the ddof parameter to the unbiased\n value so the divisor in the standard deviation will be degrees of\n freedom, N-1.\n\n >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))\n >>> import matplotlib.pyplot as plt\n >>> h = plt.hist(s, bins=100, normed=True)\n\n For a one-sided t-test, how far out in the distribution does the t\n statistic appear?\n\n >>> np.sum(s>> import matplotlib.pyplot as plt\n >>> h = plt.hist(np.random.triangular(-3, 0, 8, 100000), bins=200,\n ... 
normed=True)\n >>> plt.show()\n\n "; static const char __pyx_k_uniform_low_0_0_high_1_0_size_N[] = "\n uniform(low=0.0, high=1.0, size=None)\n\n Draw samples from a uniform distribution.\n\n Samples are uniformly distributed over the half-open interval\n ``[low, high)`` (includes low, but excludes high). In other words,\n any value within the given interval is equally likely to be drawn\n by `uniform`.\n\n Parameters\n ----------\n low : float or array_like of floats, optional\n Lower boundary of the output interval. All values generated will be\n greater than or equal to low. The default value is 0.\n high : float or array_like of floats\n Upper boundary of the output interval. All values generated will be\n less than high. The default value is 1.0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``low`` and ``high`` are both scalars.\n Otherwise, ``np.broadcast(low, high).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized uniform distribution.\n\n See Also\n --------\n randint : Discrete uniform distribution, yielding integers.\n random_integers : Discrete uniform distribution over the closed\n interval ``[low, high]``.\n random_sample : Floats uniformly distributed over ``[0, 1)``.\n random : Alias for `random_sample`.\n rand : Convenience function that accepts dimensions as input, e.g.,\n ``rand(2,2)`` would generate a 2-by-2 array of floats,\n uniformly distributed over ``[0, 1)``.\n\n Notes\n -----\n The probability density function of the uniform distribution is\n\n .. math:: p(x) = \\frac{1}{b - a}\n\n anywhe""re within the interval ``[a, b)``, and zero elsewhere.\n\n When ``high`` == ``low``, values of ``low`` will be returned.\n If ``high`` < ``low``, the results are officially undefined\n and may eventually raise an error, i.e. 
do not rely on this\n function to behave when passed arguments satisfying that\n inequality condition.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> s = np.random.uniform(-1,0,1000)\n\n All values are within the given interval:\n\n >>> np.all(s >= -1)\n True\n >>> np.all(s < 0)\n True\n\n Display the histogram of the samples, along with the\n probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> count, bins, ignored = plt.hist(s, 15, normed=True)\n >>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')\n >>> plt.show()\n\n "; static const char __pyx_k_vonmises_mu_kappa_size_None_Dra[] = "\n vonmises(mu, kappa, size=None)\n\n Draw samples from a von Mises distribution.\n\n Samples are drawn from a von Mises distribution with specified mode\n (mu) and dispersion (kappa), on the interval [-pi, pi].\n\n The von Mises distribution (also known as the circular normal\n distribution) is a continuous probability distribution on the unit\n circle. It may be thought of as the circular analogue of the normal\n distribution.\n\n Parameters\n ----------\n mu : float or array_like of floats\n Mode (\"center\") of the distribution.\n kappa : float or array_like of floats\n Dispersion of the distribution, has to be >=0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``mu`` and ``kappa`` are both scalars.\n Otherwise, ``np.broadcast(mu, kappa).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized von Mises distribution.\n\n See Also\n --------\n scipy.stats.vonmises : probability density function, distribution, or\n cumulative density function, etc.\n\n Notes\n -----\n The probability density for the von Mises distribution is\n\n .. 
math:: p(x) = \\frac{e^{\\kappa cos(x-\\mu)}}{2\\pi I_0(\\kappa)},\n\n where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion,\n and :math:`I_0(\\kappa)` is the modified Bessel function of order 0.\n\n The von Mises is named for Richard Edler von Mises, who was born in\n Austria-Hungary, in what is now the Ukraine. He fled to the United\n States in 1939 and became a professor at Harvard. He worked in\n probability theory, aero""dynamics, fluid mechanics, and philosophy of\n science.\n\n References\n ----------\n .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). \"Handbook of\n Mathematical Functions with Formulas, Graphs, and Mathematical\n Tables, 9th printing,\" New York: Dover, 1972.\n .. [2] von Mises, R., \"Mathematical Theory of Probability\n and Statistics\", New York: Academic Press, 1964.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> mu, kappa = 0.0, 4.0 # mean and dispersion\n >>> s = np.random.vonmises(mu, kappa, 1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> from scipy.special import i0\n >>> plt.hist(s, 50, normed=True)\n >>> x = np.linspace(-np.pi, np.pi, num=51)\n >>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*i0(kappa))\n >>> plt.plot(x, y, linewidth=2, color='r')\n >>> plt.show()\n\n "; @@ -1709,18 +1830,19 @@ static const char __pyx_k_zipf_a_size_None_Draw_samples_f[] = "\n zipf(a, size=None)\n\n Draw samples from a Zipf distribution.\n\n Samples are drawn from a Zipf distribution with specified parameter\n `a` > 1.\n\n The Zipf distribution (also known as the zeta distribution) is a\n continuous probability distribution that satisfies Zipf's law: the\n frequency of an item is inversely proportional to its rank in a\n frequency table.\n\n Parameters\n ----------\n a : float or array_like of floats\n Distribution parameter. Should be greater than 1.\n size : int or tuple of ints, optional\n Output shape. 
If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``a`` is a scalar. Otherwise,\n ``np.array(a).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Zipf distribution.\n\n See Also\n --------\n scipy.stats.zipf : probability density function, distribution, or\n cumulative density function, etc.\n\n Notes\n -----\n The probability density for the Zipf distribution is\n\n .. math:: p(x) = \\frac{x^{-a}}{\\zeta(a)},\n\n where :math:`\\zeta` is the Riemann Zeta function.\n\n It is named for the American linguist George Kingsley Zipf, who noted\n that the frequency of any word in a sample of a language is inversely\n proportional to its rank in the frequency table.\n\n References\n ----------\n .. [1] Zipf, G. K., \"Selected Studies of the Principle of Relative\n Frequency in Language,\" Cambridge, MA: Harvard Univ. Press,\n 1932.\n\n Examples\n --------\n Draw samples from the distribution:\n\n >>> a = 2. 
# parameter\n >>> s = np.random.zipf(a, ""1000)\n\n Display the histogram of the samples, along with\n the probability density function:\n\n >>> import matplotlib.pyplot as plt\n >>> from scipy import special\n\n Truncate s values at 50 so plot is interesting:\n\n >>> count, bins, ignored = plt.hist(s[s<50], 50, normed=True)\n >>> x = np.arange(1., 50.)\n >>> y = x**(-a) / special.zetac(a)\n >>> plt.plot(x, y/max(y), linewidth=2, color='r')\n >>> plt.show()\n\n "; static const char __pyx_k_Cannot_take_a_larger_sample_than[] = "Cannot take a larger sample than population when 'replace=False'"; static const char __pyx_k_Fewer_non_zero_entries_in_p_than[] = "Fewer non-zero entries in p than size"; -static const char __pyx_k_RandomState_multinomial_line_453[] = "RandomState.multinomial (line 4530)"; -static const char __pyx_k_RandomState_negative_binomial_li[] = "RandomState.negative_binomial (line 3802)"; -static const char __pyx_k_RandomState_noncentral_chisquare[] = "RandomState.noncentral_chisquare (line 2277)"; -static const char __pyx_k_RandomState_noncentral_f_line_20[] = "RandomState.noncentral_f (line 2099)"; -static const char __pyx_k_RandomState_permutation_line_484[] = "RandomState.permutation (line 4847)"; -static const char __pyx_k_RandomState_random_integers_line[] = "RandomState.random_integers (line 1417)"; -static const char __pyx_k_RandomState_random_sample_line_8[] = "RandomState.random_sample (line 814)"; -static const char __pyx_k_RandomState_standard_cauchy_line[] = "RandomState.standard_cauchy (line 2381)"; -static const char __pyx_k_RandomState_standard_exponential[] = "RandomState.standard_exponential (line 1779)"; -static const char __pyx_k_RandomState_standard_normal_line[] = "RandomState.standard_normal (line 1514)"; -static const char __pyx_k_RandomState_standard_t_line_2445[] = "RandomState.standard_t (line 2445)"; -static const char __pyx_k_RandomState_triangular_line_3592[] = "RandomState.triangular (line 3592)"; +static const char 
__pyx_k_RandomState_multinomial_line_454[] = "RandomState.multinomial (line 4543)"; +static const char __pyx_k_RandomState_negative_binomial_li[] = "RandomState.negative_binomial (line 3813)"; +static const char __pyx_k_RandomState_noncentral_chisquare[] = "RandomState.noncentral_chisquare (line 2286)"; +static const char __pyx_k_RandomState_noncentral_f_line_21[] = "RandomState.noncentral_f (line 2104)"; +static const char __pyx_k_RandomState_permutation_line_486[] = "RandomState.permutation (line 4867)"; +static const char __pyx_k_RandomState_random_integers_line[] = "RandomState.random_integers (line 1422)"; +static const char __pyx_k_RandomState_random_sample_line_8[] = "RandomState.random_sample (line 819)"; +static const char __pyx_k_RandomState_standard_cauchy_line[] = "RandomState.standard_cauchy (line 2392)"; +static const char __pyx_k_RandomState_standard_exponential[] = "RandomState.standard_exponential (line 1784)"; +static const char __pyx_k_RandomState_standard_normal_line[] = "RandomState.standard_normal (line 1519)"; +static const char __pyx_k_RandomState_standard_t_line_2456[] = "RandomState.standard_t (line 2456)"; +static const char __pyx_k_RandomState_triangular_line_3603[] = "RandomState.triangular (line 3603)"; +static const char __pyx_k_Seed_values_must_be_between_0_an[] = "Seed values must be between 0 and 2**32 - 1"; static const char __pyx_k_This_function_is_deprecated_Plea[] = "This function is deprecated. 
Please call randint(1, {low} + 1) instead"; static const char __pyx_k_a_must_be_1_dimensional_or_an_in[] = "a must be 1-dimensional or an integer"; static const char __pyx_k_check_valid_must_equal_warn_rais[] = "check_valid must equal 'warn', 'raise', or 'ignore'"; @@ -1738,53 +1860,56 @@ static PyObject *__pyx_n_s_Lock; static PyObject *__pyx_n_s_MT19937; static PyObject *__pyx_n_s_OverflowError; -static PyObject *__pyx_kp_u_RandomState_binomial_line_3686; -static PyObject *__pyx_kp_u_RandomState_bytes_line_999; -static PyObject *__pyx_kp_u_RandomState_chisquare_line_2196; -static PyObject *__pyx_kp_u_RandomState_choice_line_1028; +static PyObject *__pyx_kp_u_RandomState_binomial_line_3697; +static PyObject *__pyx_kp_u_RandomState_bytes_line_1004; +static PyObject *__pyx_kp_u_RandomState_chisquare_line_2205; +static PyObject *__pyx_kp_u_RandomState_choice_line_1033; static PyObject *__pyx_n_s_RandomState_ctor; -static PyObject *__pyx_kp_u_RandomState_dirichlet_line_4643; -static PyObject *__pyx_kp_u_RandomState_f_line_1992; -static PyObject *__pyx_kp_u_RandomState_gamma_line_1896; -static PyObject *__pyx_kp_u_RandomState_geometric_line_4082; -static PyObject *__pyx_kp_u_RandomState_gumbel_line_3078; +static PyObject *__pyx_kp_u_RandomState_dirichlet_line_4656; +static PyObject *__pyx_kp_u_RandomState_f_line_1997; +static PyObject *__pyx_kp_u_RandomState_gamma_line_1901; +static PyObject *__pyx_kp_u_RandomState_geometric_line_4095; +static PyObject *__pyx_kp_u_RandomState_gumbel_line_3089; static PyObject *__pyx_kp_u_RandomState_hypergeometric_line; -static PyObject *__pyx_kp_u_RandomState_laplace_line_2980; -static PyObject *__pyx_kp_u_RandomState_logistic_line_3209; -static PyObject *__pyx_kp_u_RandomState_lognormal_line_3302; -static PyObject *__pyx_kp_u_RandomState_logseries_line_4272; -static PyObject *__pyx_kp_u_RandomState_multinomial_line_453; +static PyObject *__pyx_kp_u_RandomState_laplace_line_2991; +static PyObject 
*__pyx_kp_u_RandomState_logistic_line_3220; +static PyObject *__pyx_kp_u_RandomState_lognormal_line_3313; +static PyObject *__pyx_kp_u_RandomState_logseries_line_4285; +static PyObject *__pyx_kp_u_RandomState_multinomial_line_454; static PyObject *__pyx_kp_u_RandomState_multivariate_normal; static PyObject *__pyx_kp_u_RandomState_negative_binomial_li; static PyObject *__pyx_kp_u_RandomState_noncentral_chisquare; -static PyObject *__pyx_kp_u_RandomState_noncentral_f_line_20; -static PyObject *__pyx_kp_u_RandomState_normal_line_1547; -static PyObject *__pyx_kp_u_RandomState_pareto_line_2649; -static PyObject *__pyx_kp_u_RandomState_permutation_line_484; -static PyObject *__pyx_kp_u_RandomState_poisson_line_3903; -static PyObject *__pyx_kp_u_RandomState_power_line_2869; -static PyObject *__pyx_kp_u_RandomState_rand_line_1316; -static PyObject *__pyx_kp_u_RandomState_randint_line_905; -static PyObject *__pyx_kp_u_RandomState_randn_line_1360; +static PyObject *__pyx_kp_u_RandomState_noncentral_f_line_21; +static PyObject *__pyx_kp_u_RandomState_normal_line_1552; +static PyObject *__pyx_kp_u_RandomState_pareto_line_2660; +static PyObject *__pyx_kp_u_RandomState_permutation_line_486; +static PyObject *__pyx_kp_u_RandomState_poisson_line_3914; +static PyObject *__pyx_kp_u_RandomState_power_line_2880; +static PyObject *__pyx_kp_u_RandomState_rand_line_1321; +static PyObject *__pyx_kp_u_RandomState_randint_line_910; +static PyObject *__pyx_kp_u_RandomState_randn_line_1365; static PyObject *__pyx_kp_u_RandomState_random_integers_line; static PyObject *__pyx_kp_u_RandomState_random_sample_line_8; -static PyObject *__pyx_kp_u_RandomState_rayleigh_line_3426; -static PyObject *__pyx_kp_u_RandomState_shuffle_line_4759; +static PyObject *__pyx_kp_u_RandomState_rayleigh_line_3437; +static PyObject *__pyx_kp_u_RandomState_shuffle_line_4779; static PyObject *__pyx_kp_u_RandomState_standard_cauchy_line; static PyObject *__pyx_kp_u_RandomState_standard_exponential; static PyObject 
*__pyx_kp_u_RandomState_standard_gamma_line; static PyObject *__pyx_kp_u_RandomState_standard_normal_line; -static PyObject *__pyx_kp_u_RandomState_standard_t_line_2445; -static PyObject *__pyx_kp_u_RandomState_tomaxint_line_858; -static PyObject *__pyx_kp_u_RandomState_triangular_line_3592; -static PyObject *__pyx_kp_u_RandomState_uniform_line_1210; -static PyObject *__pyx_kp_u_RandomState_vonmises_line_2551; -static PyObject *__pyx_kp_u_RandomState_wald_line_3505; -static PyObject *__pyx_kp_u_RandomState_weibull_line_2759; -static PyObject *__pyx_kp_u_RandomState_zipf_line_3991; +static PyObject *__pyx_kp_u_RandomState_standard_t_line_2456; +static PyObject *__pyx_kp_u_RandomState_tomaxint_line_863; +static PyObject *__pyx_kp_u_RandomState_triangular_line_3603; +static PyObject *__pyx_kp_u_RandomState_uniform_line_1215; +static PyObject *__pyx_kp_u_RandomState_vonmises_line_2562; +static PyObject *__pyx_kp_u_RandomState_wald_line_3516; +static PyObject *__pyx_kp_u_RandomState_weibull_line_2770; +static PyObject *__pyx_kp_u_RandomState_zipf_line_4002; static PyObject *__pyx_kp_s_Range_exceeds_valid_bounds; static PyObject *__pyx_n_s_RuntimeWarning; +static PyObject *__pyx_kp_s_Seed_array_must_be_1_d; static PyObject *__pyx_kp_s_Seed_must_be_between_0_and_2_32; +static PyObject *__pyx_kp_s_Seed_must_be_non_empty; +static PyObject *__pyx_kp_s_Seed_values_must_be_between_0_an; static PyObject *__pyx_n_s_T; static PyObject *__pyx_kp_s_This_function_is_deprecated_Plea; static PyObject *__pyx_kp_s_This_function_is_deprecated_Plea_2; @@ -1794,17 +1919,19 @@ static PyObject *__pyx_n_s_a; static PyObject *__pyx_kp_s_a_0; static PyObject *__pyx_kp_s_a_0_2; -static PyObject *__pyx_kp_s_a_1_0; static PyObject *__pyx_kp_s_a_and_p_must_have_same_size; static PyObject *__pyx_kp_s_a_must_be_1_dimensional; static PyObject *__pyx_kp_s_a_must_be_1_dimensional_or_an_in; +static PyObject *__pyx_kp_s_a_must_be_a_valid_float_1_0; static PyObject *__pyx_kp_s_a_must_be_greater_than_0; 
static PyObject *__pyx_kp_s_a_must_be_non_empty; +static PyObject *__pyx_kp_s_a_must_contain_valid_floats_1_0; static PyObject *__pyx_n_s_add; static PyObject *__pyx_kp_s_algorithm_must_be_MT19937; static PyObject *__pyx_n_s_all; static PyObject *__pyx_n_s_allclose; static PyObject *__pyx_n_s_alpha; +static PyObject *__pyx_kp_s_alpha_0; static PyObject *__pyx_n_s_any; static PyObject *__pyx_n_s_arange; static PyObject *__pyx_n_s_array; @@ -1847,7 +1974,6 @@ static PyObject *__pyx_kp_s_dfden_0; static PyObject *__pyx_n_s_dfnum; static PyObject *__pyx_kp_s_dfnum_0; -static PyObject *__pyx_kp_s_dfnum_1; static PyObject *__pyx_n_s_dirichlet; static PyObject *__pyx_kp_u_dirichlet_alpha_size_None_Draw; static PyObject *__pyx_n_s_dot; @@ -2172,7 +2298,6 @@ static PyObject *__pyx_int_32768; static PyObject *__pyx_int_65536; static PyObject *__pyx_int_2147483648; -static PyObject *__pyx_int_4294967295; static PyObject *__pyx_int_4294967296; static PyObject *__pyx_int_9223372036854775808; static PyObject *__pyx_int_18446744073709551616; @@ -2181,7 +2306,7 @@ static PyObject *__pyx_int_neg_32768; static PyObject *__pyx_int_neg_2147483648; static PyObject *__pyx_int_neg_9223372036854775808; -static PyObject *__pyx_k__48; +static PyObject *__pyx_k__50; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; @@ -2191,8 +2316,8 @@ static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; -static PyObject *__pyx_slice__44; -static PyObject *__pyx_slice__45; +static PyObject *__pyx_slice__46; +static PyObject *__pyx_slice__47; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; @@ -2227,10 +2352,10 @@ static PyObject *__pyx_tuple__41; static PyObject *__pyx_tuple__42; static PyObject *__pyx_tuple__43; -static PyObject *__pyx_tuple__46; -static PyObject *__pyx_tuple__47; +static PyObject *__pyx_tuple__44; +static PyObject *__pyx_tuple__45; +static 
PyObject *__pyx_tuple__48; static PyObject *__pyx_tuple__49; -static PyObject *__pyx_tuple__50; static PyObject *__pyx_tuple__51; static PyObject *__pyx_tuple__52; static PyObject *__pyx_tuple__53; @@ -2280,8 +2405,8 @@ static PyObject *__pyx_tuple__97; static PyObject *__pyx_tuple__98; static PyObject *__pyx_tuple__99; -static PyObject *__pyx_slice__165; -static PyObject *__pyx_slice__168; +static PyObject *__pyx_slice__167; +static PyObject *__pyx_slice__171; static PyObject *__pyx_tuple__100; static PyObject *__pyx_tuple__101; static PyObject *__pyx_tuple__102; @@ -2347,17 +2472,19 @@ static PyObject *__pyx_tuple__162; static PyObject *__pyx_tuple__163; static PyObject *__pyx_tuple__164; +static PyObject *__pyx_tuple__165; static PyObject *__pyx_tuple__166; -static PyObject *__pyx_tuple__167; +static PyObject *__pyx_tuple__168; static PyObject *__pyx_tuple__169; static PyObject *__pyx_tuple__170; -static PyObject *__pyx_tuple__171; static PyObject *__pyx_tuple__172; static PyObject *__pyx_tuple__173; static PyObject *__pyx_tuple__174; static PyObject *__pyx_tuple__175; static PyObject *__pyx_tuple__176; +static PyObject *__pyx_tuple__177; static PyObject *__pyx_tuple__178; +static PyObject *__pyx_tuple__179; static PyObject *__pyx_tuple__180; static PyObject *__pyx_tuple__182; static PyObject *__pyx_tuple__184; @@ -2367,9 +2494,9 @@ static PyObject *__pyx_tuple__192; static PyObject *__pyx_tuple__194; static PyObject *__pyx_tuple__196; -static PyObject *__pyx_tuple__197; -static PyObject *__pyx_codeobj__177; -static PyObject *__pyx_codeobj__179; +static PyObject *__pyx_tuple__198; +static PyObject *__pyx_tuple__200; +static PyObject *__pyx_tuple__201; static PyObject *__pyx_codeobj__181; static PyObject *__pyx_codeobj__183; static PyObject *__pyx_codeobj__185; @@ -2378,6 +2505,9 @@ static PyObject *__pyx_codeobj__191; static PyObject *__pyx_codeobj__193; static PyObject *__pyx_codeobj__195; +static PyObject *__pyx_codeobj__197; +static PyObject 
*__pyx_codeobj__199; +/* Late includes */ /* "randint_helpers.pxi":5 * """ @@ -2420,23 +2550,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_bool", 1, 4, 4, 1); __PYX_ERR(1, 5, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_bool", 1, 4, 4, 2); __PYX_ERR(1, 5, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: - if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_bool", 1, 4, 4, 3); __PYX_ERR(1, 5, __pyx_L3_error) } @@ -2501,7 +2631,7 @@ * * rng = (high - low) */ - __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == NULL && PyErr_Occurred())) __PYX_ERR(1, 40, __pyx_L1_error) + __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == ((void *)NULL) && PyErr_Occurred())) __PYX_ERR(1, 40, __pyx_L1_error) __pyx_v_state = ((rk_state *)__pyx_t_1); /* "randint_helpers.pxi":42 @@ -2832,23 +2962,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low)) != 0)) 
kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int8", 1, 4, 4, 1); __PYX_ERR(1, 56, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int8", 1, 4, 4, 2); __PYX_ERR(1, 56, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: - if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int8", 1, 4, 4, 3); __PYX_ERR(1, 56, __pyx_L3_error) } @@ -2914,7 +3044,7 @@ * * rng = (high - low) */ - __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == NULL && PyErr_Occurred())) __PYX_ERR(1, 91, __pyx_L1_error) + __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == ((void *)NULL) && PyErr_Occurred())) __PYX_ERR(1, 91, __pyx_L1_error) __pyx_v_state = ((rk_state *)__pyx_t_1); /* "randint_helpers.pxi":93 @@ -3245,23 +3375,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int16", 1, 4, 4, 1); __PYX_ERR(1, 107, 
__pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int16", 1, 4, 4, 2); __PYX_ERR(1, 107, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: - if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int16", 1, 4, 4, 3); __PYX_ERR(1, 107, __pyx_L3_error) } @@ -3327,7 +3457,7 @@ * * rng = (high - low) */ - __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == NULL && PyErr_Occurred())) __PYX_ERR(1, 142, __pyx_L1_error) + __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == ((void *)NULL) && PyErr_Occurred())) __PYX_ERR(1, 142, __pyx_L1_error) __pyx_v_state = ((rk_state *)__pyx_t_1); /* "randint_helpers.pxi":144 @@ -3658,23 +3788,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int32", 1, 4, 4, 1); __PYX_ERR(1, 158, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int32", 1, 4, 4, 2); __PYX_ERR(1, 158, __pyx_L3_error) } 
CYTHON_FALLTHROUGH; case 3: - if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int32", 1, 4, 4, 3); __PYX_ERR(1, 158, __pyx_L3_error) } @@ -3740,7 +3870,7 @@ * * rng = (high - low) */ - __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == NULL && PyErr_Occurred())) __PYX_ERR(1, 193, __pyx_L1_error) + __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == ((void *)NULL) && PyErr_Occurred())) __PYX_ERR(1, 193, __pyx_L1_error) __pyx_v_state = ((rk_state *)__pyx_t_1); /* "randint_helpers.pxi":195 @@ -4071,23 +4201,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int64", 1, 4, 4, 1); __PYX_ERR(1, 209, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int64", 1, 4, 4, 2); __PYX_ERR(1, 209, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: - if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_int64", 1, 4, 4, 3); __PYX_ERR(1, 209, __pyx_L3_error) } @@ -4153,7 
+4283,7 @@ * * rng = (high - low) */ - __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == NULL && PyErr_Occurred())) __PYX_ERR(1, 244, __pyx_L1_error) + __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == ((void *)NULL) && PyErr_Occurred())) __PYX_ERR(1, 244, __pyx_L1_error) __pyx_v_state = ((rk_state *)__pyx_t_1); /* "randint_helpers.pxi":246 @@ -4484,23 +4614,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint8", 1, 4, 4, 1); __PYX_ERR(1, 260, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint8", 1, 4, 4, 2); __PYX_ERR(1, 260, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: - if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint8", 1, 4, 4, 3); __PYX_ERR(1, 260, __pyx_L3_error) } @@ -4565,7 +4695,7 @@ * * rng = (high - low) */ - __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == NULL && PyErr_Occurred())) __PYX_ERR(1, 295, __pyx_L1_error) + __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == ((void *)NULL) && PyErr_Occurred())) __PYX_ERR(1, 295, 
__pyx_L1_error) __pyx_v_state = ((rk_state *)__pyx_t_1); /* "randint_helpers.pxi":297 @@ -4896,23 +5026,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint16", 1, 4, 4, 1); __PYX_ERR(1, 311, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint16", 1, 4, 4, 2); __PYX_ERR(1, 311, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: - if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint16", 1, 4, 4, 3); __PYX_ERR(1, 311, __pyx_L3_error) } @@ -4977,7 +5107,7 @@ * * rng = (high - low) */ - __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == NULL && PyErr_Occurred())) __PYX_ERR(1, 346, __pyx_L1_error) + __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == ((void *)NULL) && PyErr_Occurred())) __PYX_ERR(1, 346, __pyx_L1_error) __pyx_v_state = ((rk_state *)__pyx_t_1); /* "randint_helpers.pxi":348 @@ -5308,23 +5438,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, 
__pyx_n_s_low)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint32", 1, 4, 4, 1); __PYX_ERR(1, 362, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint32", 1, 4, 4, 2); __PYX_ERR(1, 362, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: - if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint32", 1, 4, 4, 3); __PYX_ERR(1, 362, __pyx_L3_error) } @@ -5389,7 +5519,7 @@ * * rng = (high - low) */ - __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == NULL && PyErr_Occurred())) __PYX_ERR(1, 397, __pyx_L1_error) + __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == ((void *)NULL) && PyErr_Occurred())) __PYX_ERR(1, 397, __pyx_L1_error) __pyx_v_state = ((rk_state *)__pyx_t_1); /* "randint_helpers.pxi":399 @@ -5720,23 +5850,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("_rand_uint64", 1, 4, 4, 1); __PYX_ERR(1, 413, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint64", 1, 4, 4, 2); __PYX_ERR(1, 413, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: - if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rngstate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_rand_uint64", 1, 4, 4, 3); __PYX_ERR(1, 413, __pyx_L3_error) } @@ -5801,7 +5931,7 @@ * * rng = (high - low) */ - __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == NULL && PyErr_Occurred())) __PYX_ERR(1, 448, __pyx_L1_error) + __pyx_t_1 = PyCapsule_GetPointer(__pyx_v_rngstate, NULL); if (unlikely(__pyx_t_1 == ((void *)NULL) && PyErr_Occurred())) __PYX_ERR(1, 448, __pyx_L1_error) __pyx_v_state = ((rk_state *)__pyx_t_1); /* "randint_helpers.pxi":450 @@ -6088,7 +6218,7 @@ return __pyx_r; } -/* "numpy.pxd":157 +/* "numpy.pxd":158 * * # copied from cython version with addition of PyErr_Print. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< @@ -6109,7 +6239,7 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_array", 0); - /* "numpy.pxd":158 + /* "numpy.pxd":159 * # copied from cython version with addition of PyErr_Print. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< @@ -6125,16 +6255,16 @@ __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { - /* "numpy.pxd":159 + /* "numpy.pxd":160 * cdef inline int import_array() except -1: * try: * _import_array() # <<<<<<<<<<<<<< * except Exception: * PyErr_Print() */ - __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(2, 159, __pyx_L3_error) + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 160, __pyx_L3_error) - /* "numpy.pxd":158 + /* "numpy.pxd":159 * # copied from cython version with addition of PyErr_Print. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< @@ -6147,9 +6277,8 @@ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; - __Pyx_PyThreadState_assign - /* "numpy.pxd":160 + /* "numpy.pxd":161 * try: * _import_array() * except Exception: # <<<<<<<<<<<<<< @@ -6159,12 +6288,12 @@ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("mtrand.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 160, __pyx_L5_except_error) + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 161, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); - /* "numpy.pxd":161 + /* "numpy.pxd":162 * _import_array() * except Exception: * PyErr_Print() # <<<<<<<<<<<<<< @@ -6172,28 +6301,27 @@ */ PyErr_Print(); - /* "numpy.pxd":162 + /* "numpy.pxd":163 * except Exception: * PyErr_Print() * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< */ - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 162, __pyx_L5_except_error) + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_8)) 
__PYX_ERR(2, 163, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __PYX_ERR(2, 162, __pyx_L5_except_error) + __PYX_ERR(2, 163, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; - /* "numpy.pxd":158 + /* "numpy.pxd":159 * # copied from cython version with addition of PyErr_Print. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); @@ -6202,7 +6330,7 @@ __pyx_L8_try_end:; } - /* "numpy.pxd":157 + /* "numpy.pxd":158 * * # copied from cython version with addition of PyErr_Print. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< @@ -6300,7 +6428,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -6506,7 +6634,7 @@ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -6690,7 +6818,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -6896,7 +7024,7 @@ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -7102,7 +7230,7 @@ * itera = PyArray_IterNew(oa) * with lock, nogil: # <<<<<<<<<<<<<< * for i from 0 <= i < length: - * array_data[i] = func(state, ((itera.dataptr))[0]) + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) */ 
/*with:*/ { __pyx_t_5 = __Pyx_PyObject_LookupSpecial(__pyx_v_lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 212, __pyx_L1_error) @@ -7130,7 +7258,7 @@ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /*try:*/ { { - if (__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -7144,7 +7272,7 @@ * itera = PyArray_IterNew(oa) * with lock, nogil: * for i from 0 <= i < length: # <<<<<<<<<<<<<< - * array_data[i] = func(state, ((itera.dataptr))[0]) + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) * PyArray_ITER_NEXT(itera) */ __pyx_t_10 = __pyx_v_length; @@ -7153,15 +7281,15 @@ /* "mtrand.pyx":214 * with lock, nogil: * for i from 0 <= i < length: - * array_data[i] = func(state, ((itera.dataptr))[0]) # <<<<<<<<<<<<<< + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) # <<<<<<<<<<<<<< * PyArray_ITER_NEXT(itera) * else: */ - (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (((double *)__pyx_v_itera->dataptr)[0])); + (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (((double *)PyArray_ITER_DATA(__pyx_v_itera))[0])); /* "mtrand.pyx":215 * for i from 0 <= i < length: - * array_data[i] = func(state, ((itera.dataptr))[0]) + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) * PyArray_ITER_NEXT(itera) # <<<<<<<<<<<<<< * else: * array = np.empty(size, np.float64) @@ -7175,7 +7303,7 @@ * itera = PyArray_IterNew(oa) * with lock, nogil: # <<<<<<<<<<<<<< * for i from 0 <= i < length: - * array_data[i] = func(state, ((itera.dataptr))[0]) + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) */ /*finally:*/ { /*normal exit:*/{ @@ -7324,7 +7452,7 @@ * with lock, nogil: */ __pyx_t_2 = ((__pyx_v_multi->size != PyArray_SIZE(arrayObject)) != 0); - if (__pyx_t_2) { + if (unlikely(__pyx_t_2)) { /* "mtrand.pyx":222 * oa) @@ -7381,7 +7509,7 @@ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /*try:*/ { { - if 
(__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -7586,7 +7714,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -7792,7 +7920,7 @@ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -7932,6 +8060,7 @@ PyObject *__pyx_t_12 = NULL; npy_intp __pyx_t_13; npy_intp __pyx_t_14; + npy_intp __pyx_t_15; __Pyx_RefNannySetupContext("cont2_array", 0); /* "mtrand.pyx":260 @@ -8027,7 +8156,7 @@ __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 262, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 262, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 262, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); @@ -8075,7 +8204,7 @@ __Pyx_INCREF(__pyx_v_size); __Pyx_GIVEREF(__pyx_v_size); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_size); - __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 264, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); @@ -8177,7 +8306,7 @@ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 266, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (__pyx_t_2) { + if 
(unlikely(__pyx_t_2)) { /* "mtrand.pyx":267 * multi = np.broadcast(oa, ob, array) @@ -8245,7 +8374,7 @@ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /*try:*/ { { - if (__pyx_t_10||__pyx_t_11||__pyx_t_12); else {/*mark used*/} + (void)__pyx_t_10; (void)__pyx_t_11; (void)__pyx_t_12; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -8263,8 +8392,9 @@ * ob_data = PyArray_MultiIter_DATA(multi, 1) */ __pyx_t_13 = __pyx_v_multi->size; - for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { - __pyx_v_i = __pyx_t_14; + __pyx_t_14 = __pyx_t_13; + for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { + __pyx_v_i = __pyx_t_15; /* "mtrand.pyx":273 * with lock, nogil: @@ -8457,7 +8587,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -8663,7 +8793,7 @@ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -8804,6 +8934,7 @@ PyObject *__pyx_t_12 = NULL; npy_intp __pyx_t_13; npy_intp __pyx_t_14; + npy_intp __pyx_t_15; __Pyx_RefNannySetupContext("cont3_array", 0); /* "mtrand.pyx":312 @@ -8902,7 +9033,7 @@ __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 314, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 314, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); @@ -8950,7 +9081,7 @@ __Pyx_INCREF(__pyx_v_size); __Pyx_GIVEREF(__pyx_v_size); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_size); - __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 316, 
__pyx_L1_error) + __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 316, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 316, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); @@ -9055,7 +9186,7 @@ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 318, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (__pyx_t_2) { + if (unlikely(__pyx_t_2)) { /* "mtrand.pyx":319 * multi = np.broadcast(oa, ob, oc, array) @@ -9123,7 +9254,7 @@ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /*try:*/ { { - if (__pyx_t_10||__pyx_t_11||__pyx_t_12); else {/*mark used*/} + (void)__pyx_t_10; (void)__pyx_t_11; (void)__pyx_t_12; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -9141,8 +9272,9 @@ * ob_data = PyArray_MultiIter_DATA(multi, 1) */ __pyx_t_13 = __pyx_v_multi->size; - for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { - __pyx_v_i = __pyx_t_14; + __pyx_t_14 = __pyx_t_13; + for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { + __pyx_v_i = __pyx_t_15; /* "mtrand.pyx":325 * with lock, nogil: @@ -9343,7 +9475,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -9542,7 +9674,7 @@ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -9724,7 +9856,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -9923,7 +10055,7 @@ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /*try:*/ { { - if 
(__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -10061,6 +10193,7 @@ PyObject *__pyx_t_11 = NULL; npy_intp __pyx_t_12; npy_intp __pyx_t_13; + npy_intp __pyx_t_14; __Pyx_RefNannySetupContext("discnp_array", 0); /* "mtrand.pyx":381 @@ -10156,7 +10289,7 @@ __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 383, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 383, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, ((PyObject *)(&PyInt_Type))) < 0) __PYX_ERR(0, 383, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 383, __pyx_L1_error) @@ -10198,7 +10331,7 @@ __Pyx_INCREF(__pyx_v_size); __Pyx_GIVEREF(__pyx_v_size); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_size); - __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 385, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_dtype, ((PyObject *)(&PyInt_Type))) < 0) __PYX_ERR(0, 385, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 385, __pyx_L1_error) @@ -10294,7 +10427,7 @@ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 387, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__pyx_t_2) { + if (unlikely(__pyx_t_2)) { /* "mtrand.pyx":388 * multi = np.broadcast(on, op, array) @@ -10362,7 +10495,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_10||__pyx_t_11); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_10; 
(void)__pyx_t_11; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -10380,8 +10513,9 @@ * op_data = PyArray_MultiIter_DATA(multi, 1) */ __pyx_t_12 = __pyx_v_multi->size; - for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { - __pyx_v_i = __pyx_t_13; + __pyx_t_13 = __pyx_t_12; + for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { + __pyx_v_i = __pyx_t_14; /* "mtrand.pyx":394 * with lock, nogil: @@ -10572,7 +10706,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -10771,7 +10905,7 @@ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -10909,6 +11043,7 @@ PyObject *__pyx_t_11 = NULL; npy_intp __pyx_t_12; npy_intp __pyx_t_13; + npy_intp __pyx_t_14; __Pyx_RefNannySetupContext("discdd_array", 0); /* "mtrand.pyx":430 @@ -11004,7 +11139,7 @@ __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 432, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, ((PyObject *)(&PyInt_Type))) < 0) __PYX_ERR(0, 432, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 432, __pyx_L1_error) @@ -11046,7 +11181,7 @@ __Pyx_INCREF(__pyx_v_size); __Pyx_GIVEREF(__pyx_v_size); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_size); - __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 434, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if 
(PyDict_SetItem(__pyx_t_7, __pyx_n_s_dtype, ((PyObject *)(&PyInt_Type))) < 0) __PYX_ERR(0, 434, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 434, __pyx_L1_error) @@ -11142,7 +11277,7 @@ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 436, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__pyx_t_2) { + if (unlikely(__pyx_t_2)) { /* "mtrand.pyx":437 * multi = np.broadcast(on, op, array) @@ -11210,7 +11345,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_10||__pyx_t_11); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_10; (void)__pyx_t_11; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -11228,8 +11363,9 @@ * op_data = PyArray_MultiIter_DATA(multi, 1) */ __pyx_t_12 = __pyx_v_multi->size; - for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { - __pyx_v_i = __pyx_t_13; + __pyx_t_13 = __pyx_t_12; + for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { + __pyx_v_i = __pyx_t_14; /* "mtrand.pyx":443 * with lock, nogil: @@ -11420,7 +11556,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -11619,7 +11755,7 @@ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -11758,6 +11894,7 @@ PyObject *__pyx_t_11 = NULL; npy_intp __pyx_t_12; npy_intp __pyx_t_13; + npy_intp __pyx_t_14; __Pyx_RefNannySetupContext("discnmN_array", 0); /* "mtrand.pyx":480 @@ -11856,7 +11993,7 @@ __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 482, 
__pyx_L1_error) + __pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 482, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, ((PyObject *)(&PyInt_Type))) < 0) __PYX_ERR(0, 482, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 482, __pyx_L1_error) @@ -11898,7 +12035,7 @@ __Pyx_INCREF(__pyx_v_size); __Pyx_GIVEREF(__pyx_v_size); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_size); - __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 484, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 484, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_dtype, ((PyObject *)(&PyInt_Type))) < 0) __PYX_ERR(0, 484, __pyx_L1_error) __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 484, __pyx_L1_error) @@ -11997,7 +12134,7 @@ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 486, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__pyx_t_2) { + if (unlikely(__pyx_t_2)) { /* "mtrand.pyx":487 * multi = np.broadcast(on, om, oN, array) @@ -12065,7 +12202,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_10||__pyx_t_11); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_10; (void)__pyx_t_11; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -12083,8 +12220,9 @@ * om_data = PyArray_MultiIter_DATA(multi, 1) */ __pyx_t_12 = __pyx_v_multi->size; - for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { - __pyx_v_i = __pyx_t_13; + __pyx_t_13 = __pyx_t_12; + for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { + __pyx_v_i = __pyx_t_14; /* "mtrand.pyx":493 * with lock, nogil: @@ -12284,7 +12422,7 @@ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /*try:*/ { { - if 
(__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -12483,7 +12621,7 @@ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -12687,7 +12825,7 @@ * itera = PyArray_IterNew(oa) * with lock, nogil: # <<<<<<<<<<<<<< * for i from 0 <= i < length: - * array_data[i] = func(state, ((itera.dataptr))[0]) + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) */ /*with:*/ { __pyx_t_5 = __Pyx_PyObject_LookupSpecial(__pyx_v_lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 537, __pyx_L1_error) @@ -12715,7 +12853,7 @@ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /*try:*/ { { - if (__pyx_t_7||__pyx_t_8||__pyx_t_9); else {/*mark used*/} + (void)__pyx_t_7; (void)__pyx_t_8; (void)__pyx_t_9; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -12729,7 +12867,7 @@ * itera = PyArray_IterNew(oa) * with lock, nogil: * for i from 0 <= i < length: # <<<<<<<<<<<<<< - * array_data[i] = func(state, ((itera.dataptr))[0]) + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) * PyArray_ITER_NEXT(itera) */ __pyx_t_10 = __pyx_v_length; @@ -12738,15 +12876,15 @@ /* "mtrand.pyx":539 * with lock, nogil: * for i from 0 <= i < length: - * array_data[i] = func(state, ((itera.dataptr))[0]) # <<<<<<<<<<<<<< + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) # <<<<<<<<<<<<<< * PyArray_ITER_NEXT(itera) * else: */ - (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (((double *)__pyx_v_itera->dataptr)[0])); + (__pyx_v_array_data[__pyx_v_i]) = __pyx_v_func(__pyx_v_state, (((double *)PyArray_ITER_DATA(__pyx_v_itera))[0])); /* "mtrand.pyx":540 * for i from 0 <= i < length: - * array_data[i] = func(state, ((itera.dataptr))[0]) + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) * 
PyArray_ITER_NEXT(itera) # <<<<<<<<<<<<<< * else: * array = np.empty(size, int) @@ -12760,7 +12898,7 @@ * itera = PyArray_IterNew(oa) * with lock, nogil: # <<<<<<<<<<<<<< * for i from 0 <= i < length: - * array_data[i] = func(state, ((itera.dataptr))[0]) + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) */ /*finally:*/ { /*normal exit:*/{ @@ -12902,7 +13040,7 @@ * with lock, nogil: */ __pyx_t_2 = ((__pyx_v_multi->size != PyArray_SIZE(arrayObject)) != 0); - if (__pyx_t_2) { + if (unlikely(__pyx_t_2)) { /* "mtrand.pyx":546 * multi = PyArray_MultiIterNew(2, array, oa) @@ -12959,7 +13097,7 @@ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /*try:*/ { { - if (__pyx_t_9||__pyx_t_8||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_9; (void)__pyx_t_8; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -13230,11 +13368,11 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_d)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_d)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("_shape_from_size", 1, 2, 2, 1); __PYX_ERR(0, 566, __pyx_L3_error) } @@ -13416,7 +13554,6 @@ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; - __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -13444,7 +13581,7 @@ * return shape * */ - __pyx_t_7 = PySequence_Tuple(__pyx_v_size); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 573, __pyx_L6_except_error) + __pyx_t_7 = __Pyx_PySequence_Tuple(__pyx_v_size); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 573, __pyx_L6_except_error) 
__Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 573, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_11); @@ -13472,14 +13609,12 @@ * shape = (operator.index(size), d) * except TypeError: */ - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); goto __pyx_L1_error; __pyx_L5_exception_handled:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_6); @@ -13558,7 +13693,7 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_seed); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seed); if (value) { values[0] = value; kw_args--; } } } @@ -13816,7 +13951,7 @@ /* Python wrapper */ static PyObject *__pyx_pw_6mtrand_11RandomState_5seed(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6mtrand_11RandomState_4seed[] = "\n seed(seed=None)\n\n Seed the generator.\n\n This method is called when `RandomState` is initialized. It can be\n called again to re-seed the generator. For details, see `RandomState`.\n\n Parameters\n ----------\n seed : int or array_like, optional\n Seed for `RandomState`.\n Must be convertible to 32 bit unsigned integers.\n\n See Also\n --------\n RandomState\n\n "; +static char __pyx_doc_6mtrand_11RandomState_4seed[] = "\n seed(seed=None)\n\n Seed the generator.\n\n This method is called when `RandomState` is initialized. It can be\n called again to re-seed the generator. 
For details, see `RandomState`.\n\n Parameters\n ----------\n seed : int or 1-d array_like, optional\n Seed for `RandomState`.\n Must be convertible to 32 bit unsigned integers.\n\n See Also\n --------\n RandomState\n\n "; static PyObject *__pyx_pw_6mtrand_11RandomState_5seed(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_seed = 0; PyObject *__pyx_r = 0; @@ -13839,7 +13974,7 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_seed); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seed); if (value) { values[0] = value; kw_args--; } } } @@ -13958,7 +14093,7 @@ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /*try:*/ { { - if (__pyx_t_10||__pyx_t_11||__pyx_t_12); else {/*mark used*/} + (void)__pyx_t_10; (void)__pyx_t_11; (void)__pyx_t_12; /* mark used */ /*try:*/ { /* "mtrand.pyx":676 @@ -14014,7 +14149,7 @@ * errcode = rk_randomseed(self.internal_state) * else: * idx = operator.index(seed) # <<<<<<<<<<<<<< - * if idx > int(2**32 - 1) or idx < 0: + * if (idx >= 2**32) or (idx < 0): * raise ValueError("Seed must be between 0 and 2**32 - 1") */ /*else*/ { @@ -14072,52 +14207,49 @@ /* "mtrand.pyx":679 * else: * idx = operator.index(seed) - * if idx > int(2**32 - 1) or idx < 0: # <<<<<<<<<<<<<< + * if (idx >= 2**32) or (idx < 0): # <<<<<<<<<<<<<< * raise ValueError("Seed must be between 0 and 2**32 - 1") * with self.lock: */ - __pyx_t_7 = __Pyx_PyNumber_Int(__pyx_int_4294967295); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 679, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = PyObject_RichCompare(__pyx_v_idx, __pyx_t_7, Py_GT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 679, __pyx_L3_error) + __pyx_t_7 = PyObject_RichCompare(__pyx_v_idx, __pyx_int_4294967296, Py_GE); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 679, __pyx_L3_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 
679, __pyx_L3_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 679, __pyx_L3_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (!__pyx_t_4) { } else { __pyx_t_5 = __pyx_t_4; goto __pyx_L22_bool_binop_done; } - __pyx_t_9 = PyObject_RichCompare(__pyx_v_idx, __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 679, __pyx_L3_error) - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 679, __pyx_L3_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_t_7 = PyObject_RichCompare(__pyx_v_idx, __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 679, __pyx_L3_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 679, __pyx_L3_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_5 = __pyx_t_4; __pyx_L22_bool_binop_done:; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { /* "mtrand.pyx":680 * idx = operator.index(seed) - * if idx > int(2**32 - 1) or idx < 0: + * if (idx >= 2**32) or (idx < 0): * raise ValueError("Seed must be between 0 and 2**32 - 1") # <<<<<<<<<<<<<< * with self.lock: * rk_seed(idx, self.internal_state) */ - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__37, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 680, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_Raise(__pyx_t_9, 0, 0, 0); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__37, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 680, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_Raise(__pyx_t_7, 0, 0, 0); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __PYX_ERR(0, 680, __pyx_L3_error) /* "mtrand.pyx":679 * else: * idx = operator.index(seed) - * if idx > int(2**32 - 1) or idx < 0: # <<<<<<<<<<<<<< + * if (idx >= 2**32) or (idx < 0): # <<<<<<<<<<<<<< * raise ValueError("Seed must 
be between 0 and 2**32 - 1") * with self.lock: */ } /* "mtrand.pyx":681 - * if idx > int(2**32 - 1) or idx < 0: + * if (idx >= 2**32) or (idx < 0): * raise ValueError("Seed must be between 0 and 2**32 - 1") * with self.lock: # <<<<<<<<<<<<<< * rk_seed(idx, self.internal_state) @@ -14126,27 +14258,27 @@ /*with:*/ { __pyx_t_6 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 681, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 681, __pyx_L24_error) - __Pyx_GOTREF(__pyx_t_7); + __pyx_t_9 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 681, __pyx_L24_error) + __Pyx_GOTREF(__pyx_t_9); __pyx_t_13 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_13 = PyMethod_GET_SELF(__pyx_t_7); + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { + __pyx_t_13 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_13)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_13); __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); + __Pyx_DECREF_SET(__pyx_t_9, function); } } if (__pyx_t_13) { - __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_13); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 681, __pyx_L24_error) + __pyx_t_7 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_13); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 681, __pyx_L24_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } else { - __pyx_t_9 = __Pyx_PyObject_CallNoArg(__pyx_t_7); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 681, __pyx_L24_error) + __pyx_t_7 = __Pyx_PyObject_CallNoArg(__pyx_t_9); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 681, __pyx_L24_error) } - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_GOTREF(__pyx_t_7); 
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /*try:*/ { { __Pyx_PyThreadState_declare @@ -14162,13 +14294,13 @@ * with self.lock: * rk_seed(idx, self.internal_state) # <<<<<<<<<<<<<< * except TypeError: - * obj = np.asarray(seed).astype(np.int64, casting='safe') + * obj = np.asarray(seed) */ __pyx_t_14 = __Pyx_PyInt_As_unsigned_long(__pyx_v_idx); if (unlikely((__pyx_t_14 == (unsigned long)-1) && PyErr_Occurred())) __PYX_ERR(0, 682, __pyx_L28_error) rk_seed(__pyx_t_14, __pyx_v_self->internal_state); /* "mtrand.pyx":681 - * if idx > int(2**32 - 1) or idx < 0: + * if (idx >= 2**32) or (idx < 0): * raise ValueError("Seed must be between 0 and 2**32 - 1") * with self.lock: # <<<<<<<<<<<<<< * rk_seed(idx, self.internal_state) @@ -14180,18 +14312,17 @@ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; goto __pyx_L33_try_end; __pyx_L28_error:; - __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /*except:*/ { __Pyx_AddTraceback("mtrand.RandomState.seed", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_7, &__pyx_t_13) < 0) __PYX_ERR(0, 681, __pyx_L30_except_error) - __Pyx_GOTREF(__pyx_t_9); + if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_9, &__pyx_t_13) < 0) __PYX_ERR(0, 681, __pyx_L30_except_error) __Pyx_GOTREF(__pyx_t_7); + __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_13); - __pyx_t_8 = PyTuple_Pack(3, __pyx_t_9, __pyx_t_7, __pyx_t_13); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 681, __pyx_L30_except_error) + __pyx_t_8 = PyTuple_Pack(3, __pyx_t_7, __pyx_t_9, __pyx_t_13); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 681, __pyx_L30_except_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_15 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; @@ -14203,27 +14334,25 @@ if (__pyx_t_5 < 0) __PYX_ERR(0, 
681, __pyx_L30_except_error) __pyx_t_4 = ((!(__pyx_t_5 != 0)) != 0); if (__pyx_t_4) { - __Pyx_GIVEREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); + __Pyx_GIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_13); - __Pyx_ErrRestoreWithState(__pyx_t_9, __pyx_t_7, __pyx_t_13); - __pyx_t_9 = 0; __pyx_t_7 = 0; __pyx_t_13 = 0; + __Pyx_ErrRestoreWithState(__pyx_t_7, __pyx_t_9, __pyx_t_13); + __pyx_t_7 = 0; __pyx_t_9 = 0; __pyx_t_13 = 0; __PYX_ERR(0, 681, __pyx_L30_except_error) } - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; goto __pyx_L29_exception_handled; } __pyx_L30_except_error:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_12); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_ExceptionReset(__pyx_t_12, __pyx_t_11, __pyx_t_10); goto __pyx_L3_error; __pyx_L29_exception_handled:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_12); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_10); @@ -14266,33 +14395,32 @@ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; - __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; /* "mtrand.pyx":683 * with self.lock: * rk_seed(idx, self.internal_state) * except TypeError: # <<<<<<<<<<<<<< - * obj = np.asarray(seed).astype(np.int64, casting='safe') - * if ((obj > int(2**32 - 1)) | (obj < 0)).any(): + * obj = np.asarray(seed) + * if obj.size == 0: */ __pyx_t_16 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_16) { __Pyx_AddTraceback("mtrand.RandomState.seed", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_13, &__pyx_t_7, &__pyx_t_9) < 0) __PYX_ERR(0, 683, __pyx_L5_except_error) + if (__Pyx_GetException(&__pyx_t_13, &__pyx_t_9, &__pyx_t_7) < 0) 
__PYX_ERR(0, 683, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_13); - __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_9); + __Pyx_GOTREF(__pyx_t_7); /* "mtrand.pyx":684 * rk_seed(idx, self.internal_state) * except TypeError: - * obj = np.asarray(seed).astype(np.int64, casting='safe') # <<<<<<<<<<<<<< - * if ((obj > int(2**32 - 1)) | (obj < 0)).any(): - * raise ValueError("Seed must be between 0 and 2**32 - 1") + * obj = np.asarray(seed) # <<<<<<<<<<<<<< + * if obj.size == 0: + * raise ValueError("Seed must be non-empty") */ __pyx_t_17 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 684, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_17); @@ -14342,151 +14470,233 @@ } } __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_astype); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 684, __pyx_L5_except_error) + if (!(likely(((__pyx_t_8) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_8, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 684, __pyx_L5_except_error) + arrayObject_obj = ((PyArrayObject *)__pyx_t_8); + __pyx_t_8 = 0; + + /* "mtrand.pyx":685 + * except TypeError: + * obj = np.asarray(seed) + * if obj.size == 0: # <<<<<<<<<<<<<< + * raise ValueError("Seed must be non-empty") + * obj = obj.astype(np.int64, casting='safe') + */ + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)arrayObject_obj), __pyx_n_s_size); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 685, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_18 = __Pyx_PyInt_EqObjC(__pyx_t_8, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 685, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_18); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 684, __pyx_L5_except_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_18); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 685, __pyx_L5_except_error) + __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; + if 
(unlikely(__pyx_t_4)) { + + /* "mtrand.pyx":686 + * obj = np.asarray(seed) + * if obj.size == 0: + * raise ValueError("Seed must be non-empty") # <<<<<<<<<<<<<< + * obj = obj.astype(np.int64, casting='safe') + * if obj.ndim != 1: + */ + __pyx_t_18 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__39, NULL); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 686, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_18); + __Pyx_Raise(__pyx_t_18, 0, 0, 0); + __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; + __PYX_ERR(0, 686, __pyx_L5_except_error) + + /* "mtrand.pyx":685 + * except TypeError: + * obj = np.asarray(seed) + * if obj.size == 0: # <<<<<<<<<<<<<< + * raise ValueError("Seed must be non-empty") + * obj = obj.astype(np.int64, casting='safe') + */ + } + + /* "mtrand.pyx":687 + * if obj.size == 0: + * raise ValueError("Seed must be non-empty") + * obj = obj.astype(np.int64, casting='safe') # <<<<<<<<<<<<<< + * if obj.ndim != 1: + * raise ValueError("Seed array must be 1-d") + */ + __pyx_t_18 = __Pyx_PyObject_GetAttrStr(((PyObject *)arrayObject_obj), __pyx_n_s_astype); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 687, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_18); + __pyx_t_8 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 687, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); - __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_int64); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 684, __pyx_L5_except_error) + __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_int64); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 687, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_19); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 684, __pyx_L5_except_error) + __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 687, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_19); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_19); __pyx_t_19 = 0; - __pyx_t_19 = PyDict_New(); if 
(unlikely(!__pyx_t_19)) __PYX_ERR(0, 684, __pyx_L5_except_error) + __pyx_t_19 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 687, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_19); - if (PyDict_SetItem(__pyx_t_19, __pyx_n_s_casting, __pyx_n_s_safe) < 0) __PYX_ERR(0, 684, __pyx_L5_except_error) - __pyx_t_17 = __Pyx_PyObject_Call(__pyx_t_18, __pyx_t_8, __pyx_t_19); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 684, __pyx_L5_except_error) + if (PyDict_SetItem(__pyx_t_19, __pyx_n_s_casting, __pyx_n_s_safe) < 0) __PYX_ERR(0, 687, __pyx_L5_except_error) + __pyx_t_17 = __Pyx_PyObject_Call(__pyx_t_18, __pyx_t_8, __pyx_t_19); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 687, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_17); __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; - if (!(likely(((__pyx_t_17) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_17, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 684, __pyx_L5_except_error) - arrayObject_obj = ((PyArrayObject *)__pyx_t_17); + if (!(likely(((__pyx_t_17) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_17, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 687, __pyx_L5_except_error) + __Pyx_DECREF_SET(arrayObject_obj, ((PyArrayObject *)__pyx_t_17)); __pyx_t_17 = 0; - /* "mtrand.pyx":685 - * except TypeError: - * obj = np.asarray(seed).astype(np.int64, casting='safe') - * if ((obj > int(2**32 - 1)) | (obj < 0)).any(): # <<<<<<<<<<<<<< - * raise ValueError("Seed must be between 0 and 2**32 - 1") - * obj = obj.astype('L', casting='unsafe') + /* "mtrand.pyx":688 + * raise ValueError("Seed must be non-empty") + * obj = obj.astype(np.int64, casting='safe') + * if obj.ndim != 1: # <<<<<<<<<<<<<< + * raise ValueError("Seed array must be 1-d") + * if ((obj >= 2**32) | (obj < 0)).any(): */ - __pyx_t_19 = __Pyx_PyNumber_Int(__pyx_int_4294967295); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 685, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_19); - __pyx_t_8 = 
PyObject_RichCompare(((PyObject *)arrayObject_obj), __pyx_t_19, Py_GT); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 685, __pyx_L5_except_error) + __pyx_t_17 = __Pyx_PyObject_GetAttrStr(((PyObject *)arrayObject_obj), __pyx_n_s_ndim); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 688, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_17); + __pyx_t_19 = PyObject_RichCompare(__pyx_t_17, __pyx_int_1, Py_NE); __Pyx_XGOTREF(__pyx_t_19); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 688, __pyx_L5_except_error) + __Pyx_DECREF(__pyx_t_17); __pyx_t_17 = 0; + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_19); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 688, __pyx_L5_except_error) __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; - __pyx_t_19 = PyObject_RichCompare(((PyObject *)arrayObject_obj), __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_19); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 685, __pyx_L5_except_error) - __pyx_t_18 = PyNumber_Or(__pyx_t_8, __pyx_t_19); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 685, __pyx_L5_except_error) + if (unlikely(__pyx_t_4)) { + + /* "mtrand.pyx":689 + * obj = obj.astype(np.int64, casting='safe') + * if obj.ndim != 1: + * raise ValueError("Seed array must be 1-d") # <<<<<<<<<<<<<< + * if ((obj >= 2**32) | (obj < 0)).any(): + * raise ValueError("Seed values must be between 0 and 2**32 - 1") + */ + __pyx_t_19 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__40, NULL); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 689, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_19); + __Pyx_Raise(__pyx_t_19, 0, 0, 0); + __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; + __PYX_ERR(0, 689, __pyx_L5_except_error) + + /* "mtrand.pyx":688 + * raise ValueError("Seed must be non-empty") + * obj = obj.astype(np.int64, casting='safe') + * if obj.ndim != 1: # <<<<<<<<<<<<<< + * raise ValueError("Seed array must be 1-d") + * if ((obj >= 2**32) | (obj < 0)).any(): + */ + } + + /* "mtrand.pyx":690 + * if obj.ndim != 1: + * raise ValueError("Seed array must be 1-d") + * if ((obj >= 2**32) 
| (obj < 0)).any(): # <<<<<<<<<<<<<< + * raise ValueError("Seed values must be between 0 and 2**32 - 1") + * obj = obj.astype('L', casting='unsafe') + */ + __pyx_t_17 = PyObject_RichCompare(((PyObject *)arrayObject_obj), __pyx_int_4294967296, Py_GE); __Pyx_XGOTREF(__pyx_t_17); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 690, __pyx_L5_except_error) + __pyx_t_8 = PyObject_RichCompare(((PyObject *)arrayObject_obj), __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 690, __pyx_L5_except_error) + __pyx_t_18 = PyNumber_Or(__pyx_t_17, __pyx_t_8); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 690, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_18); + __Pyx_DECREF(__pyx_t_17); __pyx_t_17 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; - __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_18, __pyx_n_s_any); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 685, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_19); + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_18, __pyx_n_s_any); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 690, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; __pyx_t_18 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_19))) { - __pyx_t_18 = PyMethod_GET_SELF(__pyx_t_19); + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { + __pyx_t_18 = PyMethod_GET_SELF(__pyx_t_8); if (likely(__pyx_t_18)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_19); + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); __Pyx_INCREF(__pyx_t_18); __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_19, function); + __Pyx_DECREF_SET(__pyx_t_8, function); } } if (__pyx_t_18) { - __pyx_t_17 = __Pyx_PyObject_CallOneArg(__pyx_t_19, __pyx_t_18); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 685, __pyx_L5_except_error) + __pyx_t_19 = __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_18); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 690, __pyx_L5_except_error) 
__Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; } else { - __pyx_t_17 = __Pyx_PyObject_CallNoArg(__pyx_t_19); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 685, __pyx_L5_except_error) + __pyx_t_19 = __Pyx_PyObject_CallNoArg(__pyx_t_8); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 690, __pyx_L5_except_error) } - __Pyx_GOTREF(__pyx_t_17); + __Pyx_GOTREF(__pyx_t_19); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_19); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 690, __pyx_L5_except_error) __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_17); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 685, __pyx_L5_except_error) - __Pyx_DECREF(__pyx_t_17); __pyx_t_17 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":686 - * obj = np.asarray(seed).astype(np.int64, casting='safe') - * if ((obj > int(2**32 - 1)) | (obj < 0)).any(): - * raise ValueError("Seed must be between 0 and 2**32 - 1") # <<<<<<<<<<<<<< + /* "mtrand.pyx":691 + * raise ValueError("Seed array must be 1-d") + * if ((obj >= 2**32) | (obj < 0)).any(): + * raise ValueError("Seed values must be between 0 and 2**32 - 1") # <<<<<<<<<<<<<< * obj = obj.astype('L', casting='unsafe') * with self.lock: */ - __pyx_t_17 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__39, NULL); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 686, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_17); - __Pyx_Raise(__pyx_t_17, 0, 0, 0); - __Pyx_DECREF(__pyx_t_17); __pyx_t_17 = 0; - __PYX_ERR(0, 686, __pyx_L5_except_error) + __pyx_t_19 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__41, NULL); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 691, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_19); + __Pyx_Raise(__pyx_t_19, 0, 0, 0); + __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; + __PYX_ERR(0, 691, __pyx_L5_except_error) - /* "mtrand.pyx":685 - * except TypeError: - * obj = np.asarray(seed).astype(np.int64, casting='safe') - * if ((obj > int(2**32 - 1)) | (obj < 0)).any(): # 
<<<<<<<<<<<<<< - * raise ValueError("Seed must be between 0 and 2**32 - 1") + /* "mtrand.pyx":690 + * if obj.ndim != 1: + * raise ValueError("Seed array must be 1-d") + * if ((obj >= 2**32) | (obj < 0)).any(): # <<<<<<<<<<<<<< + * raise ValueError("Seed values must be between 0 and 2**32 - 1") * obj = obj.astype('L', casting='unsafe') */ } - /* "mtrand.pyx":687 - * if ((obj > int(2**32 - 1)) | (obj < 0)).any(): - * raise ValueError("Seed must be between 0 and 2**32 - 1") + /* "mtrand.pyx":692 + * if ((obj >= 2**32) | (obj < 0)).any(): + * raise ValueError("Seed values must be between 0 and 2**32 - 1") * obj = obj.astype('L', casting='unsafe') # <<<<<<<<<<<<<< * with self.lock: * init_by_array(self.internal_state, PyArray_DATA(obj), */ - __pyx_t_17 = __Pyx_PyObject_GetAttrStr(((PyObject *)arrayObject_obj), __pyx_n_s_astype); if (unlikely(!__pyx_t_17)) __PYX_ERR(0, 687, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_17); - __pyx_t_19 = PyDict_New(); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 687, __pyx_L5_except_error) + __pyx_t_19 = __Pyx_PyObject_GetAttrStr(((PyObject *)arrayObject_obj), __pyx_n_s_astype); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 692, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_19); - if (PyDict_SetItem(__pyx_t_19, __pyx_n_s_casting, __pyx_n_s_unsafe) < 0) __PYX_ERR(0, 687, __pyx_L5_except_error) - __pyx_t_18 = __Pyx_PyObject_Call(__pyx_t_17, __pyx_tuple__40, __pyx_t_19); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 687, __pyx_L5_except_error) + __pyx_t_8 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 692, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + if (PyDict_SetItem(__pyx_t_8, __pyx_n_s_casting, __pyx_n_s_unsafe) < 0) __PYX_ERR(0, 692, __pyx_L5_except_error) + __pyx_t_18 = __Pyx_PyObject_Call(__pyx_t_19, __pyx_tuple__42, __pyx_t_8); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 692, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_18); - __Pyx_DECREF(__pyx_t_17); __pyx_t_17 = 0; __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; - if 
(!(likely(((__pyx_t_18) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_18, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 687, __pyx_L5_except_error) + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + if (!(likely(((__pyx_t_18) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_18, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 692, __pyx_L5_except_error) __Pyx_DECREF_SET(arrayObject_obj, ((PyArrayObject *)__pyx_t_18)); __pyx_t_18 = 0; - /* "mtrand.pyx":688 - * raise ValueError("Seed must be between 0 and 2**32 - 1") + /* "mtrand.pyx":693 + * raise ValueError("Seed values must be between 0 and 2**32 - 1") * obj = obj.astype('L', casting='unsafe') * with self.lock: # <<<<<<<<<<<<<< * init_by_array(self.internal_state, PyArray_DATA(obj), * PyArray_DIM(obj, 0)) */ /*with:*/ { - __pyx_t_6 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 688, __pyx_L5_except_error) + __pyx_t_6 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 693, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_19 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 688, __pyx_L41_error) - __Pyx_GOTREF(__pyx_t_19); - __pyx_t_17 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_19))) { - __pyx_t_17 = PyMethod_GET_SELF(__pyx_t_19); - if (likely(__pyx_t_17)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_19); - __Pyx_INCREF(__pyx_t_17); + __pyx_t_8 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 693, __pyx_L43_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_19 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { + __pyx_t_19 = PyMethod_GET_SELF(__pyx_t_8); + if (likely(__pyx_t_19)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); + __Pyx_INCREF(__pyx_t_19); __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_19, function); + 
__Pyx_DECREF_SET(__pyx_t_8, function); } } - if (__pyx_t_17) { - __pyx_t_18 = __Pyx_PyObject_CallOneArg(__pyx_t_19, __pyx_t_17); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 688, __pyx_L41_error) - __Pyx_DECREF(__pyx_t_17); __pyx_t_17 = 0; + if (__pyx_t_19) { + __pyx_t_18 = __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_19); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 693, __pyx_L43_error) + __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; } else { - __pyx_t_18 = __Pyx_PyObject_CallNoArg(__pyx_t_19); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 688, __pyx_L41_error) + __pyx_t_18 = __Pyx_PyObject_CallNoArg(__pyx_t_8); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 693, __pyx_L43_error) } __Pyx_GOTREF(__pyx_t_18); - __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; /*try:*/ { { - if (__pyx_t_10||__pyx_t_11||__pyx_t_12); else {/*mark used*/} + (void)__pyx_t_10; (void)__pyx_t_11; (void)__pyx_t_12; /* mark used */ /*try:*/ { - /* "mtrand.pyx":689 + /* "mtrand.pyx":694 * obj = obj.astype('L', casting='unsafe') * with self.lock: * init_by_array(self.internal_state, PyArray_DATA(obj), # <<<<<<<<<<<<<< @@ -14495,8 +14705,8 @@ */ init_by_array(__pyx_v_self->internal_state, ((unsigned long *)PyArray_DATA(arrayObject_obj)), PyArray_DIM(arrayObject_obj, 0)); - /* "mtrand.pyx":688 - * raise ValueError("Seed must be between 0 and 2**32 - 1") + /* "mtrand.pyx":693 + * raise ValueError("Seed values must be between 0 and 2**32 - 1") * obj = obj.astype('L', casting='unsafe') * with self.lock: # <<<<<<<<<<<<<< * init_by_array(self.internal_state, PyArray_DATA(obj), @@ -14508,25 +14718,25 @@ /*finally:*/ { /*normal exit:*/{ if (__pyx_t_6) { - __pyx_t_12 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_tuple__41, NULL); + __pyx_t_12 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_tuple__43, NULL); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 688, __pyx_L5_except_error) + if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 693, 
__pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } - goto __pyx_L46; + goto __pyx_L48; } - __pyx_L46:; + __pyx_L48:; } - goto __pyx_L55; - __pyx_L41_error:; + goto __pyx_L57; + __pyx_L43_error:; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L5_except_error; - __pyx_L55:; + __pyx_L57:; } __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L4_exception_handled; } goto __pyx_L5_except_error; @@ -14539,14 +14749,12 @@ * if seed is None: * with self.lock: */ - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L4_exception_handled:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); @@ -14583,7 +14791,7 @@ return __pyx_r; } -/* "mtrand.pyx":692 +/* "mtrand.pyx":697 * PyArray_DIM(obj, 0)) * * def get_state(self): # <<<<<<<<<<<<<< @@ -14625,21 +14833,21 @@ double __pyx_t_11; __Pyx_RefNannySetupContext("get_state", 0); - /* "mtrand.pyx":723 + /* "mtrand.pyx":728 * """ * cdef ndarray state "arrayObject_state" * state = np.empty(624, np.uint) # <<<<<<<<<<<<<< * with self.lock: * memcpy(PyArray_DATA(state), (self.internal_state.key), 624*sizeof(long)) */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 723, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 728, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 723, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 728, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); 
__pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 723, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 728, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_uint); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 723, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_uint); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 728, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -14657,7 +14865,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_int_624, __pyx_t_4}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 723, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 728, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -14666,14 +14874,14 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_int_624, __pyx_t_4}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 723, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 728, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { - __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 723, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 728, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_2) { 
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2); __pyx_t_2 = NULL; @@ -14684,7 +14892,7 @@ __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_t_4); __pyx_t_4 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 723, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 728, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } @@ -14695,7 +14903,7 @@ arrayObject_state = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":724 + /* "mtrand.pyx":729 * cdef ndarray state "arrayObject_state" * state = np.empty(624, np.uint) * with self.lock: # <<<<<<<<<<<<<< @@ -14703,9 +14911,9 @@ * has_gauss = self.internal_state.has_gauss */ /*with:*/ { - __pyx_t_7 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 724, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_1 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 724, __pyx_L3_error) + __pyx_t_1 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 729, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { @@ -14718,29 +14926,29 @@ } } if (__pyx_t_6) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 724, __pyx_L3_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 729, __pyx_L3_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else { - __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 724, __pyx_L3_error) + 
__pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 729, __pyx_L3_error) } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /*try:*/ { { - if (__pyx_t_8||__pyx_t_9||__pyx_t_10); else {/*mark used*/} + (void)__pyx_t_8; (void)__pyx_t_9; (void)__pyx_t_10; /* mark used */ /*try:*/ { - /* "mtrand.pyx":725 + /* "mtrand.pyx":730 * state = np.empty(624, np.uint) * with self.lock: * memcpy(PyArray_DATA(state), (self.internal_state.key), 624*sizeof(long)) # <<<<<<<<<<<<<< * has_gauss = self.internal_state.has_gauss * gauss = self.internal_state.gauss */ - memcpy(((void *)PyArray_DATA(arrayObject_state)), ((void *)__pyx_v_self->internal_state->key), (0x270 * (sizeof(long)))); + (void)(memcpy(((void *)PyArray_DATA(arrayObject_state)), ((void *)__pyx_v_self->internal_state->key), (0x270 * (sizeof(long))))); - /* "mtrand.pyx":726 + /* "mtrand.pyx":731 * with self.lock: * memcpy(PyArray_DATA(state), (self.internal_state.key), 624*sizeof(long)) * has_gauss = self.internal_state.has_gauss # <<<<<<<<<<<<<< @@ -14750,7 +14958,7 @@ __pyx_t_5 = __pyx_v_self->internal_state->has_gauss; __pyx_v_has_gauss = __pyx_t_5; - /* "mtrand.pyx":727 + /* "mtrand.pyx":732 * memcpy(PyArray_DATA(state), (self.internal_state.key), 624*sizeof(long)) * has_gauss = self.internal_state.has_gauss * gauss = self.internal_state.gauss # <<<<<<<<<<<<<< @@ -14760,7 +14968,7 @@ __pyx_t_11 = __pyx_v_self->internal_state->gauss; __pyx_v_gauss = __pyx_t_11; - /* "mtrand.pyx":728 + /* "mtrand.pyx":733 * has_gauss = self.internal_state.has_gauss * gauss = self.internal_state.gauss * pos = self.internal_state.pos # <<<<<<<<<<<<<< @@ -14770,7 +14978,7 @@ __pyx_t_5 = __pyx_v_self->internal_state->pos; __pyx_v_pos = __pyx_t_5; - /* "mtrand.pyx":724 + /* "mtrand.pyx":729 * cdef ndarray state "arrayObject_state" * state = np.empty(624, np.uint) * with self.lock: # <<<<<<<<<<<<<< @@ -14783,9 +14991,9 @@ /*finally:*/ { /*normal 
exit:*/{ if (__pyx_t_7) { - __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_tuple__42, NULL); + __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_tuple__44, NULL); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 724, __pyx_L1_error) + if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } @@ -14800,21 +15008,21 @@ __pyx_L13:; } - /* "mtrand.pyx":729 + /* "mtrand.pyx":734 * gauss = self.internal_state.gauss * pos = self.internal_state.pos * state = np.asarray(state, np.uint32) # <<<<<<<<<<<<<< * return ('MT19937', state, pos, has_gauss, gauss) * */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 729, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_asarray); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 729, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_asarray); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 729, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_uint32); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 729, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_uint32); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; @@ -14832,7 +15040,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_1, ((PyObject *)arrayObject_state), 
__pyx_t_4}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 729, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 734, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -14841,14 +15049,14 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_1, ((PyObject *)arrayObject_state), __pyx_t_4}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 729, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 734, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { - __pyx_t_2 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 729, __pyx_L1_error) + __pyx_t_2 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = NULL; @@ -14859,7 +15067,7 @@ __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_5, __pyx_t_4); __pyx_t_4 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 729, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } @@ -14870,7 +15078,7 @@ __Pyx_DECREF_SET(arrayObject_state, ((PyArrayObject *)__pyx_t_6)); __pyx_t_6 = 0; - /* "mtrand.pyx":730 + /* "mtrand.pyx":735 * pos = self.internal_state.pos * state = np.asarray(state, np.uint32) * 
return ('MT19937', state, pos, has_gauss, gauss) # <<<<<<<<<<<<<< @@ -14878,13 +15086,13 @@ * def set_state(self, state): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 730, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 735, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_has_gauss); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 730, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_has_gauss); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 735, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_gauss); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 730, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_gauss); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 735, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 730, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 735, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_n_s_MT19937); __Pyx_GIVEREF(__pyx_n_s_MT19937); @@ -14905,7 +15113,7 @@ __pyx_t_4 = 0; goto __pyx_L0; - /* "mtrand.pyx":692 + /* "mtrand.pyx":697 * PyArray_DIM(obj, 0)) * * def get_state(self): # <<<<<<<<<<<<<< @@ -14929,7 +15137,7 @@ return __pyx_r; } -/* "mtrand.pyx":732 +/* "mtrand.pyx":737 * return ('MT19937', state, pos, has_gauss, gauss) * * def set_state(self, state): # <<<<<<<<<<<<<< @@ -14978,42 +15186,42 @@ int __pyx_t_16; __Pyx_RefNannySetupContext("set_state", 0); - /* "mtrand.pyx":781 + /* "mtrand.pyx":786 * cdef ndarray obj "arrayObject_obj" * cdef int pos * algorithm_name = state[0] # <<<<<<<<<<<<<< * if algorithm_name != 'MT19937': * raise ValueError("algorithm must be 'MT19937'") */ - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 781, __pyx_L1_error) + __pyx_t_1 = 
__Pyx_GetItemInt(__pyx_v_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 786, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_algorithm_name = __pyx_t_1; __pyx_t_1 = 0; - /* "mtrand.pyx":782 + /* "mtrand.pyx":787 * cdef int pos * algorithm_name = state[0] * if algorithm_name != 'MT19937': # <<<<<<<<<<<<<< * raise ValueError("algorithm must be 'MT19937'") * key, pos = state[1:3] */ - __pyx_t_2 = (__Pyx_PyString_Equals(__pyx_v_algorithm_name, __pyx_n_s_MT19937, Py_NE)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 782, __pyx_L1_error) - if (__pyx_t_2) { + __pyx_t_2 = (__Pyx_PyString_Equals(__pyx_v_algorithm_name, __pyx_n_s_MT19937, Py_NE)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 787, __pyx_L1_error) + if (unlikely(__pyx_t_2)) { - /* "mtrand.pyx":783 + /* "mtrand.pyx":788 * algorithm_name = state[0] * if algorithm_name != 'MT19937': * raise ValueError("algorithm must be 'MT19937'") # <<<<<<<<<<<<<< * key, pos = state[1:3] * if len(state) == 3: */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__43, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 783, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__45, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 788, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 783, __pyx_L1_error) + __PYX_ERR(0, 788, __pyx_L1_error) - /* "mtrand.pyx":782 + /* "mtrand.pyx":787 * cdef int pos * algorithm_name = state[0] * if algorithm_name != 'MT19937': # <<<<<<<<<<<<<< @@ -15022,26 +15230,22 @@ */ } - /* "mtrand.pyx":784 + /* "mtrand.pyx":789 * if algorithm_name != 'MT19937': * raise ValueError("algorithm must be 'MT19937'") * key, pos = state[1:3] # <<<<<<<<<<<<<< * if len(state) == 3: * has_gauss = 0 */ - __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_state, 1, 3, NULL, NULL, &__pyx_slice__44, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 784, __pyx_L1_error) + 
__pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_state, 1, 3, NULL, NULL, &__pyx_slice__46, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { PyObject* sequence = __pyx_t_1; - #if !CYTHON_COMPILING_IN_PYPY - Py_ssize_t size = Py_SIZE(sequence); - #else - Py_ssize_t size = PySequence_Size(sequence); - #endif + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 784, __pyx_L1_error) + __PYX_ERR(0, 789, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { @@ -15054,15 +15258,15 @@ __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else - __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 784, __pyx_L1_error) + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 784, __pyx_L1_error) + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { Py_ssize_t index = -1; - __pyx_t_5 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 784, __pyx_L1_error) + __pyx_t_5 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; @@ -15070,7 +15274,7 @@ __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_4 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_4)) goto __pyx_L4_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) __PYX_ERR(0, 784, 
__pyx_L1_error) + if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) __PYX_ERR(0, 789, __pyx_L1_error) __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L5_unpacking_done; @@ -15078,27 +15282,27 @@ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 784, __pyx_L1_error) + __PYX_ERR(0, 789, __pyx_L1_error) __pyx_L5_unpacking_done:; } - __pyx_t_7 = __Pyx_PyInt_As_int(__pyx_t_4); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 784, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyInt_As_int(__pyx_t_4); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 789, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_key = __pyx_t_3; __pyx_t_3 = 0; __pyx_v_pos = __pyx_t_7; - /* "mtrand.pyx":785 + /* "mtrand.pyx":790 * raise ValueError("algorithm must be 'MT19937'") * key, pos = state[1:3] * if len(state) == 3: # <<<<<<<<<<<<<< * has_gauss = 0 * cached_gaussian = 0.0 */ - __pyx_t_8 = PyObject_Length(__pyx_v_state); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(0, 785, __pyx_L1_error) + __pyx_t_8 = PyObject_Length(__pyx_v_state); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 790, __pyx_L1_error) __pyx_t_2 = ((__pyx_t_8 == 3) != 0); if (__pyx_t_2) { - /* "mtrand.pyx":786 + /* "mtrand.pyx":791 * key, pos = state[1:3] * if len(state) == 3: * has_gauss = 0 # <<<<<<<<<<<<<< @@ -15108,7 +15312,7 @@ __Pyx_INCREF(__pyx_int_0); __pyx_v_has_gauss = __pyx_int_0; - /* "mtrand.pyx":787 + /* "mtrand.pyx":792 * if len(state) == 3: * has_gauss = 0 * cached_gaussian = 0.0 # <<<<<<<<<<<<<< @@ -15118,7 +15322,7 @@ __Pyx_INCREF(__pyx_float_0_0); __pyx_v_cached_gaussian = __pyx_float_0_0; - /* "mtrand.pyx":785 + /* "mtrand.pyx":790 * raise ValueError("algorithm must be 'MT19937'") * key, pos = state[1:3] * if len(state) == 3: # <<<<<<<<<<<<<< @@ -15128,7 +15332,7 @@ goto __pyx_L6; } - /* "mtrand.pyx":789 + /* "mtrand.pyx":794 * 
cached_gaussian = 0.0 * else: * has_gauss, cached_gaussian = state[3:5] # <<<<<<<<<<<<<< @@ -15136,19 +15340,15 @@ * obj = PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1) */ /*else*/ { - __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_state, 3, 5, NULL, NULL, &__pyx_slice__45, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 789, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_v_state, 3, 5, NULL, NULL, &__pyx_slice__47, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { PyObject* sequence = __pyx_t_1; - #if !CYTHON_COMPILING_IN_PYPY - Py_ssize_t size = Py_SIZE(sequence); - #else - Py_ssize_t size = PySequence_Size(sequence); - #endif + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 789, __pyx_L1_error) + __PYX_ERR(0, 794, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { @@ -15161,15 +15361,15 @@ __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_3); #else - __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 789, __pyx_L1_error) + __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 789, __pyx_L1_error) + __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { Py_ssize_t index = -1; - __pyx_t_5 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 789, __pyx_L1_error) + __pyx_t_5 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 794, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_6 = Py_TYPE(__pyx_t_5)->tp_iternext; @@ -15177,7 +15377,7 @@ __Pyx_GOTREF(__pyx_t_4); index = 1; __pyx_t_3 = __pyx_t_6(__pyx_t_5); if (unlikely(!__pyx_t_3)) goto __pyx_L7_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) __PYX_ERR(0, 789, __pyx_L1_error) + if (__Pyx_IternextUnpackEndCheck(__pyx_t_6(__pyx_t_5), 2) < 0) __PYX_ERR(0, 794, __pyx_L1_error) __pyx_t_6 = NULL; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L8_unpacking_done; @@ -15185,7 +15385,7 @@ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 789, __pyx_L1_error) + __PYX_ERR(0, 794, __pyx_L1_error) __pyx_L8_unpacking_done:; } __pyx_v_has_gauss = __pyx_t_4; @@ -15195,7 +15395,7 @@ } __pyx_L6:; - /* "mtrand.pyx":790 + /* "mtrand.pyx":795 * else: * has_gauss, cached_gaussian = state[3:5] * try: # <<<<<<<<<<<<<< @@ -15211,14 +15411,14 @@ __Pyx_XGOTREF(__pyx_t_11); /*try:*/ { - /* "mtrand.pyx":791 + /* "mtrand.pyx":796 * has_gauss, cached_gaussian = state[3:5] * try: * obj = PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1) # <<<<<<<<<<<<<< * except TypeError: * # compatibility -- could be an older pickle */ - __pyx_t_1 = PyArray_ContiguousFromObject(__pyx_v_key, NPY_ULONG, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 791, __pyx_L9_error) + __pyx_t_1 = PyArray_ContiguousFromObject(__pyx_v_key, NPY_ULONG, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 796, __pyx_L9_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __pyx_t_1; __Pyx_INCREF(__pyx_t_3); @@ -15226,7 +15426,7 @@ arrayObject_obj = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":790 + /* "mtrand.pyx":795 * else: * has_gauss, cached_gaussian = state[3:5] * try: # <<<<<<<<<<<<<< @@ -15239,13 +15439,12 @@ __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L14_try_end; __pyx_L9_error:; - __Pyx_PyThreadState_assign 
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":792 + /* "mtrand.pyx":797 * try: * obj = PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1) * except TypeError: # <<<<<<<<<<<<<< @@ -15255,19 +15454,19 @@ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_7) { __Pyx_AddTraceback("mtrand.RandomState.set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_1, &__pyx_t_4) < 0) __PYX_ERR(0, 792, __pyx_L11_except_error) + if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_1, &__pyx_t_4) < 0) __PYX_ERR(0, 797, __pyx_L11_except_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_4); - /* "mtrand.pyx":794 + /* "mtrand.pyx":799 * except TypeError: * # compatibility -- could be an older pickle * obj = PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1) # <<<<<<<<<<<<<< * if PyArray_DIM(obj, 0) != 624: * raise ValueError("state must be 624 longs") */ - __pyx_t_5 = PyArray_ContiguousFromObject(__pyx_v_key, NPY_LONG, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 794, __pyx_L11_except_error) + __pyx_t_5 = PyArray_ContiguousFromObject(__pyx_v_key, NPY_LONG, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 799, __pyx_L11_except_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_12 = __pyx_t_5; __Pyx_INCREF(__pyx_t_12); @@ -15282,21 +15481,19 @@ goto __pyx_L11_except_error; __pyx_L11_except_error:; - /* "mtrand.pyx":790 + /* "mtrand.pyx":795 * else: * has_gauss, cached_gaussian = state[3:5] * try: # <<<<<<<<<<<<<< * obj = PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1) * except TypeError: */ - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); goto __pyx_L1_error; __pyx_L10_exception_handled:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_9); 
__Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); @@ -15304,7 +15501,7 @@ __pyx_L14_try_end:; } - /* "mtrand.pyx":795 + /* "mtrand.pyx":800 * # compatibility -- could be an older pickle * obj = PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1) * if PyArray_DIM(obj, 0) != 624: # <<<<<<<<<<<<<< @@ -15312,22 +15509,22 @@ * with self.lock: */ __pyx_t_2 = ((PyArray_DIM(arrayObject_obj, 0) != 0x270) != 0); - if (__pyx_t_2) { + if (unlikely(__pyx_t_2)) { - /* "mtrand.pyx":796 + /* "mtrand.pyx":801 * obj = PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1) * if PyArray_DIM(obj, 0) != 624: * raise ValueError("state must be 624 longs") # <<<<<<<<<<<<<< * with self.lock: * memcpy((self.internal_state.key), PyArray_DATA(obj), 624*sizeof(long)) */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__46, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 796, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__48, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 801, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 796, __pyx_L1_error) + __PYX_ERR(0, 801, __pyx_L1_error) - /* "mtrand.pyx":795 + /* "mtrand.pyx":800 * # compatibility -- could be an older pickle * obj = PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1) * if PyArray_DIM(obj, 0) != 624: # <<<<<<<<<<<<<< @@ -15336,7 +15533,7 @@ */ } - /* "mtrand.pyx":797 + /* "mtrand.pyx":802 * if PyArray_DIM(obj, 0) != 624: * raise ValueError("state must be 624 longs") * with self.lock: # <<<<<<<<<<<<<< @@ -15344,9 +15541,9 @@ * self.internal_state.pos = pos */ /*with:*/ { - __pyx_t_11 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 797, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 802, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); - __pyx_t_1 = 
__Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 797, __pyx_L18_error) + __pyx_t_1 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 802, __pyx_L18_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { @@ -15359,10 +15556,10 @@ } } if (__pyx_t_3) { - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 797, __pyx_L18_error) + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 802, __pyx_L18_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { - __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 797, __pyx_L18_error) + __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 802, __pyx_L18_error) } __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -15377,16 +15574,16 @@ __Pyx_XGOTREF(__pyx_t_13); /*try:*/ { - /* "mtrand.pyx":798 + /* "mtrand.pyx":803 * raise ValueError("state must be 624 longs") * with self.lock: * memcpy((self.internal_state.key), PyArray_DATA(obj), 624*sizeof(long)) # <<<<<<<<<<<<<< * self.internal_state.pos = pos * self.internal_state.has_gauss = has_gauss */ - memcpy(((void *)__pyx_v_self->internal_state->key), ((void *)PyArray_DATA(arrayObject_obj)), (0x270 * (sizeof(long)))); + (void)(memcpy(((void *)__pyx_v_self->internal_state->key), ((void *)PyArray_DATA(arrayObject_obj)), (0x270 * (sizeof(long))))); - /* "mtrand.pyx":799 + /* "mtrand.pyx":804 * with self.lock: * memcpy((self.internal_state.key), PyArray_DATA(obj), 624*sizeof(long)) * self.internal_state.pos = pos # <<<<<<<<<<<<<< @@ -15395,27 +15592,27 @@ */ __pyx_v_self->internal_state->pos = __pyx_v_pos; - /* "mtrand.pyx":800 + /* "mtrand.pyx":805 * memcpy((self.internal_state.key), PyArray_DATA(obj), 624*sizeof(long)) * 
self.internal_state.pos = pos * self.internal_state.has_gauss = has_gauss # <<<<<<<<<<<<<< * self.internal_state.gauss = cached_gaussian * */ - __pyx_t_7 = __Pyx_PyInt_As_int(__pyx_v_has_gauss); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 800, __pyx_L22_error) + __pyx_t_7 = __Pyx_PyInt_As_int(__pyx_v_has_gauss); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 805, __pyx_L22_error) __pyx_v_self->internal_state->has_gauss = __pyx_t_7; - /* "mtrand.pyx":801 + /* "mtrand.pyx":806 * self.internal_state.pos = pos * self.internal_state.has_gauss = has_gauss * self.internal_state.gauss = cached_gaussian # <<<<<<<<<<<<<< * * # Pickling support: */ - __pyx_t_14 = __pyx_PyFloat_AsDouble(__pyx_v_cached_gaussian); if (unlikely((__pyx_t_14 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 801, __pyx_L22_error) + __pyx_t_14 = __pyx_PyFloat_AsDouble(__pyx_v_cached_gaussian); if (unlikely((__pyx_t_14 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 806, __pyx_L22_error) __pyx_v_self->internal_state->gauss = __pyx_t_14; - /* "mtrand.pyx":797 + /* "mtrand.pyx":802 * if PyArray_DIM(obj, 0) != 624: * raise ValueError("state must be 624 longs") * with self.lock: # <<<<<<<<<<<<<< @@ -15428,7 +15625,6 @@ __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; goto __pyx_L27_try_end; __pyx_L22_error:; - __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -15436,20 +15632,20 @@ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; /*except:*/ { __Pyx_AddTraceback("mtrand.RandomState.set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_1, &__pyx_t_3) < 0) __PYX_ERR(0, 797, __pyx_L24_except_error) + if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_1, &__pyx_t_3) < 0) __PYX_ERR(0, 802, __pyx_L24_except_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_3); - __pyx_t_12 = PyTuple_Pack(3, 
__pyx_t_4, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 797, __pyx_L24_except_error) + __pyx_t_12 = PyTuple_Pack(3, __pyx_t_4, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 802, __pyx_L24_except_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_15 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_12, NULL); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 797, __pyx_L24_except_error) + if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 802, __pyx_L24_except_error) __Pyx_GOTREF(__pyx_t_15); __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_15); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - if (__pyx_t_2 < 0) __PYX_ERR(0, 797, __pyx_L24_except_error) + if (__pyx_t_2 < 0) __PYX_ERR(0, 802, __pyx_L24_except_error) __pyx_t_16 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_16) { __Pyx_GIVEREF(__pyx_t_4); @@ -15457,7 +15653,7 @@ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ErrRestoreWithState(__pyx_t_4, __pyx_t_1, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_t_3 = 0; - __PYX_ERR(0, 797, __pyx_L24_except_error) + __PYX_ERR(0, 802, __pyx_L24_except_error) } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -15465,14 +15661,12 @@ goto __pyx_L23_exception_handled; } __pyx_L24_except_error:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_13); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_9, __pyx_t_13); goto __pyx_L1_error; __pyx_L23_exception_handled:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_13); @@ -15483,9 +15677,9 @@ /*finally:*/ { /*normal exit:*/{ if (__pyx_t_11) { - __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_tuple__47, NULL); + __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_tuple__49, NULL); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 797, __pyx_L1_error) + if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 802, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } @@ -15500,7 +15694,7 @@ __pyx_L31:; } - /* "mtrand.pyx":732 + /* "mtrand.pyx":737 * return ('MT19937', state, pos, has_gauss, gauss) * * def set_state(self, state): # <<<<<<<<<<<<<< @@ -15530,7 +15724,7 @@ return __pyx_r; } -/* "mtrand.pyx":804 +/* "mtrand.pyx":809 * * # Pickling support: * def __getstate__(self): # <<<<<<<<<<<<<< @@ -15559,7 +15753,7 @@ PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__getstate__", 0); - /* "mtrand.pyx":805 + /* "mtrand.pyx":810 * # Pickling support: * def __getstate__(self): * return self.get_state() # <<<<<<<<<<<<<< @@ -15567,7 +15761,7 @@ * def __setstate__(self, state): */ __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 805, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 810, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { @@ -15580,10 +15774,10 @@ } } if (__pyx_t_3) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 805, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 810, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { - __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 805, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 810, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -15591,7 +15785,7 @@ __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":804 + /* "mtrand.pyx":809 * * # Pickling support: * def __getstate__(self): # <<<<<<<<<<<<<< @@ -15612,7 +15806,7 @@ return __pyx_r; } -/* 
"mtrand.pyx":807 +/* "mtrand.pyx":812 * return self.get_state() * * def __setstate__(self, state): # <<<<<<<<<<<<<< @@ -15642,14 +15836,14 @@ PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("__setstate__", 0); - /* "mtrand.pyx":808 + /* "mtrand.pyx":813 * * def __setstate__(self, state): * self.set_state(state) # <<<<<<<<<<<<<< * * def __reduce__(self): */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 808, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_set_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 813, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { @@ -15662,13 +15856,13 @@ } } if (!__pyx_t_3) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 808, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 813, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_state}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 808, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 813, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -15676,19 +15870,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_state}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 808, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 813, __pyx_L1_error) 
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 808, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 813, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_v_state); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 808, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 813, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } @@ -15696,7 +15890,7 @@ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":807 + /* "mtrand.pyx":812 * return self.get_state() * * def __setstate__(self, state): # <<<<<<<<<<<<<< @@ -15720,7 +15914,7 @@ return __pyx_r; } -/* "mtrand.pyx":810 +/* "mtrand.pyx":815 * self.set_state(state) * * def __reduce__(self): # <<<<<<<<<<<<<< @@ -15750,7 +15944,7 @@ PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("__reduce__", 0); - /* "mtrand.pyx":811 + /* "mtrand.pyx":816 * * def __reduce__(self): * return (np.random.__RandomState_ctor, (), self.get_state()) # <<<<<<<<<<<<<< @@ -15758,15 +15952,15 @@ * # Basic distributions: */ __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 811, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 816, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 811, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 816, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_RandomState_ctor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 811, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_RandomState_ctor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 816, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_state); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 811, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_get_state); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 816, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { @@ -15779,14 +15973,14 @@ } } if (__pyx_t_4) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 811, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 816, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { - __pyx_t_2 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 811, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 816, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 811, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 816, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); @@ -15801,7 +15995,7 @@ __pyx_t_3 = 0; goto __pyx_L0; - /* "mtrand.pyx":810 + /* "mtrand.pyx":815 * self.set_state(state) * * def __reduce__(self): # <<<<<<<<<<<<<< @@ -15823,7 +16017,7 @@ return __pyx_r; } -/* "mtrand.pyx":814 +/* "mtrand.pyx":819 * * # 
Basic distributions: * def random_sample(self, size=None): # <<<<<<<<<<<<<< @@ -15856,12 +16050,12 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[0] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "random_sample") < 0)) __PYX_ERR(0, 814, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "random_sample") < 0)) __PYX_ERR(0, 819, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -15875,7 +16069,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("random_sample", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 814, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("random_sample", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 819, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.random_sample", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -15895,7 +16089,7 @@ PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("random_sample", 0); - /* "mtrand.pyx":856 + /* "mtrand.pyx":861 * * """ * return cont0_array(self.internal_state, rk_double, size, self.lock) # <<<<<<<<<<<<<< @@ -15905,14 +16099,14 @@ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - __pyx_t_2 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_double, __pyx_v_size, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 856, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_double, __pyx_v_size, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 861, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - 
/* "mtrand.pyx":814 + /* "mtrand.pyx":819 * * # Basic distributions: * def random_sample(self, size=None): # <<<<<<<<<<<<<< @@ -15932,7 +16126,7 @@ return __pyx_r; } -/* "mtrand.pyx":858 +/* "mtrand.pyx":863 * return cont0_array(self.internal_state, rk_double, size, self.lock) * * def tomaxint(self, size=None): # <<<<<<<<<<<<<< @@ -15942,7 +16136,7 @@ /* Python wrapper */ static PyObject *__pyx_pw_6mtrand_11RandomState_19tomaxint(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6mtrand_11RandomState_18tomaxint[] = "\n tomaxint(size=None)\n\n Random integers between 0 and ``sys.maxint``, inclusive.\n\n Return a sample of uniformly distributed random integers in the interval\n [0, ``sys.maxint``].\n\n Parameters\n ----------\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. Default is None, in which case a\n single value is returned.\n\n Returns\n -------\n out : ndarray\n Drawn samples, with shape `size`.\n\n See Also\n --------\n randint : Uniform sampling over a given half-open interval of integers.\n random_integers : Uniform sampling over a given closed interval of\n integers.\n\n Examples\n --------\n >>> RS = np.random.mtrand.RandomState() # need a RandomState object\n >>> RS.tomaxint((2,2,2))\n array([[[1170048599, 1600360186],\n [ 739731006, 1947757578]],\n [[1871712945, 752307660],\n [1601631370, 1479324245]]])\n >>> import sys\n >>> sys.maxint\n 2147483647\n >>> RS.tomaxint((2,2,2)) < sys.maxint\n array([[[ True, True],\n [ True, True]],\n [[ True, True],\n [ True, True]]], dtype=bool)\n\n "; +static char __pyx_doc_6mtrand_11RandomState_18tomaxint[] = "\n tomaxint(size=None)\n\n Random integers between 0 and ``sys.maxint``, inclusive.\n\n Return a sample of uniformly distributed random integers in the interval\n [0, ``sys.maxint``].\n\n Parameters\n ----------\n size : int or tuple of ints, optional\n Output shape. 
If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. Default is None, in which case a\n single value is returned.\n\n Returns\n -------\n out : ndarray\n Drawn samples, with shape `size`.\n\n See Also\n --------\n randint : Uniform sampling over a given half-open interval of integers.\n random_integers : Uniform sampling over a given closed interval of\n integers.\n\n Examples\n --------\n >>> RS = np.random.mtrand.RandomState() # need a RandomState object\n >>> RS.tomaxint((2,2,2))\n array([[[1170048599, 1600360186],\n [ 739731006, 1947757578]],\n [[1871712945, 752307660],\n [1601631370, 1479324245]]])\n >>> import sys\n >>> sys.maxint\n 2147483647\n >>> RS.tomaxint((2,2,2)) < sys.maxint\n array([[[ True, True],\n [ True, True]],\n [[ True, True],\n [ True, True]]])\n\n "; static PyObject *__pyx_pw_6mtrand_11RandomState_19tomaxint(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_size = 0; PyObject *__pyx_r = 0; @@ -15965,12 +16159,12 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[0] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "tomaxint") < 0)) __PYX_ERR(0, 858, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "tomaxint") < 0)) __PYX_ERR(0, 863, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -15984,7 +16178,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("tomaxint", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 858, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("tomaxint", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 863, __pyx_L3_error) __pyx_L3_error:; 
__Pyx_AddTraceback("mtrand.RandomState.tomaxint", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -16004,7 +16198,7 @@ PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("tomaxint", 0); - /* "mtrand.pyx":903 + /* "mtrand.pyx":908 * * """ * return disc0_array(self.internal_state, rk_long, size, self.lock) # <<<<<<<<<<<<<< @@ -16014,14 +16208,14 @@ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - __pyx_t_2 = __pyx_f_6mtrand_disc0_array(__pyx_v_self->internal_state, rk_long, __pyx_v_size, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 903, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_disc0_array(__pyx_v_self->internal_state, rk_long, __pyx_v_size, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":858 + /* "mtrand.pyx":863 * return cont0_array(self.internal_state, rk_double, size, self.lock) * * def tomaxint(self, size=None): # <<<<<<<<<<<<<< @@ -16041,7 +16235,7 @@ return __pyx_r; } -/* "mtrand.pyx":905 +/* "mtrand.pyx":910 * return disc0_array(self.internal_state, rk_long, size, self.lock) * * def randint(self, low, high=None, size=None, dtype=int): # <<<<<<<<<<<<<< @@ -16065,7 +16259,7 @@ PyObject* values[4] = {0,0,0,0}; values[1] = ((PyObject *)Py_None); values[2] = ((PyObject *)Py_None); - values[3] = __pyx_k__48; + values[3] = __pyx_k__50; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); @@ -16084,29 +16278,29 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_high); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "randint") < 0)) __PYX_ERR(0, 905, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "randint") < 0)) __PYX_ERR(0, 910, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -16128,7 +16322,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("randint", 0, 1, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 905, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("randint", 0, 1, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 910, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.randint", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -16169,7 +16363,7 @@ __Pyx_INCREF(__pyx_v_low); __Pyx_INCREF(__pyx_v_high); - /* "mtrand.pyx":963 + /* "mtrand.pyx":968 * * """ * if high is None: # <<<<<<<<<<<<<< @@ -16180,7 +16374,7 @@ __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { - /* "mtrand.pyx":964 + /* "mtrand.pyx":969 * """ * if high is None: * high = low # <<<<<<<<<<<<<< @@ -16190,7 +16384,7 @@ __Pyx_INCREF(__pyx_v_low); __Pyx_DECREF_SET(__pyx_v_high, __pyx_v_low); - /* "mtrand.pyx":965 + /* "mtrand.pyx":970 * if high is None: * high = low * low = 0 # <<<<<<<<<<<<<< @@ -16200,7 +16394,7 @@ 
__Pyx_INCREF(__pyx_int_0); __Pyx_DECREF_SET(__pyx_v_low, __pyx_int_0); - /* "mtrand.pyx":963 + /* "mtrand.pyx":968 * * """ * if high is None: # <<<<<<<<<<<<<< @@ -16209,16 +16403,16 @@ */ } - /* "mtrand.pyx":969 + /* "mtrand.pyx":974 * # '_randint_type' is defined in * # 'generate_randint_helpers.py' * key = np.dtype(dtype).name # <<<<<<<<<<<<<< * if key not in _randint_type: * raise TypeError('Unsupported dtype "%s" for randint' % key) */ - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 969, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 974, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 969, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 974, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; @@ -16232,13 +16426,13 @@ } } if (!__pyx_t_4) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 969, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_v_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 974, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_v_dtype}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 969, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 974, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -16246,66 +16440,61 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_v_dtype}; - __pyx_t_3 = 
__Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 969, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 974, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 969, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 974, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_v_dtype); __Pyx_GIVEREF(__pyx_v_dtype); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_v_dtype); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 969, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 974, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_name); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 969, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_name); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 974, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_key = __pyx_t_5; __pyx_t_5 = 0; - /* "mtrand.pyx":970 + /* "mtrand.pyx":975 * # 'generate_randint_helpers.py' * key = np.dtype(dtype).name * if key not in _randint_type: # <<<<<<<<<<<<<< * raise TypeError('Unsupported dtype "%s" for randint' % key) * */ - __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_randint_type); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 970, __pyx_L1_error) + __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_randint_type); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 975, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); - __pyx_t_2 = 
(__Pyx_PySequence_ContainsTF(__pyx_v_key, __pyx_t_5, Py_NE)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 970, __pyx_L1_error) + __pyx_t_2 = (__Pyx_PySequence_ContainsTF(__pyx_v_key, __pyx_t_5, Py_NE)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 975, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = (__pyx_t_2 != 0); - if (__pyx_t_1) { + if (unlikely(__pyx_t_1)) { - /* "mtrand.pyx":971 + /* "mtrand.pyx":976 * key = np.dtype(dtype).name * if key not in _randint_type: * raise TypeError('Unsupported dtype "%s" for randint' % key) # <<<<<<<<<<<<<< * * lowbnd, highbnd, randfunc = _randint_type[key] */ - __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Unsupported_dtype_s_for_randint, __pyx_v_key); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 971, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Unsupported_dtype_s_for_randint, __pyx_v_key); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 971, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 971, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(0, 971, __pyx_L1_error) + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 976, __pyx_L1_error) - /* "mtrand.pyx":970 + /* "mtrand.pyx":975 * # 'generate_randint_helpers.py' * key = np.dtype(dtype).name * if key not in _randint_type: # <<<<<<<<<<<<<< @@ -16314,65 +16503,61 @@ */ } - /* "mtrand.pyx":973 + /* "mtrand.pyx":978 * raise TypeError('Unsupported dtype "%s" for 
randint' % key) * * lowbnd, highbnd, randfunc = _randint_type[key] # <<<<<<<<<<<<<< * * # TODO: Do not cast these inputs to Python int */ - __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_randint_type); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 973, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = PyObject_GetItem(__pyx_t_5, __pyx_v_key); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 973, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_randint_type); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) { - PyObject* sequence = __pyx_t_3; - #if !CYTHON_COMPILING_IN_PYPY - Py_ssize_t size = Py_SIZE(sequence); - #else - Py_ssize_t size = PySequence_Size(sequence); - #endif + __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_v_key); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 978, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) { + PyObject* sequence = __pyx_t_5; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 3)) { if (size > 3) __Pyx_RaiseTooManyValuesError(3); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 973, __pyx_L1_error) + __PYX_ERR(0, 978, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_5 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_6 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 2); } else { - __pyx_t_5 = PyList_GET_ITEM(sequence, 0); + __pyx_t_3 = PyList_GET_ITEM(sequence, 0); __pyx_t_6 = PyList_GET_ITEM(sequence, 1); __pyx_t_4 = PyList_GET_ITEM(sequence, 2); } - __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_4); #else - 
__pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 973, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 973, __pyx_L1_error) + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 978, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_4 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 973, __pyx_L1_error) + __pyx_t_4 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { Py_ssize_t index = -1; - __pyx_t_7 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 973, __pyx_L1_error) + __pyx_t_7 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_8 = Py_TYPE(__pyx_t_7)->tp_iternext; - index = 0; __pyx_t_5 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_5)) goto __pyx_L5_unpacking_failed; - __Pyx_GOTREF(__pyx_t_5); + index = 0; __pyx_t_3 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_3)) goto __pyx_L5_unpacking_failed; + __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_6 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_6)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_6); index = 2; __pyx_t_4 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_4)) goto __pyx_L5_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 3) < 0) __PYX_ERR(0, 973, __pyx_L1_error) + if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 3) < 0) __PYX_ERR(0, 978, __pyx_L1_error) __pyx_t_8 = NULL; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; goto 
__pyx_L6_unpacking_done; @@ -16380,80 +16565,75 @@ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_8 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 973, __pyx_L1_error) + __PYX_ERR(0, 978, __pyx_L1_error) __pyx_L6_unpacking_done:; } - __pyx_v_lowbnd = __pyx_t_5; - __pyx_t_5 = 0; + __pyx_v_lowbnd = __pyx_t_3; + __pyx_t_3 = 0; __pyx_v_highbnd = __pyx_t_6; __pyx_t_6 = 0; __pyx_v_randfunc = __pyx_t_4; __pyx_t_4 = 0; - /* "mtrand.pyx":980 + /* "mtrand.pyx":985 * # integer comparison and subtraction involving uint64 and non- * # uint64). Afterwards, remove these two lines. * ilow = int(low) # <<<<<<<<<<<<<< * ihigh = int(high) * */ - __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_low); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 980, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_ilow = __pyx_t_3; - __pyx_t_3 = 0; + __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_low); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 985, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_v_ilow = __pyx_t_5; + __pyx_t_5 = 0; - /* "mtrand.pyx":981 + /* "mtrand.pyx":986 * # uint64). Afterwards, remove these two lines. 
* ilow = int(low) * ihigh = int(high) # <<<<<<<<<<<<<< * * if ilow < lowbnd: */ - __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_high); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 981, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_ihigh = __pyx_t_3; - __pyx_t_3 = 0; + __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_high); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 986, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_v_ihigh = __pyx_t_5; + __pyx_t_5 = 0; - /* "mtrand.pyx":983 + /* "mtrand.pyx":988 * ihigh = int(high) * * if ilow < lowbnd: # <<<<<<<<<<<<<< * raise ValueError("low is out of bounds for %s" % (key,)) * if ihigh > highbnd: */ - __pyx_t_3 = PyObject_RichCompare(__pyx_v_ilow, __pyx_v_lowbnd, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 983, __pyx_L1_error) - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 983, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_1) { + __pyx_t_5 = PyObject_RichCompare(__pyx_v_ilow, __pyx_v_lowbnd, Py_LT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 988, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 988, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(__pyx_t_1)) { - /* "mtrand.pyx":984 + /* "mtrand.pyx":989 * * if ilow < lowbnd: * raise ValueError("low is out of bounds for %s" % (key,)) # <<<<<<<<<<<<<< * if ihigh > highbnd: * raise ValueError("high is out of bounds for %s" % (key,)) */ - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 984, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 989, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_key); __Pyx_GIVEREF(__pyx_v_key); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_key); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_low_is_out_of_bounds_for_s, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 984, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 984, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 984, __pyx_L1_error) + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_key); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_low_is_out_of_bounds_for_s, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 989, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 984, __pyx_L1_error) + __Pyx_Raise(__pyx_t_5, 0, 0, 0); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __PYX_ERR(0, 989, __pyx_L1_error) - /* "mtrand.pyx":983 + /* "mtrand.pyx":988 * ihigh = int(high) * * if ilow < lowbnd: # <<<<<<<<<<<<<< @@ -16462,46 +16642,41 @@ */ } - /* "mtrand.pyx":985 + /* "mtrand.pyx":990 * if ilow < lowbnd: * raise ValueError("low is out of bounds for %s" % (key,)) * if ihigh > highbnd: # <<<<<<<<<<<<<< * raise ValueError("high is out of bounds for %s" % (key,)) * if ilow >= ihigh: */ - __pyx_t_4 = PyObject_RichCompare(__pyx_v_ihigh, __pyx_v_highbnd, Py_GT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 985, __pyx_L1_error) - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 985, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__pyx_t_1) { + __pyx_t_5 = PyObject_RichCompare(__pyx_v_ihigh, __pyx_v_highbnd, Py_GT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 990, __pyx_L1_error) + 
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 990, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(__pyx_t_1)) { - /* "mtrand.pyx":986 + /* "mtrand.pyx":991 * raise ValueError("low is out of bounds for %s" % (key,)) * if ihigh > highbnd: * raise ValueError("high is out of bounds for %s" % (key,)) # <<<<<<<<<<<<<< * if ilow >= ihigh: * raise ValueError("low >= high") */ - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 986, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 991, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_key); __Pyx_GIVEREF(__pyx_v_key); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_key); - __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_high_is_out_of_bounds_for_s, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 986, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 986, __pyx_L1_error) + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_key); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_high_is_out_of_bounds_for_s, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 991, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 986, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 991, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 986, __pyx_L1_error) + __Pyx_Raise(__pyx_t_5, 0, 0, 0); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + 
__PYX_ERR(0, 991, __pyx_L1_error) - /* "mtrand.pyx":985 + /* "mtrand.pyx":990 * if ilow < lowbnd: * raise ValueError("low is out of bounds for %s" % (key,)) * if ihigh > highbnd: # <<<<<<<<<<<<<< @@ -16510,32 +16685,32 @@ */ } - /* "mtrand.pyx":987 + /* "mtrand.pyx":992 * if ihigh > highbnd: * raise ValueError("high is out of bounds for %s" % (key,)) * if ilow >= ihigh: # <<<<<<<<<<<<<< * raise ValueError("low >= high") * */ - __pyx_t_3 = PyObject_RichCompare(__pyx_v_ilow, __pyx_v_ihigh, Py_GE); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 987, __pyx_L1_error) - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 987, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_1) { + __pyx_t_5 = PyObject_RichCompare(__pyx_v_ilow, __pyx_v_ihigh, Py_GE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 992, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 992, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(__pyx_t_1)) { - /* "mtrand.pyx":988 + /* "mtrand.pyx":993 * raise ValueError("high is out of bounds for %s" % (key,)) * if ilow >= ihigh: * raise ValueError("low >= high") # <<<<<<<<<<<<<< * * with self.lock: */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__49, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 988, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 988, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__51, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 993, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_Raise(__pyx_t_5, 0, 0, 0); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __PYX_ERR(0, 993, __pyx_L1_error) - /* "mtrand.pyx":987 + /* "mtrand.pyx":992 * if ihigh > highbnd: * raise ValueError("high is out of bounds for %s" % (key,)) * if 
ilow >= ihigh: # <<<<<<<<<<<<<< @@ -16544,7 +16719,7 @@ */ } - /* "mtrand.pyx":990 + /* "mtrand.pyx":995 * raise ValueError("low >= high") * * with self.lock: # <<<<<<<<<<<<<< @@ -16552,9 +16727,9 @@ * */ /*with:*/ { - __pyx_t_9 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 990, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 990, __pyx_L10_error) + __pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 995, __pyx_L10_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { @@ -16567,14 +16742,14 @@ } } if (__pyx_t_6) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 990, __pyx_L10_error) + __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 995, __pyx_L10_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else { - __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 990, __pyx_L10_error) + __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 995, __pyx_L10_error) } - __Pyx_GOTREF(__pyx_t_3); + __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /*try:*/ { { __Pyx_PyThreadState_declare @@ -16585,23 +16760,23 @@ __Pyx_XGOTREF(__pyx_t_12); /*try:*/ { - /* "mtrand.pyx":991 + /* "mtrand.pyx":996 * * with self.lock: * ret = randfunc(ilow, ihigh - 1, size, self.state_address) # <<<<<<<<<<<<<< * * if size is None: */ - __pyx_t_4 = __Pyx_PyInt_SubtractObjC(__pyx_v_ihigh, __pyx_int_1, 
1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 991, __pyx_L14_error) + __pyx_t_4 = __Pyx_PyInt_SubtractObjC(__pyx_v_ihigh, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 996, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_randfunc); - __pyx_t_6 = __pyx_v_randfunc; __pyx_t_5 = NULL; + __pyx_t_6 = __pyx_v_randfunc; __pyx_t_3 = NULL; __pyx_t_13 = 0; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_5)) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_13 = 1; @@ -16609,27 +16784,27 @@ } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { - PyObject *__pyx_temp[5] = {__pyx_t_5, __pyx_v_ilow, __pyx_t_4, __pyx_v_size, __pyx_v_self->state_address}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_13, 4+__pyx_t_13); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 991, __pyx_L14_error) - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_GOTREF(__pyx_t_3); + PyObject *__pyx_temp[5] = {__pyx_t_3, __pyx_v_ilow, __pyx_t_4, __pyx_v_size, __pyx_v_self->state_address}; + __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_13, 4+__pyx_t_13); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 996, __pyx_L14_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { - PyObject *__pyx_temp[5] = {__pyx_t_5, __pyx_v_ilow, __pyx_t_4, __pyx_v_size, __pyx_v_self->state_address}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_13, 4+__pyx_t_13); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 991, __pyx_L14_error) - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_GOTREF(__pyx_t_3); + PyObject 
*__pyx_temp[5] = {__pyx_t_3, __pyx_v_ilow, __pyx_t_4, __pyx_v_size, __pyx_v_self->state_address}; + __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_13, 4+__pyx_t_13); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 996, __pyx_L14_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(4+__pyx_t_13); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 991, __pyx_L14_error) + __pyx_t_7 = PyTuple_New(4+__pyx_t_13); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 996, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_7); - if (__pyx_t_5) { - __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; + if (__pyx_t_3) { + __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __pyx_t_3 = NULL; } __Pyx_INCREF(__pyx_v_ilow); __Pyx_GIVEREF(__pyx_v_ilow); @@ -16643,15 +16818,15 @@ __Pyx_GIVEREF(__pyx_v_self->state_address); PyTuple_SET_ITEM(__pyx_t_7, 3+__pyx_t_13, __pyx_v_self->state_address); __pyx_t_4 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 991, __pyx_L14_error) - __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 996, __pyx_L14_error) + __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_v_ret = __pyx_t_3; - __pyx_t_3 = 0; + __pyx_v_ret = __pyx_t_5; + __pyx_t_5 = 0; - /* "mtrand.pyx":993 + /* "mtrand.pyx":998 * ret = randfunc(ilow, ihigh - 1, size, self.state_address) * * if size is None: # <<<<<<<<<<<<<< @@ -16662,7 +16837,7 @@ __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { - /* "mtrand.pyx":994 + /* "mtrand.pyx":999 * * if size is None: * if dtype in (np.bool, np.int, np.long): # <<<<<<<<<<<<<< @@ -16670,51 +16845,51 @@ * */ __Pyx_INCREF(__pyx_v_dtype); - __pyx_t_3 = __pyx_v_dtype; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if 
(unlikely(!__pyx_t_6)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_5 = __pyx_v_dtype; + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_bool_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_bool_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyObject_RichCompare(__pyx_t_3, __pyx_t_7, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_7, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L22_bool_binop_done; } - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_int); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_int); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyObject_RichCompare(__pyx_t_3, __pyx_t_7, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_6 = 
PyObject_RichCompare(__pyx_t_5, __pyx_t_7, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L22_bool_binop_done; } - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_long); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_long); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyObject_RichCompare(__pyx_t_3, __pyx_t_7, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_t_7, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 994, __pyx_L14_error) + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 999, __pyx_L14_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_2 = __pyx_t_1; __pyx_L22_bool_binop_done:; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { - /* "mtrand.pyx":995 + /* "mtrand.pyx":1000 * if size is None: * if dtype in (np.bool, np.int, 
np.long): * return dtype(ret) # <<<<<<<<<<<<<< @@ -16734,43 +16909,43 @@ } } if (!__pyx_t_7) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_ret); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 995, __pyx_L14_error) - __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_ret); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1000, __pyx_L14_error) + __Pyx_GOTREF(__pyx_t_5); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_v_ret}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 995, __pyx_L14_error) + __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1000, __pyx_L14_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_GOTREF(__pyx_t_3); + __Pyx_GOTREF(__pyx_t_5); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_v_ret}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 995, __pyx_L14_error) + __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1000, __pyx_L14_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_GOTREF(__pyx_t_3); + __Pyx_GOTREF(__pyx_t_5); } else #endif { - __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 995, __pyx_L14_error) + __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1000, __pyx_L14_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_INCREF(__pyx_v_ret); __Pyx_GIVEREF(__pyx_v_ret); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_v_ret); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 995, __pyx_L14_error) - __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_6, 
__pyx_t_4, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1000, __pyx_L14_error) + __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; goto __pyx_L18_try_return; - /* "mtrand.pyx":994 + /* "mtrand.pyx":999 * * if size is None: * if dtype in (np.bool, np.int, np.long): # <<<<<<<<<<<<<< @@ -16779,7 +16954,7 @@ */ } - /* "mtrand.pyx":993 + /* "mtrand.pyx":998 * ret = randfunc(ilow, ihigh - 1, size, self.state_address) * * if size is None: # <<<<<<<<<<<<<< @@ -16788,7 +16963,7 @@ */ } - /* "mtrand.pyx":997 + /* "mtrand.pyx":1002 * return dtype(ret) * * return ret # <<<<<<<<<<<<<< @@ -16800,7 +16975,7 @@ __pyx_r = __pyx_v_ret; goto __pyx_L18_try_return; - /* "mtrand.pyx":990 + /* "mtrand.pyx":995 * raise ValueError("low >= high") * * with self.lock: # <<<<<<<<<<<<<< @@ -16809,58 +16984,54 @@ */ } __pyx_L14_error:; - __Pyx_PyThreadState_assign - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; /*except:*/ { __Pyx_AddTraceback("mtrand.RandomState.randint", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_6, &__pyx_t_4) < 0) __PYX_ERR(0, 990, __pyx_L16_except_error) - __Pyx_GOTREF(__pyx_t_3); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_4) < 0) __PYX_ERR(0, 995, __pyx_L16_except_error) + __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_4); - __pyx_t_7 = PyTuple_Pack(3, __pyx_t_3, __pyx_t_6, __pyx_t_4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 990, __pyx_L16_except_error) + __pyx_t_7 = PyTuple_Pack(3, __pyx_t_5, __pyx_t_6, __pyx_t_4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 995, __pyx_L16_except_error) __Pyx_GOTREF(__pyx_t_7); 
__pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_7, NULL); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 990, __pyx_L16_except_error) + if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 995, __pyx_L16_except_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_14); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - if (__pyx_t_1 < 0) __PYX_ERR(0, 990, __pyx_L16_except_error) + if (__pyx_t_1 < 0) __PYX_ERR(0, 995, __pyx_L16_except_error) __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { - __Pyx_GIVEREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_ErrRestoreWithState(__pyx_t_3, __pyx_t_6, __pyx_t_4); - __pyx_t_3 = 0; __pyx_t_6 = 0; __pyx_t_4 = 0; - __PYX_ERR(0, 990, __pyx_L16_except_error) + __Pyx_ErrRestoreWithState(__pyx_t_5, __pyx_t_6, __pyx_t_4); + __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_4 = 0; + __PYX_ERR(0, 995, __pyx_L16_except_error) } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L15_exception_handled; } __pyx_L16_except_error:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); goto __pyx_L1_error; __pyx_L18_try_return:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); goto __pyx_L11_return; __pyx_L15_exception_handled:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); @@ -16870,9 +17041,9 @@ /*finally:*/ { /*normal exit:*/{ if (__pyx_t_9) { - __pyx_t_12 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__50, NULL); + __pyx_t_12 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__52, NULL); 
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 990, __pyx_L1_error) + if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } @@ -16882,9 +17053,9 @@ __pyx_t_12 = __pyx_r; __pyx_r = 0; if (__pyx_t_9) { - __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__51, NULL); + __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__53, NULL); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 990, __pyx_L1_error) + if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } @@ -16901,7 +17072,7 @@ __pyx_L28:; } - /* "mtrand.pyx":905 + /* "mtrand.pyx":910 * return disc0_array(self.internal_state, rk_long, size, self.lock) * * def randint(self, low, high=None, size=None, dtype=int): # <<<<<<<<<<<<<< @@ -16935,7 +17106,7 @@ return __pyx_r; } -/* "mtrand.pyx":999 +/* "mtrand.pyx":1004 * return ret * * def bytes(self, npy_intp length): # <<<<<<<<<<<<<< @@ -16952,7 +17123,7 @@ __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bytes (wrapper)", 0); assert(__pyx_arg_length); { - __pyx_v_length = __Pyx_PyInt_As_npy_intp(__pyx_arg_length); if (unlikely((__pyx_v_length == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 999, __pyx_L3_error) + __pyx_v_length = __Pyx_PyInt_As_npy_intp(__pyx_arg_length); if (unlikely((__pyx_v_length == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1004, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; @@ -16981,19 +17152,19 @@ PyObject *__pyx_t_7 = NULL; __Pyx_RefNannySetupContext("bytes", 0); - /* "mtrand.pyx":1022 + /* "mtrand.pyx":1027 * """ * cdef void *bytes * bytestring = empty_py_bytes(length, &bytes) # <<<<<<<<<<<<<< * with self.lock, nogil: * rk_fill(bytes, length, self.internal_state) */ - __pyx_t_1 = empty_py_bytes(__pyx_v_length, (&__pyx_v_bytes)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1022, 
__pyx_L1_error) + __pyx_t_1 = empty_py_bytes(__pyx_v_length, (&__pyx_v_bytes)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytestring = __pyx_t_1; __pyx_t_1 = 0; - /* "mtrand.pyx":1023 + /* "mtrand.pyx":1028 * cdef void *bytes * bytestring = empty_py_bytes(length, &bytes) * with self.lock, nogil: # <<<<<<<<<<<<<< @@ -17001,9 +17172,9 @@ * return bytestring */ /*with:*/ { - __pyx_t_2 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1023, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1023, __pyx_L3_error) + __pyx_t_3 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1028, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { @@ -17016,17 +17187,17 @@ } } if (__pyx_t_4) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1023, __pyx_L3_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1028, __pyx_L3_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { - __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1023, __pyx_L3_error) + __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1028, __pyx_L3_error) } __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*try:*/ { { - if (__pyx_t_5||__pyx_t_6||__pyx_t_7); else {/*mark used*/} + (void)__pyx_t_5; (void)__pyx_t_6; (void)__pyx_t_7; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -17036,7 +17207,7 @@ 
#endif /*try:*/ { - /* "mtrand.pyx":1024 + /* "mtrand.pyx":1029 * bytestring = empty_py_bytes(length, &bytes) * with self.lock, nogil: * rk_fill(bytes, length, self.internal_state) # <<<<<<<<<<<<<< @@ -17046,7 +17217,7 @@ rk_fill(__pyx_v_bytes, __pyx_v_length, __pyx_v_self->internal_state); } - /* "mtrand.pyx":1023 + /* "mtrand.pyx":1028 * cdef void *bytes * bytestring = empty_py_bytes(length, &bytes) * with self.lock, nogil: # <<<<<<<<<<<<<< @@ -17070,9 +17241,9 @@ /*finally:*/ { /*normal exit:*/{ if (__pyx_t_2) { - __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__52, NULL); + __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__54, NULL); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1023, __pyx_L1_error) + if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } @@ -17087,7 +17258,7 @@ __pyx_L16:; } - /* "mtrand.pyx":1025 + /* "mtrand.pyx":1030 * with self.lock, nogil: * rk_fill(bytes, length, self.internal_state) * return bytestring # <<<<<<<<<<<<<< @@ -17099,7 +17270,7 @@ __pyx_r = __pyx_v_bytestring; goto __pyx_L0; - /* "mtrand.pyx":999 + /* "mtrand.pyx":1004 * return ret * * def bytes(self, npy_intp length): # <<<<<<<<<<<<<< @@ -17121,7 +17292,7 @@ return __pyx_r; } -/* "mtrand.pyx":1028 +/* "mtrand.pyx":1033 * * * def choice(self, a, size=None, replace=True, p=None): # <<<<<<<<<<<<<< @@ -17164,29 +17335,29 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if 
(kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_replace); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_replace); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_p); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_p); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "choice") < 0)) __PYX_ERR(0, 1028, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "choice") < 0)) __PYX_ERR(0, 1033, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -17208,7 +17379,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("choice", 0, 1, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1028, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("choice", 0, 1, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1033, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.choice", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -17223,7 +17394,7 @@ static PyObject *__pyx_pf_6mtrand_11RandomState_24choice(struct __pyx_obj_6mtrand_RandomState *__pyx_v_self, PyObject *__pyx_v_a, PyObject *__pyx_v_size, PyObject *__pyx_v_replace, PyObject *__pyx_v_p) { PyObject *__pyx_v_pop_size = NULL; - Py_ssize_t __pyx_v_d; + PyObject *__pyx_v_d = NULL; PyObject *__pyx_v_atol = NULL; double *__pyx_v_pix; PyObject *__pyx_v_shape = NULL; @@ -17254,34 +17425,35 @@ Py_ssize_t __pyx_t_12; PyObject *__pyx_t_13 = NULL; PyObject *__pyx_t_14 = NULL; - PyObject *(*__pyx_t_15)(PyObject *); - int __pyx_t_16; + npy_intp __pyx_t_15; + PyObject *(*__pyx_t_16)(PyObject *); + int __pyx_t_17; __Pyx_RefNannySetupContext("choice", 0); __Pyx_INCREF(__pyx_v_a); 
__Pyx_INCREF(__pyx_v_size); __Pyx_INCREF(__pyx_v_p); - /* "mtrand.pyx":1107 + /* "mtrand.pyx":1112 * * # Format and Verify input * a = np.array(a, copy=False) # <<<<<<<<<<<<<< * if a.ndim == 0: * try: */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1107, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1107, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1107, __pyx_L1_error) + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_a); __Pyx_GIVEREF(__pyx_v_a); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_a); - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1107, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_copy, Py_False) < 0) __PYX_ERR(0, 1107, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1107, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_copy, Py_False) < 0) __PYX_ERR(0, 1112, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -17289,23 +17461,23 @@ __Pyx_DECREF_SET(__pyx_v_a, __pyx_t_4); __pyx_t_4 = 0; - /* "mtrand.pyx":1108 + /* "mtrand.pyx":1113 * # 
Format and Verify input * a = np.array(a, copy=False) * if a.ndim == 0: # <<<<<<<<<<<<<< * try: * # __index__ must return an integer by python rules. */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_ndim); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1108, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_ndim); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_t_4, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1108, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_t_4, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1113, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1108, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1113, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_5) { - /* "mtrand.pyx":1109 + /* "mtrand.pyx":1114 * a = np.array(a, copy=False) * if a.ndim == 0: * try: # <<<<<<<<<<<<<< @@ -17321,19 +17493,19 @@ __Pyx_XGOTREF(__pyx_t_8); /*try:*/ { - /* "mtrand.pyx":1111 + /* "mtrand.pyx":1116 * try: * # __index__ must return an integer by python rules. 
* pop_size = operator.index(a.item()) # <<<<<<<<<<<<<< * except TypeError: * raise ValueError("a must be 1-dimensional or an integer") */ - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_operator); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1111, __pyx_L4_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_operator); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1116, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1111, __pyx_L4_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1116, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1111, __pyx_L4_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1116, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { @@ -17346,10 +17518,10 @@ } } if (__pyx_t_9) { - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1111, __pyx_L4_error) + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1116, __pyx_L4_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { - __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1111, __pyx_L4_error) + __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1116, __pyx_L4_error) } __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -17364,14 +17536,14 @@ } } if (!__pyx_t_2) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1111, __pyx_L4_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 1116, __pyx_L4_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1111, __pyx_L4_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1116, __pyx_L4_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -17380,20 +17552,20 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1111, __pyx_L4_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1116, __pyx_L4_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1111, __pyx_L4_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1116, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_4); __pyx_t_4 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1111, __pyx_L4_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1116, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -17402,7 +17574,7 @@ __pyx_v_pop_size = __pyx_t_3; __pyx_t_3 = 0; - /* "mtrand.pyx":1109 + /* "mtrand.pyx":1114 * a = np.array(a, copy=False) * if a.ndim == 0: * try: # 
<<<<<<<<<<<<<< @@ -17415,14 +17587,13 @@ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; - __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":1112 + /* "mtrand.pyx":1117 * # __index__ must return an integer by python rules. * pop_size = operator.index(a.item()) * except TypeError: # <<<<<<<<<<<<<< @@ -17432,35 +17603,34 @@ __pyx_t_10 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_10) { __Pyx_AddTraceback("mtrand.RandomState.choice", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_1, &__pyx_t_9) < 0) __PYX_ERR(0, 1112, __pyx_L6_except_error) + if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_1, &__pyx_t_9) < 0) __PYX_ERR(0, 1117, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_9); - /* "mtrand.pyx":1113 + /* "mtrand.pyx":1118 * pop_size = operator.index(a.item()) * except TypeError: * raise ValueError("a must be 1-dimensional or an integer") # <<<<<<<<<<<<<< * if pop_size <= 0: * raise ValueError("a must be greater than 0") */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__53, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1113, __pyx_L6_except_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__55, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1118, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 1113, __pyx_L6_except_error) + __PYX_ERR(0, 1118, __pyx_L6_except_error) } goto __pyx_L6_except_error; __pyx_L6_except_error:; - /* "mtrand.pyx":1109 + /* "mtrand.pyx":1114 * a = np.array(a, copy=False) * if a.ndim == 0: * try: # <<<<<<<<<<<<<< * # __index__ must return an integer by python rules. 
* pop_size = operator.index(a.item()) */ - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); @@ -17469,32 +17639,32 @@ __pyx_L9_try_end:; } - /* "mtrand.pyx":1114 + /* "mtrand.pyx":1119 * except TypeError: * raise ValueError("a must be 1-dimensional or an integer") * if pop_size <= 0: # <<<<<<<<<<<<<< * raise ValueError("a must be greater than 0") * elif a.ndim != 1: */ - __pyx_t_9 = PyObject_RichCompare(__pyx_v_pop_size, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1114, __pyx_L1_error) - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1114, __pyx_L1_error) + __pyx_t_9 = PyObject_RichCompare(__pyx_v_pop_size, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1119, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1119, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":1115 + /* "mtrand.pyx":1120 * raise ValueError("a must be 1-dimensional or an integer") * if pop_size <= 0: * raise ValueError("a must be greater than 0") # <<<<<<<<<<<<<< * elif a.ndim != 1: * raise ValueError("a must be 1-dimensional") */ - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__54, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1115, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__56, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1120, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __PYX_ERR(0, 1115, __pyx_L1_error) + __PYX_ERR(0, 1120, __pyx_L1_error) - /* "mtrand.pyx":1114 + /* "mtrand.pyx":1119 * except TypeError: * raise ValueError("a must be 1-dimensional or an integer") * if pop_size <= 0: # <<<<<<<<<<<<<< @@ -17503,7 +17673,7 @@ */ } - /* 
"mtrand.pyx":1108 + /* "mtrand.pyx":1113 * # Format and Verify input * a = np.array(a, copy=False) * if a.ndim == 0: # <<<<<<<<<<<<<< @@ -17513,35 +17683,35 @@ goto __pyx_L3; } - /* "mtrand.pyx":1116 + /* "mtrand.pyx":1121 * if pop_size <= 0: * raise ValueError("a must be greater than 0") * elif a.ndim != 1: # <<<<<<<<<<<<<< * raise ValueError("a must be 1-dimensional") * else: */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_ndim); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1116, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_ndim); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1121, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_9, __pyx_int_1, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1116, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_9, __pyx_int_1, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1121, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1116, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1121, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":1117 + /* "mtrand.pyx":1122 * raise ValueError("a must be greater than 0") * elif a.ndim != 1: * raise ValueError("a must be 1-dimensional") # <<<<<<<<<<<<<< * else: * pop_size = a.shape[0] */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__55, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1117, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__57, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1122, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 1117, __pyx_L1_error) + __PYX_ERR(0, 1122, __pyx_L1_error) 
- /* "mtrand.pyx":1116 + /* "mtrand.pyx":1121 * if pop_size <= 0: * raise ValueError("a must be greater than 0") * elif a.ndim != 1: # <<<<<<<<<<<<<< @@ -17550,7 +17720,7 @@ */ } - /* "mtrand.pyx":1119 + /* "mtrand.pyx":1124 * raise ValueError("a must be 1-dimensional") * else: * pop_size = a.shape[0] # <<<<<<<<<<<<<< @@ -17558,15 +17728,15 @@ * raise ValueError("a must be non-empty") */ /*else*/ { - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1119, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1124, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1119, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1124, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_pop_size = __pyx_t_9; __pyx_t_9 = 0; - /* "mtrand.pyx":1120 + /* "mtrand.pyx":1125 * else: * pop_size = a.shape[0] * if pop_size is 0: # <<<<<<<<<<<<<< @@ -17575,22 +17745,22 @@ */ __pyx_t_5 = (__pyx_v_pop_size == __pyx_int_0); __pyx_t_11 = (__pyx_t_5 != 0); - if (__pyx_t_11) { + if (unlikely(__pyx_t_11)) { - /* "mtrand.pyx":1121 + /* "mtrand.pyx":1126 * pop_size = a.shape[0] * if pop_size is 0: * raise ValueError("a must be non-empty") # <<<<<<<<<<<<<< * * if p is not None: */ - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__56, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1121, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__58, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __PYX_ERR(0, 1121, __pyx_L1_error) + __PYX_ERR(0, 1126, 
__pyx_L1_error) - /* "mtrand.pyx":1120 + /* "mtrand.pyx":1125 * else: * pop_size = a.shape[0] * if pop_size is 0: # <<<<<<<<<<<<<< @@ -17601,7 +17771,7 @@ } __pyx_L3:; - /* "mtrand.pyx":1123 + /* "mtrand.pyx":1128 * raise ValueError("a must be non-empty") * * if p is not None: # <<<<<<<<<<<<<< @@ -17612,36 +17782,39 @@ __pyx_t_5 = (__pyx_t_11 != 0); if (__pyx_t_5) { - /* "mtrand.pyx":1124 + /* "mtrand.pyx":1129 * * if p is not None: * d = len(p) # <<<<<<<<<<<<<< * * atol = np.sqrt(np.finfo(np.float64).eps) */ - __pyx_t_12 = PyObject_Length(__pyx_v_p); if (unlikely(__pyx_t_12 == -1)) __PYX_ERR(0, 1124, __pyx_L1_error) - __pyx_v_d = __pyx_t_12; + __pyx_t_12 = PyObject_Length(__pyx_v_p); if (unlikely(__pyx_t_12 == ((Py_ssize_t)-1))) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_9 = PyInt_FromSsize_t(__pyx_t_12); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_v_d = __pyx_t_9; + __pyx_t_9 = 0; - /* "mtrand.pyx":1126 + /* "mtrand.pyx":1131 * d = len(p) * * atol = np.sqrt(np.finfo(np.float64).eps) # <<<<<<<<<<<<<< * if isinstance(p, np.ndarray): * if np.issubdtype(p.dtype, np.floating): */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_finfo); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_finfo); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; @@ -17655,14 +17828,14 @@ } } if (!__pyx_t_4) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_13); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_13); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_13}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; @@ -17671,26 +17844,26 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_13}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); 
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } else #endif { - __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_4); __pyx_t_4 = NULL; __Pyx_GIVEREF(__pyx_t_13); PyTuple_SET_ITEM(__pyx_t_14, 0+1, __pyx_t_13); __pyx_t_13 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_eps); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_eps); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; @@ -17704,14 +17877,14 @@ } } if (!__pyx_t_1) { - __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_9); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_t_2}; - __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) 
__PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -17720,20 +17893,20 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_t_2}; - __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_1); __pyx_t_1 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_14, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_14, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1126, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_14, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } @@ -17742,40 +17915,40 @@ __pyx_v_atol = __pyx_t_9; __pyx_t_9 = 0; - /* "mtrand.pyx":1127 + /* "mtrand.pyx":1132 * * atol = np.sqrt(np.finfo(np.float64).eps) * if isinstance(p, np.ndarray): # <<<<<<<<<<<<<< * if np.issubdtype(p.dtype, np.floating): * atol = max(atol, np.sqrt(np.finfo(p.dtype).eps)) */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1127, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(0, 1132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1127, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_5 = PyObject_IsInstance(__pyx_v_p, __pyx_t_3); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(0, 1127, __pyx_L1_error) + __pyx_t_5 = PyObject_IsInstance(__pyx_v_p, __pyx_t_3); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 1132, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_11 = (__pyx_t_5 != 0); if (__pyx_t_11) { - /* "mtrand.pyx":1128 + /* "mtrand.pyx":1133 * atol = np.sqrt(np.finfo(np.float64).eps) * if isinstance(p, np.ndarray): * if np.issubdtype(p.dtype, np.floating): # <<<<<<<<<<<<<< * atol = max(atol, np.sqrt(np.finfo(p.dtype).eps)) * */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1128, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_issubdtype); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1128, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_issubdtype); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_dtype); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1128, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_dtype); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1128, __pyx_L1_error) + __pyx_t_2 
= __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_floating); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1128, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_floating); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -17793,7 +17966,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_14)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_t_9, __pyx_t_1}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_14, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1128, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_14, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1133, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -17803,7 +17976,7 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_14)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_t_9, __pyx_t_1}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_14, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1128, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_14, __pyx_temp+1-__pyx_t_10, 2+__pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1133, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -17811,7 +17984,7 @@ } else #endif { - __pyx_t_13 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1128, __pyx_L1_error) + __pyx_t_13 = PyTuple_New(2+__pyx_t_10); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_13, 0, __pyx_t_2); __pyx_t_2 = NULL; @@ -17822,33 
+17995,33 @@ PyTuple_SET_ITEM(__pyx_t_13, 1+__pyx_t_10, __pyx_t_1); __pyx_t_9 = 0; __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_13, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1128, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_13, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1128, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1133, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_11) { - /* "mtrand.pyx":1129 + /* "mtrand.pyx":1134 * if isinstance(p, np.ndarray): * if np.issubdtype(p.dtype, np.floating): * atol = max(atol, np.sqrt(np.finfo(p.dtype).eps)) # <<<<<<<<<<<<<< * * p = PyArray_ContiguousFromObject(p, NPY_DOUBLE, 1, 1) */ - __pyx_t_14 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_14 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_finfo); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_9 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_finfo); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_dtype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { @@ -17861,14 +18034,14 @@ } } if (!__pyx_t_2) { - __pyx_t_14 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_14); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_1}; - __pyx_t_14 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -17877,26 +18050,26 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_1}; - __pyx_t_14 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_4 = PyTuple_New(1+1); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_4, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_4, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_eps); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_eps); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __pyx_t_14 = NULL; @@ -17910,14 +18083,14 @@ } } if (!__pyx_t_14) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_13)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_t_9}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -17926,20 +18099,20 @@ #if CYTHON_FAST_PYCCALL if 
(__Pyx_PyFastCFunction_Check(__pyx_t_13)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_t_9}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else #endif { - __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_14); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_14); __pyx_t_14 = NULL; __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_9); __pyx_t_9 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_13, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_13, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } @@ -17947,8 +18120,8 @@ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_INCREF(__pyx_v_atol); __pyx_t_13 = __pyx_v_atol; - __pyx_t_9 = PyObject_RichCompare(__pyx_t_3, __pyx_t_13, Py_GT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1129, __pyx_L1_error) - __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1129, __pyx_L1_error) + __pyx_t_9 = PyObject_RichCompare(__pyx_t_3, __pyx_t_13, Py_GT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1134, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1134, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__pyx_t_11) { __Pyx_INCREF(__pyx_t_3); @@ -17965,7 +18138,7 @@ __Pyx_DECREF_SET(__pyx_v_atol, __pyx_t_3); 
__pyx_t_3 = 0; - /* "mtrand.pyx":1128 + /* "mtrand.pyx":1133 * atol = np.sqrt(np.finfo(np.float64).eps) * if isinstance(p, np.ndarray): * if np.issubdtype(p.dtype, np.floating): # <<<<<<<<<<<<<< @@ -17974,7 +18147,7 @@ */ } - /* "mtrand.pyx":1127 + /* "mtrand.pyx":1132 * * atol = np.sqrt(np.finfo(np.float64).eps) * if isinstance(p, np.ndarray): # <<<<<<<<<<<<<< @@ -17983,14 +18156,14 @@ */ } - /* "mtrand.pyx":1131 + /* "mtrand.pyx":1136 * atol = max(atol, np.sqrt(np.finfo(p.dtype).eps)) * * p = PyArray_ContiguousFromObject(p, NPY_DOUBLE, 1, 1) # <<<<<<<<<<<<<< * pix = PyArray_DATA(p) * */ - __pyx_t_3 = PyArray_ContiguousFromObject(__pyx_v_p, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1131, __pyx_L1_error) + __pyx_t_3 = PyArray_ContiguousFromObject(__pyx_v_p, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __pyx_t_3; __Pyx_INCREF(__pyx_t_4); @@ -17998,45 +18171,45 @@ __Pyx_DECREF_SET(__pyx_v_p, __pyx_t_4); __pyx_t_4 = 0; - /* "mtrand.pyx":1132 + /* "mtrand.pyx":1137 * * p = PyArray_ContiguousFromObject(p, NPY_DOUBLE, 1, 1) * pix = PyArray_DATA(p) # <<<<<<<<<<<<<< * * if p.ndim != 1: */ - if (!(likely(((__pyx_v_p) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_p, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 1132, __pyx_L1_error) + if (!(likely(((__pyx_v_p) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_p, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 1137, __pyx_L1_error) __pyx_v_pix = ((double *)PyArray_DATA(((PyArrayObject *)__pyx_v_p))); - /* "mtrand.pyx":1134 + /* "mtrand.pyx":1139 * pix = PyArray_DATA(p) * * if p.ndim != 1: # <<<<<<<<<<<<<< * raise ValueError("p must be 1-dimensional") * if p.size != pop_size: */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_ndim); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1134, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_ndim); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1139, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_int_1, Py_NE); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1134, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_int_1, Py_NE); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1139, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1134, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1139, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_11) { + if (unlikely(__pyx_t_11)) { - /* "mtrand.pyx":1135 + /* "mtrand.pyx":1140 * * if p.ndim != 1: * raise ValueError("p must be 1-dimensional") # <<<<<<<<<<<<<< * if p.size != pop_size: * raise ValueError("a and p must have same size") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__57, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1135, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__59, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1140, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 1135, __pyx_L1_error) + __PYX_ERR(0, 1140, __pyx_L1_error) - /* "mtrand.pyx":1134 + /* "mtrand.pyx":1139 * pix = PyArray_DATA(p) * * if p.ndim != 1: # <<<<<<<<<<<<<< @@ -18045,35 +18218,35 @@ */ } - /* "mtrand.pyx":1136 + /* "mtrand.pyx":1141 * if p.ndim != 1: * raise ValueError("p must be 1-dimensional") * if p.size != pop_size: # <<<<<<<<<<<<<< * raise ValueError("a and p must have same size") * if np.logical_or.reduce(p < 0): */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1136, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1141, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_pop_size, Py_NE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1136, __pyx_L1_error) + __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_pop_size, Py_NE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1141, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1136, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1141, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__pyx_t_11) { + if (unlikely(__pyx_t_11)) { - /* "mtrand.pyx":1137 + /* "mtrand.pyx":1142 * raise ValueError("p must be 1-dimensional") * if p.size != pop_size: * raise ValueError("a and p must have same size") # <<<<<<<<<<<<<< * if np.logical_or.reduce(p < 0): * raise ValueError("probabilities are not non-negative") */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__58, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1137, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__60, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 1137, __pyx_L1_error) + __PYX_ERR(0, 1142, __pyx_L1_error) - /* "mtrand.pyx":1136 + /* "mtrand.pyx":1141 * if p.ndim != 1: * raise ValueError("p must be 1-dimensional") * if p.size != pop_size: # <<<<<<<<<<<<<< @@ -18082,22 +18255,22 @@ */ } - /* "mtrand.pyx":1138 + /* "mtrand.pyx":1143 * if p.size != pop_size: * raise ValueError("a and p must have same size") * if np.logical_or.reduce(p < 0): # <<<<<<<<<<<<<< * raise ValueError("probabilities are not non-negative") * if abs(kahan_sum(pix, d) - 1.) 
> atol: */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1138, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_logical_or); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1138, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_logical_or); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_reduce); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1138, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_reduce); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __pyx_t_13 = PyObject_RichCompare(__pyx_v_p, __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_13); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1138, __pyx_L1_error) + __pyx_t_13 = PyObject_RichCompare(__pyx_v_p, __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_13); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1143, __pyx_L1_error) __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_3); @@ -18109,14 +18282,14 @@ } } if (!__pyx_t_9) { - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_13); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1138, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_13); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1143, __pyx_L1_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_9, __pyx_t_13}; - __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1138, __pyx_L1_error) + 
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1143, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; @@ -18125,43 +18298,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_9, __pyx_t_13}; - __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1138, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1143, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } else #endif { - __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1138, __pyx_L1_error) + __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_9); __pyx_t_9 = NULL; __Pyx_GIVEREF(__pyx_t_13); PyTuple_SET_ITEM(__pyx_t_14, 0+1, __pyx_t_13); __pyx_t_13 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_14, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1138, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_14, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1138, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1143, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__pyx_t_11) { + if (unlikely(__pyx_t_11)) { - /* "mtrand.pyx":1139 + /* "mtrand.pyx":1144 * raise ValueError("a and p must have same size") * if np.logical_or.reduce(p < 0): * raise 
ValueError("probabilities are not non-negative") # <<<<<<<<<<<<<< * if abs(kahan_sum(pix, d) - 1.) > atol: * raise ValueError("probabilities do not sum to 1") */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__59, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1139, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__61, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1144, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 1139, __pyx_L1_error) + __PYX_ERR(0, 1144, __pyx_L1_error) - /* "mtrand.pyx":1138 + /* "mtrand.pyx":1143 * if p.size != pop_size: * raise ValueError("a and p must have same size") * if np.logical_or.reduce(p < 0): # <<<<<<<<<<<<<< @@ -18170,35 +18343,36 @@ */ } - /* "mtrand.pyx":1140 + /* "mtrand.pyx":1145 * if np.logical_or.reduce(p < 0): * raise ValueError("probabilities are not non-negative") * if abs(kahan_sum(pix, d) - 1.) > atol: # <<<<<<<<<<<<<< * raise ValueError("probabilities do not sum to 1") * */ - __pyx_t_4 = PyFloat_FromDouble(fabs((__pyx_f_6mtrand_kahan_sum(__pyx_v_pix, __pyx_v_d) - 1.))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1140, __pyx_L1_error) + __pyx_t_15 = __Pyx_PyInt_As_npy_intp(__pyx_v_d); if (unlikely((__pyx_t_15 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 1145, __pyx_L1_error) + __pyx_t_4 = PyFloat_FromDouble(fabs((__pyx_f_6mtrand_kahan_sum(__pyx_v_pix, __pyx_t_15) - 1.))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1145, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_v_atol, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1140, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_v_atol, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1145, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_11 < 
0)) __PYX_ERR(0, 1140, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1145, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_11) { + if (unlikely(__pyx_t_11)) { - /* "mtrand.pyx":1141 + /* "mtrand.pyx":1146 * raise ValueError("probabilities are not non-negative") * if abs(kahan_sum(pix, d) - 1.) > atol: * raise ValueError("probabilities do not sum to 1") # <<<<<<<<<<<<<< * * shape = size */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__60, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1141, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__62, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 1141, __pyx_L1_error) + __PYX_ERR(0, 1146, __pyx_L1_error) - /* "mtrand.pyx":1140 + /* "mtrand.pyx":1145 * if np.logical_or.reduce(p < 0): * raise ValueError("probabilities are not non-negative") * if abs(kahan_sum(pix, d) - 1.) 
> atol: # <<<<<<<<<<<<<< @@ -18207,7 +18381,7 @@ */ } - /* "mtrand.pyx":1123 + /* "mtrand.pyx":1128 * raise ValueError("a must be non-empty") * * if p is not None: # <<<<<<<<<<<<<< @@ -18216,7 +18390,7 @@ */ } - /* "mtrand.pyx":1143 + /* "mtrand.pyx":1148 * raise ValueError("probabilities do not sum to 1") * * shape = size # <<<<<<<<<<<<<< @@ -18226,7 +18400,7 @@ __Pyx_INCREF(__pyx_v_size); __pyx_v_shape = __pyx_v_size; - /* "mtrand.pyx":1144 + /* "mtrand.pyx":1149 * * shape = size * if shape is not None: # <<<<<<<<<<<<<< @@ -18237,33 +18411,33 @@ __pyx_t_5 = (__pyx_t_11 != 0); if (__pyx_t_5) { - /* "mtrand.pyx":1145 + /* "mtrand.pyx":1150 * shape = size * if shape is not None: * size = np.prod(shape, dtype=np.intp) # <<<<<<<<<<<<<< * else: * size = 1 */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1145, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_prod); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1145, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_prod); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1145, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_shape); - __pyx_t_14 = PyDict_New(); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1145, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); - __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1145, __pyx_L1_error) 
+ __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_intp); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1145, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_intp); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - if (PyDict_SetItem(__pyx_t_14, __pyx_n_s_dtype, __pyx_t_9) < 0) __PYX_ERR(0, 1145, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_14, __pyx_n_s_dtype, __pyx_t_9) < 0) __PYX_ERR(0, 1150, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_14); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1145, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_14); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -18271,7 +18445,7 @@ __Pyx_DECREF_SET(__pyx_v_size, __pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":1144 + /* "mtrand.pyx":1149 * * shape = size * if shape is not None: # <<<<<<<<<<<<<< @@ -18281,7 +18455,7 @@ goto __pyx_L21; } - /* "mtrand.pyx":1147 + /* "mtrand.pyx":1152 * size = np.prod(shape, dtype=np.intp) * else: * size = 1 # <<<<<<<<<<<<<< @@ -18294,17 +18468,17 @@ } __pyx_L21:; - /* "mtrand.pyx":1150 + /* "mtrand.pyx":1155 * * # Actual sampling * if replace: # <<<<<<<<<<<<<< * if p is not None: * cdf = p.cumsum() */ - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_v_replace); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1150, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_v_replace); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1155, __pyx_L1_error) if (__pyx_t_5) { - /* "mtrand.pyx":1151 + /* "mtrand.pyx":1156 * # Actual sampling * if replace: * if p is not None: # <<<<<<<<<<<<<< @@ -18315,14 
+18489,14 @@ __pyx_t_11 = (__pyx_t_5 != 0); if (__pyx_t_11) { - /* "mtrand.pyx":1152 + /* "mtrand.pyx":1157 * if replace: * if p is not None: * cdf = p.cumsum() # <<<<<<<<<<<<<< * cdf /= cdf[-1] * uniform_samples = self.random_sample(shape) */ - __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_cumsum); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1152, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_cumsum); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1157, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_14))) { @@ -18335,39 +18509,39 @@ } } if (__pyx_t_3) { - __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_14, __pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1152, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_14, __pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1157, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { - __pyx_t_9 = __Pyx_PyObject_CallNoArg(__pyx_t_14); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1152, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_CallNoArg(__pyx_t_14); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1157, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __pyx_v_cdf = __pyx_t_9; __pyx_t_9 = 0; - /* "mtrand.pyx":1153 + /* "mtrand.pyx":1158 * if p is not None: * cdf = p.cumsum() * cdf /= cdf[-1] # <<<<<<<<<<<<<< * uniform_samples = self.random_sample(shape) * idx = cdf.searchsorted(uniform_samples, side='right') */ - __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_cdf, -1L, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1153, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_cdf, -1L, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_14 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_cdf, __pyx_t_9); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1153, __pyx_L1_error) + __pyx_t_14 
= __Pyx_PyNumber_InPlaceDivide(__pyx_v_cdf, __pyx_t_9); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_cdf, __pyx_t_14); __pyx_t_14 = 0; - /* "mtrand.pyx":1154 + /* "mtrand.pyx":1159 * cdf = p.cumsum() * cdf /= cdf[-1] * uniform_samples = self.random_sample(shape) # <<<<<<<<<<<<<< * idx = cdf.searchsorted(uniform_samples, side='right') * idx = np.array(idx, copy=False) # searchsorted returns a scalar */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_random_sample); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1154, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_random_sample); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { @@ -18380,13 +18554,13 @@ } } if (!__pyx_t_3) { - __pyx_t_14 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_v_shape); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1154, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_v_shape); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_shape}; - __pyx_t_14 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1154, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1159, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_14); } else @@ -18394,19 +18568,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_shape}; - __pyx_t_14 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 
1154, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1159, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_14); } else #endif { - __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1154, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_v_shape); - __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_4, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1154, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_4, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } @@ -18415,24 +18589,24 @@ __pyx_v_uniform_samples = __pyx_t_14; __pyx_t_14 = 0; - /* "mtrand.pyx":1155 + /* "mtrand.pyx":1160 * cdf /= cdf[-1] * uniform_samples = self.random_sample(shape) * idx = cdf.searchsorted(uniform_samples, side='right') # <<<<<<<<<<<<<< * idx = np.array(idx, copy=False) # searchsorted returns a scalar * else: */ - __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_v_cdf, __pyx_n_s_searchsorted); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1155, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_v_cdf, __pyx_n_s_searchsorted); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); - __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1155, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_v_uniform_samples); __Pyx_GIVEREF(__pyx_v_uniform_samples); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_uniform_samples); - __pyx_t_4 = PyDict_New(); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 1155, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_side, __pyx_n_s_right) < 0) __PYX_ERR(0, 1155, __pyx_L1_error) - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_9, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1155, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_side, __pyx_n_s_right) < 0) __PYX_ERR(0, 1160, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_9, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -18440,27 +18614,27 @@ __pyx_v_idx = __pyx_t_3; __pyx_t_3 = 0; - /* "mtrand.pyx":1156 + /* "mtrand.pyx":1161 * uniform_samples = self.random_sample(shape) * idx = cdf.searchsorted(uniform_samples, side='right') * idx = np.array(idx, copy=False) # searchsorted returns a scalar # <<<<<<<<<<<<<< * else: * idx = self.randint(0, pop_size, size=shape) */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1156, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1156, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1156, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_idx); __Pyx_GIVEREF(__pyx_v_idx); PyTuple_SET_ITEM(__pyx_t_3, 0, 
__pyx_v_idx); - __pyx_t_9 = PyDict_New(); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1156, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_copy, Py_False) < 0) __PYX_ERR(0, 1156, __pyx_L1_error) - __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_9); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1156, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_copy, Py_False) < 0) __PYX_ERR(0, 1161, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_9); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -18468,7 +18642,7 @@ __Pyx_DECREF_SET(__pyx_v_idx, __pyx_t_14); __pyx_t_14 = 0; - /* "mtrand.pyx":1151 + /* "mtrand.pyx":1156 * # Actual sampling * if replace: * if p is not None: # <<<<<<<<<<<<<< @@ -18478,7 +18652,7 @@ goto __pyx_L23; } - /* "mtrand.pyx":1158 + /* "mtrand.pyx":1163 * idx = np.array(idx, copy=False) # searchsorted returns a scalar * else: * idx = self.randint(0, pop_size, size=shape) # <<<<<<<<<<<<<< @@ -18486,9 +18660,9 @@ * if size > pop_size: */ /*else*/ { - __pyx_t_14 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_randint); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1158, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_randint); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); - __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1158, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); @@ -18496,10 +18670,10 @@ __Pyx_INCREF(__pyx_v_pop_size); __Pyx_GIVEREF(__pyx_v_pop_size); PyTuple_SET_ITEM(__pyx_t_9, 1, 
__pyx_v_pop_size); - __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1158, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_size, __pyx_v_shape) < 0) __PYX_ERR(0, 1158, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_9, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1158, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_size, __pyx_v_shape) < 0) __PYX_ERR(0, 1163, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_9, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1163, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -18509,7 +18683,7 @@ } __pyx_L23:; - /* "mtrand.pyx":1150 + /* "mtrand.pyx":1155 * * # Actual sampling * if replace: # <<<<<<<<<<<<<< @@ -18519,7 +18693,7 @@ goto __pyx_L22; } - /* "mtrand.pyx":1160 + /* "mtrand.pyx":1165 * idx = self.randint(0, pop_size, size=shape) * else: * if size > pop_size: # <<<<<<<<<<<<<< @@ -18527,25 +18701,25 @@ * "population when 'replace=False'") */ /*else*/ { - __pyx_t_4 = PyObject_RichCompare(__pyx_v_size, __pyx_v_pop_size, Py_GT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1160, __pyx_L1_error) - __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1160, __pyx_L1_error) + __pyx_t_4 = PyObject_RichCompare(__pyx_v_size, __pyx_v_pop_size, Py_GT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1165, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__pyx_t_11) { + if (unlikely(__pyx_t_11)) { - /* "mtrand.pyx":1161 + /* "mtrand.pyx":1166 * else: * if size > pop_size: * raise ValueError("Cannot take a larger sample than " # <<<<<<<<<<<<<< * 
"population when 'replace=False'") * */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__61, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1161, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__63, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1166, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 1161, __pyx_L1_error) + __PYX_ERR(0, 1166, __pyx_L1_error) - /* "mtrand.pyx":1160 + /* "mtrand.pyx":1165 * idx = self.randint(0, pop_size, size=shape) * else: * if size > pop_size: # <<<<<<<<<<<<<< @@ -18554,7 +18728,7 @@ */ } - /* "mtrand.pyx":1164 + /* "mtrand.pyx":1169 * "population when 'replace=False'") * * if p is not None: # <<<<<<<<<<<<<< @@ -18565,19 +18739,19 @@ __pyx_t_5 = (__pyx_t_11 != 0); if (__pyx_t_5) { - /* "mtrand.pyx":1165 + /* "mtrand.pyx":1170 * * if p is not None: * if np.count_nonzero(p > 0) < size: # <<<<<<<<<<<<<< * raise ValueError("Fewer non-zero entries in p than size") * n_uniq = 0 */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_count_nonzero); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_count_nonzero); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_RichCompare(__pyx_v_p, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_v_p, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1170, __pyx_L1_error) __pyx_t_14 = NULL; if 
(CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_9); @@ -18589,14 +18763,14 @@ } } if (!__pyx_t_14) { - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_t_3}; - __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1170, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -18605,45 +18779,45 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_t_3}; - __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1170, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_13 = PyTuple_New(1+1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_13 = PyTuple_New(1+1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_GIVEREF(__pyx_t_14); PyTuple_SET_ITEM(__pyx_t_13, 0, __pyx_t_14); __pyx_t_14 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_13, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_13, NULL); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_13, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = PyObject_RichCompare(__pyx_t_4, __pyx_v_size, Py_LT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_9 = PyObject_RichCompare(__pyx_t_4, __pyx_v_size, Py_LT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1165, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":1166 + /* "mtrand.pyx":1171 * if p is not None: * if np.count_nonzero(p > 0) < size: * raise ValueError("Fewer non-zero entries in p than size") # <<<<<<<<<<<<<< * n_uniq = 0 * p = p.copy() */ - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__62, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1166, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__64, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __PYX_ERR(0, 1166, __pyx_L1_error) + __PYX_ERR(0, 1171, __pyx_L1_error) - /* "mtrand.pyx":1165 + /* "mtrand.pyx":1170 * * if p is not None: * if np.count_nonzero(p > 0) < size: # <<<<<<<<<<<<<< @@ -18652,7 +18826,7 @@ */ } - /* "mtrand.pyx":1167 + /* "mtrand.pyx":1172 * if np.count_nonzero(p > 0) < size: * raise ValueError("Fewer non-zero entries in p than size") * n_uniq = 0 # <<<<<<<<<<<<<< @@ -18662,14 +18836,14 @@ __Pyx_INCREF(__pyx_int_0); __pyx_v_n_uniq = 
__pyx_int_0; - /* "mtrand.pyx":1168 + /* "mtrand.pyx":1173 * raise ValueError("Fewer non-zero entries in p than size") * n_uniq = 0 * p = p.copy() # <<<<<<<<<<<<<< * found = np.zeros(shape, dtype=np.int) * flat_found = found.ravel() */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_copy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1168, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_p, __pyx_n_s_copy); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1173, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_13 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { @@ -18682,43 +18856,43 @@ } } if (__pyx_t_13) { - __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_13); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1168, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_13); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1173, __pyx_L1_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } else { - __pyx_t_9 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1168, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1173, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF_SET(__pyx_v_p, __pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":1169 + /* "mtrand.pyx":1174 * n_uniq = 0 * p = p.copy() * found = np.zeros(shape, dtype=np.int) # <<<<<<<<<<<<<< * flat_found = found.ravel() * while n_uniq < size: */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1169, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1169, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1174, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1169, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_shape); - __pyx_t_13 = PyDict_New(); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1169, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1169, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_int); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1169, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_int); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PyDict_SetItem(__pyx_t_13, __pyx_n_s_dtype, __pyx_t_14) < 0) __PYX_ERR(0, 1169, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_13, __pyx_n_s_dtype, __pyx_t_14) < 0) __PYX_ERR(0, 1174, __pyx_L1_error) __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_9, __pyx_t_13); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1169, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_9, __pyx_t_13); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -18726,14 +18900,14 @@ __pyx_v_found = __pyx_t_14; __pyx_t_14 = 0; - /* "mtrand.pyx":1170 + /* "mtrand.pyx":1175 * p = p.copy() * found = np.zeros(shape, dtype=np.int) * flat_found 
= found.ravel() # <<<<<<<<<<<<<< * while n_uniq < size: * x = self.rand(size - n_uniq) */ - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_found, __pyx_n_s_ravel); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1170, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_found, __pyx_n_s_ravel); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1175, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) { @@ -18746,17 +18920,17 @@ } } if (__pyx_t_9) { - __pyx_t_14 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_t_9); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1170, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_t_9); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1175, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { - __pyx_t_14 = __Pyx_PyObject_CallNoArg(__pyx_t_13); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1170, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_CallNoArg(__pyx_t_13); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1175, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_v_flat_found = __pyx_t_14; __pyx_t_14 = 0; - /* "mtrand.pyx":1171 + /* "mtrand.pyx":1176 * found = np.zeros(shape, dtype=np.int) * flat_found = found.ravel() * while n_uniq < size: # <<<<<<<<<<<<<< @@ -18764,21 +18938,21 @@ * if n_uniq > 0: */ while (1) { - __pyx_t_14 = PyObject_RichCompare(__pyx_v_n_uniq, __pyx_v_size, Py_LT); __Pyx_XGOTREF(__pyx_t_14); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1171, __pyx_L1_error) - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_14); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1171, __pyx_L1_error) + __pyx_t_14 = PyObject_RichCompare(__pyx_v_n_uniq, __pyx_v_size, Py_LT); __Pyx_XGOTREF(__pyx_t_14); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1176, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_14); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1176, __pyx_L1_error) __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; if (!__pyx_t_5) 
break; - /* "mtrand.pyx":1172 + /* "mtrand.pyx":1177 * flat_found = found.ravel() * while n_uniq < size: * x = self.rand(size - n_uniq) # <<<<<<<<<<<<<< * if n_uniq > 0: * p[flat_found[0:n_uniq]] = 0 */ - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_rand); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1172, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_rand); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - __pyx_t_9 = PyNumber_Subtract(__pyx_v_size, __pyx_v_n_uniq); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1172, __pyx_L1_error) + __pyx_t_9 = PyNumber_Subtract(__pyx_v_size, __pyx_v_n_uniq); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) { @@ -18791,14 +18965,14 @@ } } if (!__pyx_t_4) { - __pyx_t_14 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_t_9); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1172, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_t_9); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1177, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_14); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_13)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_9}; - __pyx_t_14 = __Pyx_PyFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1172, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1177, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -18807,20 +18981,20 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_13)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_9}; - __pyx_t_14 = __Pyx_PyCFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) 
__PYX_ERR(0, 1172, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyCFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1177, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else #endif { - __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1172, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = NULL; __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_9); __pyx_t_9 = 0; - __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_13, __pyx_t_3, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1172, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_13, __pyx_t_3, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1177, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } @@ -18829,31 +19003,31 @@ __Pyx_XDECREF_SET(__pyx_v_x, __pyx_t_14); __pyx_t_14 = 0; - /* "mtrand.pyx":1173 + /* "mtrand.pyx":1178 * while n_uniq < size: * x = self.rand(size - n_uniq) * if n_uniq > 0: # <<<<<<<<<<<<<< * p[flat_found[0:n_uniq]] = 0 * cdf = np.cumsum(p) */ - __pyx_t_14 = PyObject_RichCompare(__pyx_v_n_uniq, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_14); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1173, __pyx_L1_error) - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_14); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1173, __pyx_L1_error) + __pyx_t_14 = PyObject_RichCompare(__pyx_v_n_uniq, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_14); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1178, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_14); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 1178, __pyx_L1_error) __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; if (__pyx_t_5) { - /* "mtrand.pyx":1174 + /* "mtrand.pyx":1179 * x = self.rand(size - n_uniq) * if n_uniq > 0: * 
p[flat_found[0:n_uniq]] = 0 # <<<<<<<<<<<<<< * cdf = np.cumsum(p) * cdf /= cdf[-1] */ - __pyx_t_14 = __Pyx_PyObject_GetSlice(__pyx_v_flat_found, 0, 0, NULL, &__pyx_v_n_uniq, NULL, 1, 0, 1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1174, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_GetSlice(__pyx_v_flat_found, 0, 0, NULL, &__pyx_v_n_uniq, NULL, 1, 0, 1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); - if (unlikely(PyObject_SetItem(__pyx_v_p, __pyx_t_14, __pyx_int_0) < 0)) __PYX_ERR(0, 1174, __pyx_L1_error) + if (unlikely(PyObject_SetItem(__pyx_v_p, __pyx_t_14, __pyx_int_0) < 0)) __PYX_ERR(0, 1179, __pyx_L1_error) __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - /* "mtrand.pyx":1173 + /* "mtrand.pyx":1178 * while n_uniq < size: * x = self.rand(size - n_uniq) * if n_uniq > 0: # <<<<<<<<<<<<<< @@ -18862,16 +19036,16 @@ */ } - /* "mtrand.pyx":1175 + /* "mtrand.pyx":1180 * if n_uniq > 0: * p[flat_found[0:n_uniq]] = 0 * cdf = np.cumsum(p) # <<<<<<<<<<<<<< * cdf /= cdf[-1] * new = cdf.searchsorted(x, side='right') */ - __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1175, __pyx_L1_error) + __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_cumsum); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1175, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_cumsum); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_t_13 = NULL; @@ -18885,13 +19059,13 @@ } } if (!__pyx_t_13) { - __pyx_t_14 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_p); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1175, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_p); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1180, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_14); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_13, __pyx_v_p}; - __pyx_t_14 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1175, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1180, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_GOTREF(__pyx_t_14); } else @@ -18899,19 +19073,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_13, __pyx_v_p}; - __pyx_t_14 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1175, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1180, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_GOTREF(__pyx_t_14); } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1175, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_13); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_13); __pyx_t_13 = NULL; __Pyx_INCREF(__pyx_v_p); __Pyx_GIVEREF(__pyx_v_p); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_v_p); - __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_9, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1175, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_9, NULL); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -18920,39 +19094,39 @@ __Pyx_XDECREF_SET(__pyx_v_cdf, __pyx_t_14); __pyx_t_14 = 0; - /* "mtrand.pyx":1176 + /* "mtrand.pyx":1181 * p[flat_found[0:n_uniq]] = 0 * cdf = np.cumsum(p) * cdf /= cdf[-1] # <<<<<<<<<<<<<< * new = cdf.searchsorted(x, side='right') * _, unique_indices = 
np.unique(new, return_index=True) */ - __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_cdf, -1L, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1176, __pyx_L1_error) + __pyx_t_14 = __Pyx_GetItemInt(__pyx_v_cdf, -1L, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); - __pyx_t_3 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_cdf, __pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1176, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_cdf, __pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1181, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF_SET(__pyx_v_cdf, __pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":1177 + /* "mtrand.pyx":1182 * cdf = np.cumsum(p) * cdf /= cdf[-1] * new = cdf.searchsorted(x, side='right') # <<<<<<<<<<<<<< * _, unique_indices = np.unique(new, return_index=True) * unique_indices.sort() */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_cdf, __pyx_n_s_searchsorted); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1177, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_cdf, __pyx_n_s_searchsorted); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_14 = PyTuple_New(1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1177, __pyx_L1_error) + __pyx_t_14 = PyTuple_New(1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_v_x); - __pyx_t_9 = PyDict_New(); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1177, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_side, __pyx_n_s_right) < 0) __PYX_ERR(0, 1177, __pyx_L1_error) - __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_14, __pyx_t_9); if 
(unlikely(!__pyx_t_13)) __PYX_ERR(0, 1177, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_n_s_side, __pyx_n_s_right) < 0) __PYX_ERR(0, 1182, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_14, __pyx_t_9); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; @@ -18960,42 +19134,38 @@ __Pyx_XDECREF_SET(__pyx_v_new, __pyx_t_13); __pyx_t_13 = 0; - /* "mtrand.pyx":1178 + /* "mtrand.pyx":1183 * cdf /= cdf[-1] * new = cdf.searchsorted(x, side='right') * _, unique_indices = np.unique(new, return_index=True) # <<<<<<<<<<<<<< * unique_indices.sort() * new = new.take(unique_indices) */ - __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1178, __pyx_L1_error) + __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_unique); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1178, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_unique); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __pyx_t_13 = PyTuple_New(1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1178, __pyx_L1_error) + __pyx_t_13 = PyTuple_New(1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_INCREF(__pyx_v_new); __Pyx_GIVEREF(__pyx_v_new); PyTuple_SET_ITEM(__pyx_t_13, 0, __pyx_v_new); - __pyx_t_14 = PyDict_New(); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1178, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); - if (PyDict_SetItem(__pyx_t_14, __pyx_n_s_return_index, Py_True) < 0) __PYX_ERR(0, 1178, __pyx_L1_error) - __pyx_t_3 = 
__Pyx_PyObject_Call(__pyx_t_9, __pyx_t_13, __pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1178, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_14, __pyx_n_s_return_index, Py_True) < 0) __PYX_ERR(0, 1183, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_13, __pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) { PyObject* sequence = __pyx_t_3; - #if !CYTHON_COMPILING_IN_PYPY - Py_ssize_t size = Py_SIZE(sequence); - #else - Py_ssize_t size = PySequence_Size(sequence); - #endif + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 1178, __pyx_L1_error) + __PYX_ERR(0, 1183, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { @@ -19008,31 +19178,31 @@ __Pyx_INCREF(__pyx_t_14); __Pyx_INCREF(__pyx_t_13); #else - __pyx_t_14 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1178, __pyx_L1_error) + __pyx_t_14 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); - __pyx_t_13 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1178, __pyx_L1_error) + __pyx_t_13 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { Py_ssize_t index = -1; - __pyx_t_9 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1178, __pyx_L1_error) + __pyx_t_9 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1183, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_15 = Py_TYPE(__pyx_t_9)->tp_iternext; - index = 0; __pyx_t_14 = __pyx_t_15(__pyx_t_9); if (unlikely(!__pyx_t_14)) goto __pyx_L30_unpacking_failed; + __pyx_t_16 = Py_TYPE(__pyx_t_9)->tp_iternext; + index = 0; __pyx_t_14 = __pyx_t_16(__pyx_t_9); if (unlikely(!__pyx_t_14)) goto __pyx_L30_unpacking_failed; __Pyx_GOTREF(__pyx_t_14); - index = 1; __pyx_t_13 = __pyx_t_15(__pyx_t_9); if (unlikely(!__pyx_t_13)) goto __pyx_L30_unpacking_failed; + index = 1; __pyx_t_13 = __pyx_t_16(__pyx_t_9); if (unlikely(!__pyx_t_13)) goto __pyx_L30_unpacking_failed; __Pyx_GOTREF(__pyx_t_13); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_15(__pyx_t_9), 2) < 0) __PYX_ERR(0, 1178, __pyx_L1_error) - __pyx_t_15 = NULL; + if (__Pyx_IternextUnpackEndCheck(__pyx_t_16(__pyx_t_9), 2) < 0) __PYX_ERR(0, 1183, __pyx_L1_error) + __pyx_t_16 = NULL; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L31_unpacking_done; __pyx_L30_unpacking_failed:; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_15 = NULL; + __pyx_t_16 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 1178, __pyx_L1_error) + __PYX_ERR(0, 1183, __pyx_L1_error) __pyx_L31_unpacking_done:; } __Pyx_XDECREF_SET(__pyx_v__, __pyx_t_14); @@ -19040,14 +19210,14 @@ __Pyx_XDECREF_SET(__pyx_v_unique_indices, __pyx_t_13); __pyx_t_13 = 0; - /* "mtrand.pyx":1179 + /* "mtrand.pyx":1184 * new = cdf.searchsorted(x, side='right') * _, unique_indices = np.unique(new, return_index=True) * unique_indices.sort() # <<<<<<<<<<<<<< * new = new.take(unique_indices) * flat_found[n_uniq:n_uniq + new.size] = new */ - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_unique_indices, __pyx_n_s_sort); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1179, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_unique_indices, __pyx_n_s_sort); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1184, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __pyx_t_14 = NULL; if (CYTHON_UNPACK_METHODS && 
likely(PyMethod_Check(__pyx_t_13))) { @@ -19060,23 +19230,23 @@ } } if (__pyx_t_14) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1179, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1184, __pyx_L1_error) __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } else { - __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_13); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1179, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_13); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1184, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":1180 + /* "mtrand.pyx":1185 * _, unique_indices = np.unique(new, return_index=True) * unique_indices.sort() * new = new.take(unique_indices) # <<<<<<<<<<<<<< * flat_found[n_uniq:n_uniq + new.size] = new * n_uniq += new.size */ - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_new, __pyx_n_s_take); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1180, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_new, __pyx_n_s_take); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __pyx_t_14 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) { @@ -19089,13 +19259,13 @@ } } if (!__pyx_t_14) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_v_unique_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1180, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_v_unique_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_13)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_v_unique_indices}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1180, __pyx_L1_error) + __pyx_t_3 = 
__Pyx_PyFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1185, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -19103,19 +19273,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_13)) { PyObject *__pyx_temp[2] = {__pyx_t_14, __pyx_v_unique_indices}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1180, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1185, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1180, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_14); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_14); __pyx_t_14 = NULL; __Pyx_INCREF(__pyx_v_unique_indices); __Pyx_GIVEREF(__pyx_v_unique_indices); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_v_unique_indices); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_13, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1180, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_13, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -19124,38 +19294,38 @@ __Pyx_DECREF_SET(__pyx_v_new, __pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":1181 + /* "mtrand.pyx":1186 * unique_indices.sort() * new = new.take(unique_indices) * flat_found[n_uniq:n_uniq + new.size] = new # <<<<<<<<<<<<<< * n_uniq += new.size * idx = found */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_new, __pyx_n_s_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1181, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_new, __pyx_n_s_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1186, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_13 = PyNumber_Add(__pyx_v_n_uniq, __pyx_t_3); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1181, __pyx_L1_error) + __pyx_t_13 = PyNumber_Add(__pyx_v_n_uniq, __pyx_t_3); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__Pyx_PyObject_SetSlice(__pyx_v_flat_found, __pyx_v_new, 0, 0, &__pyx_v_n_uniq, &__pyx_t_13, NULL, 0, 0, 1) < 0) __PYX_ERR(0, 1181, __pyx_L1_error) + if (__Pyx_PyObject_SetSlice(__pyx_v_flat_found, __pyx_v_new, 0, 0, &__pyx_v_n_uniq, &__pyx_t_13, NULL, 0, 0, 1) < 0) __PYX_ERR(0, 1186, __pyx_L1_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - /* "mtrand.pyx":1182 + /* "mtrand.pyx":1187 * new = new.take(unique_indices) * flat_found[n_uniq:n_uniq + new.size] = new * n_uniq += new.size # <<<<<<<<<<<<<< * idx = found * else: */ - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_new, __pyx_n_s_size); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1182, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_new, __pyx_n_s_size); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_n_uniq, __pyx_t_13); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1182, __pyx_L1_error) + __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_n_uniq, __pyx_t_13); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_DECREF_SET(__pyx_v_n_uniq, __pyx_t_3); __pyx_t_3 = 0; } - /* "mtrand.pyx":1183 + /* "mtrand.pyx":1188 * flat_found[n_uniq:n_uniq + new.size] = new * n_uniq += new.size * idx = found # <<<<<<<<<<<<<< @@ -19165,7 +19335,7 @@ __Pyx_INCREF(__pyx_v_found); __pyx_v_idx = __pyx_v_found; - /* "mtrand.pyx":1164 + /* "mtrand.pyx":1169 * "population when 'replace=False'") * * if p is not None: # <<<<<<<<<<<<<< @@ -19175,7 +19345,7 @@ goto __pyx_L25; } - /* "mtrand.pyx":1185 + /* "mtrand.pyx":1190 * idx = found * 
else: * idx = self.permutation(pop_size)[:size] # <<<<<<<<<<<<<< @@ -19183,7 +19353,7 @@ * idx.shape = shape */ /*else*/ { - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_permutation); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1185, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_permutation); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_13))) { @@ -19196,13 +19366,13 @@ } } if (!__pyx_t_9) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_v_pop_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1185, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_13, __pyx_v_pop_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_13)) { PyObject *__pyx_temp[2] = {__pyx_t_9, __pyx_v_pop_size}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1185, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1190, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -19210,31 +19380,31 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_13)) { PyObject *__pyx_temp[2] = {__pyx_t_9, __pyx_v_pop_size}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1185, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_13, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1190, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_14 = PyTuple_New(1+1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1185, __pyx_L1_error) + __pyx_t_14 = PyTuple_New(1+1); if 
(unlikely(!__pyx_t_14)) __PYX_ERR(0, 1190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_9); __pyx_t_9 = NULL; __Pyx_INCREF(__pyx_v_pop_size); __Pyx_GIVEREF(__pyx_v_pop_size); PyTuple_SET_ITEM(__pyx_t_14, 0+1, __pyx_v_pop_size); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_13, __pyx_t_14, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1185, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_13, __pyx_t_14, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; } } __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __pyx_t_13 = __Pyx_PyObject_GetSlice(__pyx_t_3, 0, 0, NULL, &__pyx_v_size, NULL, 0, 0, 1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1185, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetSlice(__pyx_t_3, 0, 0, NULL, &__pyx_v_size, NULL, 0, 0, 1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_idx = __pyx_t_13; __pyx_t_13 = 0; - /* "mtrand.pyx":1186 + /* "mtrand.pyx":1191 * else: * idx = self.permutation(pop_size)[:size] * if shape is not None: # <<<<<<<<<<<<<< @@ -19245,16 +19415,16 @@ __pyx_t_11 = (__pyx_t_5 != 0); if (__pyx_t_11) { - /* "mtrand.pyx":1187 + /* "mtrand.pyx":1192 * idx = self.permutation(pop_size)[:size] * if shape is not None: * idx.shape = shape # <<<<<<<<<<<<<< * * if shape is None and isinstance(idx, np.ndarray): */ - if (__Pyx_PyObject_SetAttrStr(__pyx_v_idx, __pyx_n_s_shape, __pyx_v_shape) < 0) __PYX_ERR(0, 1187, __pyx_L1_error) + if (__Pyx_PyObject_SetAttrStr(__pyx_v_idx, __pyx_n_s_shape, __pyx_v_shape) < 0) __PYX_ERR(0, 1192, __pyx_L1_error) - /* "mtrand.pyx":1186 + /* "mtrand.pyx":1191 * else: * idx = self.permutation(pop_size)[:size] * if shape is not None: # <<<<<<<<<<<<<< @@ -19267,7 +19437,7 @@ } __pyx_L22:; - /* "mtrand.pyx":1189 + /* "mtrand.pyx":1194 * idx.shape = shape * * if shape is None and 
isinstance(idx, np.ndarray): # <<<<<<<<<<<<<< @@ -19275,40 +19445,40 @@ * idx = idx.item(0) */ __pyx_t_5 = (__pyx_v_shape == Py_None); - __pyx_t_16 = (__pyx_t_5 != 0); - if (__pyx_t_16) { + __pyx_t_17 = (__pyx_t_5 != 0); + if (__pyx_t_17) { } else { - __pyx_t_11 = __pyx_t_16; + __pyx_t_11 = __pyx_t_17; goto __pyx_L34_bool_binop_done; } - __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1189, __pyx_L1_error) + __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1194, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1189, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1194, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __pyx_t_16 = PyObject_IsInstance(__pyx_v_idx, __pyx_t_3); if (unlikely(__pyx_t_16 == -1)) __PYX_ERR(0, 1189, __pyx_L1_error) + __pyx_t_17 = PyObject_IsInstance(__pyx_v_idx, __pyx_t_3); if (unlikely(__pyx_t_17 == ((int)-1))) __PYX_ERR(0, 1194, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_5 = (__pyx_t_16 != 0); + __pyx_t_5 = (__pyx_t_17 != 0); __pyx_t_11 = __pyx_t_5; __pyx_L34_bool_binop_done:; if (__pyx_t_11) { - /* "mtrand.pyx":1191 + /* "mtrand.pyx":1196 * if shape is None and isinstance(idx, np.ndarray): * # In most cases a scalar will have been made an array * idx = idx.item(0) # <<<<<<<<<<<<<< * * #Use samples as indices for a if a is array-like */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_idx, __pyx_n_s_item); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1191, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_idx, __pyx_n_s_item); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__63, NULL); if 
(unlikely(!__pyx_t_13)) __PYX_ERR(0, 1191, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__65, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_idx, __pyx_t_13); __pyx_t_13 = 0; - /* "mtrand.pyx":1189 + /* "mtrand.pyx":1194 * idx.shape = shape * * if shape is None and isinstance(idx, np.ndarray): # <<<<<<<<<<<<<< @@ -19317,23 +19487,23 @@ */ } - /* "mtrand.pyx":1194 + /* "mtrand.pyx":1199 * * #Use samples as indices for a if a is array-like * if a.ndim == 0: # <<<<<<<<<<<<<< * return idx * */ - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_ndim); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1194, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_ndim); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_t_13, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1194, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_t_13, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1194, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 1199, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_11) { - /* "mtrand.pyx":1195 + /* "mtrand.pyx":1200 * #Use samples as indices for a if a is array-like * if a.ndim == 0: * return idx # <<<<<<<<<<<<<< @@ -19345,7 +19515,7 @@ __pyx_r = __pyx_v_idx; goto __pyx_L0; - /* "mtrand.pyx":1194 + /* "mtrand.pyx":1199 * * #Use samples as indices for a if a is array-like * if a.ndim == 0: # <<<<<<<<<<<<<< @@ -19354,7 +19524,7 @@ */ } - /* "mtrand.pyx":1197 + /* "mtrand.pyx":1202 * return idx * * if shape is not None and idx.ndim == 
0: # <<<<<<<<<<<<<< @@ -19362,61 +19532,61 @@ * # a scalar object when size is None. However a[idx] is always a */ __pyx_t_5 = (__pyx_v_shape != Py_None); - __pyx_t_16 = (__pyx_t_5 != 0); - if (__pyx_t_16) { + __pyx_t_17 = (__pyx_t_5 != 0); + if (__pyx_t_17) { } else { - __pyx_t_11 = __pyx_t_16; + __pyx_t_11 = __pyx_t_17; goto __pyx_L38_bool_binop_done; } - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_idx, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1197, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_idx, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_13 = __Pyx_PyInt_EqObjC(__pyx_t_3, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1197, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyInt_EqObjC(__pyx_t_3, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_16 = __Pyx_PyObject_IsTrue(__pyx_t_13); if (unlikely(__pyx_t_16 < 0)) __PYX_ERR(0, 1197, __pyx_L1_error) + __pyx_t_17 = __Pyx_PyObject_IsTrue(__pyx_t_13); if (unlikely(__pyx_t_17 < 0)) __PYX_ERR(0, 1202, __pyx_L1_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __pyx_t_11 = __pyx_t_16; + __pyx_t_11 = __pyx_t_17; __pyx_L38_bool_binop_done:; if (__pyx_t_11) { - /* "mtrand.pyx":1203 + /* "mtrand.pyx":1208 * # array, taking into account that np.array(item) may not work * # for object arrays. 
* res = np.empty((), dtype=a.dtype) # <<<<<<<<<<<<<< * res[()] = a[idx] * return res */ - __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1203, __pyx_L1_error) + __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1203, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __pyx_t_13 = PyDict_New(); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1203, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 1208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_dtype); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1203, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_v_a, __pyx_n_s_dtype); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); - if (PyDict_SetItem(__pyx_t_13, __pyx_n_s_dtype, __pyx_t_14) < 0) __PYX_ERR(0, 1203, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_13, __pyx_n_s_dtype, __pyx_t_14) < 0) __PYX_ERR(0, 1208, __pyx_L1_error) __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__64, __pyx_t_13); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1203, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__66, __pyx_t_13); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_v_res = __pyx_t_14; __pyx_t_14 = 0; - /* "mtrand.pyx":1204 + /* "mtrand.pyx":1209 * # for object arrays. 
* res = np.empty((), dtype=a.dtype) * res[()] = a[idx] # <<<<<<<<<<<<<< * return res * */ - __pyx_t_14 = PyObject_GetItem(__pyx_v_a, __pyx_v_idx); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1204, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_GetItem(__pyx_v_a, __pyx_v_idx); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); - if (unlikely(PyObject_SetItem(__pyx_v_res, __pyx_empty_tuple, __pyx_t_14) < 0)) __PYX_ERR(0, 1204, __pyx_L1_error) + if (unlikely(PyObject_SetItem(__pyx_v_res, __pyx_empty_tuple, __pyx_t_14) < 0)) __PYX_ERR(0, 1209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - /* "mtrand.pyx":1205 + /* "mtrand.pyx":1210 * res = np.empty((), dtype=a.dtype) * res[()] = a[idx] * return res # <<<<<<<<<<<<<< @@ -19428,7 +19598,7 @@ __pyx_r = __pyx_v_res; goto __pyx_L0; - /* "mtrand.pyx":1197 + /* "mtrand.pyx":1202 * return idx * * if shape is not None and idx.ndim == 0: # <<<<<<<<<<<<<< @@ -19437,7 +19607,7 @@ */ } - /* "mtrand.pyx":1207 + /* "mtrand.pyx":1212 * return res * * return a[idx] # <<<<<<<<<<<<<< @@ -19445,13 +19615,13 @@ * */ __Pyx_XDECREF(__pyx_r); - __pyx_t_14 = PyObject_GetItem(__pyx_v_a, __pyx_v_idx); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1207, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyObject_GetItem(__pyx_v_a, __pyx_v_idx); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 1212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __pyx_r = __pyx_t_14; __pyx_t_14 = 0; goto __pyx_L0; - /* "mtrand.pyx":1028 + /* "mtrand.pyx":1033 * * * def choice(self, a, size=None, replace=True, p=None): # <<<<<<<<<<<<<< @@ -19472,6 +19642,7 @@ __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pop_size); + __Pyx_XDECREF(__pyx_v_d); __Pyx_XDECREF(__pyx_v_atol); __Pyx_XDECREF(__pyx_v_shape); __Pyx_XDECREF(__pyx_v_cdf); @@ -19493,7 +19664,7 @@ return __pyx_r; } -/* "mtrand.pyx":1210 +/* "mtrand.pyx":1215 * * * def uniform(self, low=0.0, high=1.0, size=None): # <<<<<<<<<<<<<< @@ -19534,24 +19705,24 @@ switch (pos_args) { case 0: if (kw_args > 
0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low); if (value) { values[0] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "uniform") < 0)) __PYX_ERR(0, 1210, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "uniform") < 0)) __PYX_ERR(0, 1215, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -19571,7 +19742,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("uniform", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1210, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("uniform", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1215, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.uniform", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -19606,14 +19777,14 @@ int __pyx_t_10; __Pyx_RefNannySetupContext("uniform", 0); - /* "mtrand.pyx":1291 + /* "mtrand.pyx":1296 * cdef object temp * * olow = PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * ohigh = PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_low, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1291, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_low, NPY_DOUBLE, NPY_ARRAY_ALIGNED); 
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1296, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -19621,14 +19792,14 @@ __pyx_v_olow = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":1292 + /* "mtrand.pyx":1297 * * olow = PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * ohigh = PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if olow.shape == ohigh.shape == (): */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_high, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1292, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_high, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1297, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -19636,49 +19807,49 @@ __pyx_v_ohigh = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":1294 + /* "mtrand.pyx":1299 * ohigh = PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if olow.shape == ohigh.shape == (): # <<<<<<<<<<<<<< * flow = PyFloat_AsDouble(low) * fhigh = PyFloat_AsDouble(high) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_olow), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1294, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_olow), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1299, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ohigh), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1294, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ohigh), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1299, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1294, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); 
__Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1299, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1294, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1299, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1294, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1299, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":1295 + /* "mtrand.pyx":1300 * * if olow.shape == ohigh.shape == (): * flow = PyFloat_AsDouble(low) # <<<<<<<<<<<<<< * fhigh = PyFloat_AsDouble(high) * fscale = fhigh - flow */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_low); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 1295, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_low); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 1300, __pyx_L1_error) __pyx_v_flow = __pyx_t_5; - /* "mtrand.pyx":1296 + /* "mtrand.pyx":1301 * if olow.shape == ohigh.shape == (): * flow = PyFloat_AsDouble(low) * fhigh = PyFloat_AsDouble(high) # <<<<<<<<<<<<<< * fscale = fhigh - flow * */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_high); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 1296, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_high); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 1301, __pyx_L1_error) __pyx_v_fhigh = __pyx_t_5; - /* "mtrand.pyx":1297 + /* "mtrand.pyx":1302 * flow = PyFloat_AsDouble(low) * fhigh = PyFloat_AsDouble(high) * fscale = fhigh - flow # <<<<<<<<<<<<<< @@ 
-19687,7 +19858,7 @@ */ __pyx_v_fscale = (__pyx_v_fhigh - __pyx_v_flow); - /* "mtrand.pyx":1299 + /* "mtrand.pyx":1304 * fscale = fhigh - flow * * if not npy_isfinite(fscale): # <<<<<<<<<<<<<< @@ -19695,22 +19866,22 @@ * */ __pyx_t_4 = ((!(npy_isfinite(__pyx_v_fscale) != 0)) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1300 + /* "mtrand.pyx":1305 * * if not npy_isfinite(fscale): * raise OverflowError('Range exceeds valid bounds') # <<<<<<<<<<<<<< * * return cont2_array_sc(self.internal_state, rk_uniform, size, flow, */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_OverflowError, __pyx_tuple__65, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1300, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_OverflowError, __pyx_tuple__67, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 1300, __pyx_L1_error) + __PYX_ERR(0, 1305, __pyx_L1_error) - /* "mtrand.pyx":1299 + /* "mtrand.pyx":1304 * fscale = fhigh - flow * * if not npy_isfinite(fscale): # <<<<<<<<<<<<<< @@ -19719,7 +19890,7 @@ */ } - /* "mtrand.pyx":1302 + /* "mtrand.pyx":1307 * raise OverflowError('Range exceeds valid bounds') * * return cont2_array_sc(self.internal_state, rk_uniform, size, flow, # <<<<<<<<<<<<<< @@ -19728,7 +19899,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1303 + /* "mtrand.pyx":1308 * * return cont2_array_sc(self.internal_state, rk_uniform, size, flow, * fscale, self.lock) # <<<<<<<<<<<<<< @@ -19738,21 +19909,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":1302 + /* "mtrand.pyx":1307 * raise OverflowError('Range exceeds valid bounds') * * return cont2_array_sc(self.internal_state, rk_uniform, size, flow, # <<<<<<<<<<<<<< * fscale, self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_uniform, __pyx_v_size, __pyx_v_flow, __pyx_v_fscale, __pyx_t_3); 
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1302, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_uniform, __pyx_v_size, __pyx_v_flow, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":1294 + /* "mtrand.pyx":1299 * ohigh = PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if olow.shape == ohigh.shape == (): # <<<<<<<<<<<<<< @@ -19761,16 +19932,16 @@ */ } - /* "mtrand.pyx":1305 + /* "mtrand.pyx":1310 * fscale, self.lock) * * temp = np.subtract(ohigh, olow) # <<<<<<<<<<<<<< * Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting * # rules because EnsureArray steals a reference */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1305, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_subtract); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1305, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_subtract); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -19788,7 +19959,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_ohigh), ((PyObject *)__pyx_v_olow)}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1305, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1310, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -19796,13 
+19967,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_ohigh), ((PyObject *)__pyx_v_olow)}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1305, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1310, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1305, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __pyx_t_3 = NULL; @@ -19813,7 +19984,7 @@ __Pyx_INCREF(((PyObject *)__pyx_v_olow)); __Pyx_GIVEREF(((PyObject *)__pyx_v_olow)); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, ((PyObject *)__pyx_v_olow)); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1305, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } @@ -19821,7 +19992,7 @@ __pyx_v_temp = __pyx_t_2; __pyx_t_2 = 0; - /* "mtrand.pyx":1306 + /* "mtrand.pyx":1311 * * temp = np.subtract(ohigh, olow) * Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting # <<<<<<<<<<<<<< @@ -19830,14 +20001,14 @@ */ Py_INCREF(__pyx_v_temp); - /* "mtrand.pyx":1308 + /* "mtrand.pyx":1313 * Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting * # rules because EnsureArray steals a reference * odiff = PyArray_EnsureArray(temp) # <<<<<<<<<<<<<< * * if not np.all(np.isfinite(odiff)): */ - __pyx_t_2 = 
PyArray_EnsureArray(__pyx_v_temp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1308, __pyx_L1_error) + __pyx_t_2 = PyArray_EnsureArray(__pyx_v_temp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1313, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -19845,21 +20016,21 @@ __pyx_v_odiff = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":1310 + /* "mtrand.pyx":1315 * odiff = PyArray_EnsureArray(temp) * * if not np.all(np.isfinite(odiff)): # <<<<<<<<<<<<<< * raise OverflowError('Range exceeds valid bounds') * */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_all); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_all); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_isfinite); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_isfinite); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -19873,13 +20044,13 @@ } } if (!__pyx_t_3) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_8, ((PyObject *)__pyx_v_odiff)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_8, ((PyObject 
*)__pyx_v_odiff)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_8)) { PyObject *__pyx_temp[2] = {__pyx_t_3, ((PyObject *)__pyx_v_odiff)}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -19887,19 +20058,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_8)) { PyObject *__pyx_temp[2] = {__pyx_t_3, ((PyObject *)__pyx_v_odiff)}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_odiff)); __Pyx_GIVEREF(((PyObject *)__pyx_v_odiff)); PyTuple_SET_ITEM(__pyx_t_9, 0+1, ((PyObject *)__pyx_v_odiff)); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -19916,14 +20087,14 @@ } } if (!__pyx_t_8) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_2); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -19932,44 +20103,44 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - 
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1310, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1315, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_10 = ((!__pyx_t_4) != 0); - if (__pyx_t_10) { + if (unlikely(__pyx_t_10)) { - /* "mtrand.pyx":1311 + /* "mtrand.pyx":1316 * * if not np.all(np.isfinite(odiff)): * raise OverflowError('Range exceeds valid bounds') # <<<<<<<<<<<<<< * * return cont2_array(self.internal_state, rk_uniform, size, olow, odiff, */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_OverflowError, __pyx_tuple__66, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1311, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_OverflowError, __pyx_tuple__68, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1316, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 1311, __pyx_L1_error) + __PYX_ERR(0, 1316, __pyx_L1_error) - /* "mtrand.pyx":1310 + /* "mtrand.pyx":1315 * odiff = PyArray_EnsureArray(temp) * * if not np.all(np.isfinite(odiff)): # <<<<<<<<<<<<<< @@ -19978,7 +20149,7 @@ */ } - /* "mtrand.pyx":1313 + /* "mtrand.pyx":1318 * raise OverflowError('Range exceeds valid bounds') * * return cont2_array(self.internal_state, rk_uniform, size, olow, odiff, # <<<<<<<<<<<<<< @@ -19987,7 +20158,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1314 + /* "mtrand.pyx":1319 * * return cont2_array(self.internal_state, rk_uniform, size, olow, odiff, * self.lock) # <<<<<<<<<<<<<< @@ -19997,21 +20168,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":1313 + /* "mtrand.pyx":1318 * raise OverflowError('Range exceeds valid bounds') * * return cont2_array(self.internal_state, rk_uniform, size, olow, odiff, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_uniform, __pyx_v_size, 
__pyx_v_olow, __pyx_v_odiff, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1313, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_uniform, __pyx_v_size, __pyx_v_olow, __pyx_v_odiff, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1318, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":1210 + /* "mtrand.pyx":1215 * * * def uniform(self, low=0.0, high=1.0, size=None): # <<<<<<<<<<<<<< @@ -20039,7 +20210,7 @@ return __pyx_r; } -/* "mtrand.pyx":1316 +/* "mtrand.pyx":1321 * self.lock) * * def rand(self, *args): # <<<<<<<<<<<<<< @@ -20076,18 +20247,18 @@ PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("rand", 0); - /* "mtrand.pyx":1355 + /* "mtrand.pyx":1360 * * """ * if len(args) == 0: # <<<<<<<<<<<<<< * return self.random_sample() * else: */ - __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_args); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 1355, __pyx_L1_error) + __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_args); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 1360, __pyx_L1_error) __pyx_t_2 = ((__pyx_t_1 == 0) != 0); if (__pyx_t_2) { - /* "mtrand.pyx":1356 + /* "mtrand.pyx":1361 * """ * if len(args) == 0: * return self.random_sample() # <<<<<<<<<<<<<< @@ -20095,7 +20266,7 @@ * return self.random_sample(size=args) */ __Pyx_XDECREF(__pyx_r); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_random_sample); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1356, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_random_sample); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { @@ -20108,10 +20279,10 @@ } } if (__pyx_t_5) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1356, __pyx_L1_error) + 
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1361, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { - __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1356, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1361, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -20119,7 +20290,7 @@ __pyx_t_3 = 0; goto __pyx_L0; - /* "mtrand.pyx":1355 + /* "mtrand.pyx":1360 * * """ * if len(args) == 0: # <<<<<<<<<<<<<< @@ -20128,7 +20299,7 @@ */ } - /* "mtrand.pyx":1358 + /* "mtrand.pyx":1363 * return self.random_sample() * else: * return self.random_sample(size=args) # <<<<<<<<<<<<<< @@ -20137,12 +20308,12 @@ */ /*else*/ { __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_random_sample); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1358, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_random_sample); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1363, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1358, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1363, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_size, __pyx_v_args) < 0) __PYX_ERR(0, 1358, __pyx_L1_error) - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1358, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_size, __pyx_v_args) < 0) __PYX_ERR(0, 1363, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1363, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -20151,7 +20322,7 
@@ goto __pyx_L0; } - /* "mtrand.pyx":1316 + /* "mtrand.pyx":1321 * self.lock) * * def rand(self, *args): # <<<<<<<<<<<<<< @@ -20172,7 +20343,7 @@ return __pyx_r; } -/* "mtrand.pyx":1360 +/* "mtrand.pyx":1365 * return self.random_sample(size=args) * * def randn(self, *args): # <<<<<<<<<<<<<< @@ -20210,18 +20381,18 @@ PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("randn", 0); - /* "mtrand.pyx":1412 + /* "mtrand.pyx":1417 * * """ * if len(args) == 0: # <<<<<<<<<<<<<< * return self.standard_normal() * else: */ - __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_args); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 1412, __pyx_L1_error) + __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_args); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 1417, __pyx_L1_error) __pyx_t_2 = ((__pyx_t_1 == 0) != 0); if (__pyx_t_2) { - /* "mtrand.pyx":1413 + /* "mtrand.pyx":1418 * """ * if len(args) == 0: * return self.standard_normal() # <<<<<<<<<<<<<< @@ -20229,7 +20400,7 @@ * return self.standard_normal(args) */ __Pyx_XDECREF(__pyx_r); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_standard_normal); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1413, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_standard_normal); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { @@ -20242,10 +20413,10 @@ } } if (__pyx_t_5) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1413, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1418, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { - __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1413, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1418, 
__pyx_L1_error) } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -20253,7 +20424,7 @@ __pyx_t_3 = 0; goto __pyx_L0; - /* "mtrand.pyx":1412 + /* "mtrand.pyx":1417 * * """ * if len(args) == 0: # <<<<<<<<<<<<<< @@ -20262,7 +20433,7 @@ */ } - /* "mtrand.pyx":1415 + /* "mtrand.pyx":1420 * return self.standard_normal() * else: * return self.standard_normal(args) # <<<<<<<<<<<<<< @@ -20271,7 +20442,7 @@ */ /*else*/ { __Pyx_XDECREF(__pyx_r); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_standard_normal); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1415, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_standard_normal); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { @@ -20284,13 +20455,13 @@ } } if (!__pyx_t_5) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_args); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1415, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_args); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_v_args}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1415, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1420, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -20298,19 +20469,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_v_args}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1415, __pyx_L1_error) + __pyx_t_3 = 
__Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1420, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1415, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_v_args); __Pyx_GIVEREF(__pyx_v_args); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_v_args); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1415, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } @@ -20321,7 +20492,7 @@ goto __pyx_L0; } - /* "mtrand.pyx":1360 + /* "mtrand.pyx":1365 * return self.random_sample(size=args) * * def randn(self, *args): # <<<<<<<<<<<<<< @@ -20343,7 +20514,7 @@ return __pyx_r; } -/* "mtrand.pyx":1417 +/* "mtrand.pyx":1422 * return self.standard_normal(args) * * def random_integers(self, low, high=None, size=None): # <<<<<<<<<<<<<< @@ -20382,23 +20553,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_low)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_high); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_high); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, 
__pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "random_integers") < 0)) __PYX_ERR(0, 1417, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "random_integers") < 0)) __PYX_ERR(0, 1422, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -20417,7 +20588,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("random_integers", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1417, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("random_integers", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1422, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.random_integers", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -20445,7 +20616,7 @@ __Pyx_INCREF(__pyx_v_low); __Pyx_INCREF(__pyx_v_high); - /* "mtrand.pyx":1497 + /* "mtrand.pyx":1502 * * """ * if high is None: # <<<<<<<<<<<<<< @@ -20456,37 +20627,37 @@ __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { - /* "mtrand.pyx":1498 + /* "mtrand.pyx":1503 * """ * if high is None: * warnings.warn(("This function is deprecated. 
Please call " # <<<<<<<<<<<<<< * "randint(1, {low} + 1) instead".format(low=low)), * DeprecationWarning) */ - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_warnings); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1498, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_warnings); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_warn); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1498, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_warn); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - /* "mtrand.pyx":1499 + /* "mtrand.pyx":1504 * if high is None: * warnings.warn(("This function is deprecated. Please call " * "randint(1, {low} + 1) instead".format(low=low)), # <<<<<<<<<<<<<< * DeprecationWarning) * high = low */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_This_function_is_deprecated_Plea, __pyx_n_s_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1499, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_This_function_is_deprecated_Plea, __pyx_n_s_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = PyDict_New(); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1499, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_low, __pyx_v_low) < 0) __PYX_ERR(0, 1499, __pyx_L1_error) - __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_empty_tuple, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1499, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_low, __pyx_v_low) < 0) __PYX_ERR(0, 1504, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_empty_tuple, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1504, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - /* "mtrand.pyx":1500 + /* "mtrand.pyx":1505 * warnings.warn(("This function is deprecated. Please call " * "randint(1, {low} + 1) instead".format(low=low)), * DeprecationWarning) # <<<<<<<<<<<<<< @@ -20508,7 +20679,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_7, __pyx_builtin_DeprecationWarning}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1498, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1503, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; @@ -20517,14 +20688,14 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_7, __pyx_builtin_DeprecationWarning}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1498, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1503, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } else #endif { - __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1498, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -20535,14 +20706,14 @@ __Pyx_GIVEREF(__pyx_builtin_DeprecationWarning); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_8, __pyx_builtin_DeprecationWarning); __pyx_t_7 = 0; - __pyx_t_3 = 
__Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1498, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":1501 + /* "mtrand.pyx":1506 * "randint(1, {low} + 1) instead".format(low=low)), * DeprecationWarning) * high = low # <<<<<<<<<<<<<< @@ -20552,7 +20723,7 @@ __Pyx_INCREF(__pyx_v_low); __Pyx_DECREF_SET(__pyx_v_high, __pyx_v_low); - /* "mtrand.pyx":1502 + /* "mtrand.pyx":1507 * DeprecationWarning) * high = low * low = 1 # <<<<<<<<<<<<<< @@ -20562,7 +20733,7 @@ __Pyx_INCREF(__pyx_int_1); __Pyx_DECREF_SET(__pyx_v_low, __pyx_int_1); - /* "mtrand.pyx":1497 + /* "mtrand.pyx":1502 * * """ * if high is None: # <<<<<<<<<<<<<< @@ -20572,7 +20743,7 @@ goto __pyx_L3; } - /* "mtrand.pyx":1505 + /* "mtrand.pyx":1510 * * else: * warnings.warn(("This function is deprecated. Please call " # <<<<<<<<<<<<<< @@ -20580,47 +20751,47 @@ * low=low, high=high)), DeprecationWarning) */ /*else*/ { - __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_warnings); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1505, __pyx_L1_error) + __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_warnings); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_warn); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1505, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_warn); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - /* "mtrand.pyx":1506 + /* "mtrand.pyx":1511 * else: * warnings.warn(("This function is deprecated. 
Please call " * "randint({low}, {high} + 1) instead".format( # <<<<<<<<<<<<<< * low=low, high=high)), DeprecationWarning) * */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_This_function_is_deprecated_Plea_2, __pyx_n_s_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1506, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_This_function_is_deprecated_Plea_2, __pyx_n_s_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); - /* "mtrand.pyx":1507 + /* "mtrand.pyx":1512 * warnings.warn(("This function is deprecated. Please call " * "randint({low}, {high} + 1) instead".format( * low=low, high=high)), DeprecationWarning) # <<<<<<<<<<<<<< * * return self.randint(low, high + 1, size=size, dtype='l') */ - __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1507, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_low, __pyx_v_low) < 0) __PYX_ERR(0, 1507, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_high, __pyx_v_high) < 0) __PYX_ERR(0, 1507, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_low, __pyx_v_low) < 0) __PYX_ERR(0, 1512, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_high, __pyx_v_high) < 0) __PYX_ERR(0, 1512, __pyx_L1_error) - /* "mtrand.pyx":1506 + /* "mtrand.pyx":1511 * else: * warnings.warn(("This function is deprecated. 
Please call " * "randint({low}, {high} + 1) instead".format( # <<<<<<<<<<<<<< * low=low, high=high)), DeprecationWarning) * */ - __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_empty_tuple, __pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1506, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_empty_tuple, __pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":1507 + /* "mtrand.pyx":1512 * warnings.warn(("This function is deprecated. Please call " * "randint({low}, {high} + 1) instead".format( * low=low, high=high)), DeprecationWarning) # <<<<<<<<<<<<<< @@ -20642,7 +20813,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_builtin_DeprecationWarning}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1505, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1510, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; @@ -20651,14 +20822,14 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_builtin_DeprecationWarning}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1505, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1510, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { - __pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1505, __pyx_L1_error) + 
__pyx_t_5 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_7); __pyx_t_7 = NULL; @@ -20669,7 +20840,7 @@ __Pyx_GIVEREF(__pyx_builtin_DeprecationWarning); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_8, __pyx_builtin_DeprecationWarning); __pyx_t_6 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1505, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } @@ -20678,7 +20849,7 @@ } __pyx_L3:; - /* "mtrand.pyx":1509 + /* "mtrand.pyx":1514 * low=low, high=high)), DeprecationWarning) * * return self.randint(low, high + 1, size=size, dtype='l') # <<<<<<<<<<<<<< @@ -20686,11 +20857,11 @@ * */ __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_randint); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1509, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_randint); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1514, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_v_high, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1509, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyInt_AddObjC(__pyx_v_high, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1514, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1509, __pyx_L1_error) + __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1514, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_low); __Pyx_GIVEREF(__pyx_v_low); @@ -20698,11 +20869,11 @@ __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(0, 1509, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1514, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_size, __pyx_v_size) < 0) __PYX_ERR(0, 1509, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_n_s_l) < 0) __PYX_ERR(0, 1509, __pyx_L1_error) - __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1509, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_size, __pyx_v_size) < 0) __PYX_ERR(0, 1514, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_n_s_l) < 0) __PYX_ERR(0, 1514, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1514, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; @@ -20711,7 +20882,7 @@ __pyx_t_6 = 0; goto __pyx_L0; - /* "mtrand.pyx":1417 + /* "mtrand.pyx":1422 * return self.standard_normal(args) * * def random_integers(self, low, high=None, size=None): # <<<<<<<<<<<<<< @@ -20736,7 +20907,7 @@ return __pyx_r; } -/* "mtrand.pyx":1514 +/* "mtrand.pyx":1519 * * # Complicated, continuous distributions: * def standard_normal(self, size=None): # <<<<<<<<<<<<<< @@ -20769,12 +20940,12 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[0] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_normal") < 0)) __PYX_ERR(0, 1514, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_normal") < 0)) __PYX_ERR(0, 1519, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -20788,7 
+20959,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("standard_normal", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1514, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("standard_normal", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1519, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.standard_normal", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -20808,7 +20979,7 @@ PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("standard_normal", 0); - /* "mtrand.pyx":1545 + /* "mtrand.pyx":1550 * * """ * return cont0_array(self.internal_state, rk_gauss, size, self.lock) # <<<<<<<<<<<<<< @@ -20818,14 +20989,14 @@ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - __pyx_t_2 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_gauss, __pyx_v_size, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1545, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_gauss, __pyx_v_size, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":1514 + /* "mtrand.pyx":1519 * * # Complicated, continuous distributions: * def standard_normal(self, size=None): # <<<<<<<<<<<<<< @@ -20845,7 +21016,7 @@ return __pyx_r; } -/* "mtrand.pyx":1547 +/* "mtrand.pyx":1552 * return cont0_array(self.internal_state, rk_gauss, size, self.lock) * * def normal(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -20886,24 +21057,24 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_loc); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_loc); if (value) { values[0] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s_scale); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_scale); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "normal") < 0)) __PYX_ERR(0, 1547, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "normal") < 0)) __PYX_ERR(0, 1552, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -20923,7 +21094,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("normal", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1547, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("normal", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1552, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.normal", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -20953,14 +21124,14 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("normal", 0); - /* "mtrand.pyx":1639 + /* "mtrand.pyx":1644 * cdef double floc, fscale * * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1639, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -20968,14 +21139,14 @@ __pyx_v_oloc = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* 
"mtrand.pyx":1640 + /* "mtrand.pyx":1645 * * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oloc.shape == oscale.shape == (): */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1640, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -20983,61 +21154,61 @@ __pyx_v_oscale = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":1642 + /* "mtrand.pyx":1647 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oloc.shape == oscale.shape == (): # <<<<<<<<<<<<<< * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oloc), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1642, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oloc), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1647, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1642, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1647, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1642, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1647, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); 
__Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1642, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1647, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1642, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1647, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":1643 + /* "mtrand.pyx":1648 * * if oloc.shape == oscale.shape == (): * floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_loc); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 1643, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_loc); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 1648, __pyx_L1_error) __pyx_v_floc = __pyx_t_5; - /* "mtrand.pyx":1644 + /* "mtrand.pyx":1649 * if oloc.shape == oscale.shape == (): * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if np.signbit(fscale): * raise ValueError("scale < 0") */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 1644, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 1649, __pyx_L1_error) __pyx_v_fscale = __pyx_t_5; - /* "mtrand.pyx":1645 + /* "mtrand.pyx":1650 * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_normal, size, floc, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 1645, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1650, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1645, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1650, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1645, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1650, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { @@ -21050,14 +21221,14 @@ } } if (!__pyx_t_6) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1645, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1650, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1645, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1650, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -21066,43 +21237,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1645, __pyx_L1_error) + __pyx_t_3 = 
__Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1650, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1645, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1650, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1645, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1650, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1645, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1650, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1646 + /* "mtrand.pyx":1651 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_normal, size, floc, * fscale, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__67, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1646, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__69, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 1646, __pyx_L1_error) + __PYX_ERR(0, 1651, 
__pyx_L1_error) - /* "mtrand.pyx":1645 + /* "mtrand.pyx":1650 * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< @@ -21111,7 +21282,7 @@ */ } - /* "mtrand.pyx":1647 + /* "mtrand.pyx":1652 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_normal, size, floc, # <<<<<<<<<<<<<< @@ -21120,7 +21291,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1648 + /* "mtrand.pyx":1653 * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_normal, size, floc, * fscale, self.lock) # <<<<<<<<<<<<<< @@ -21130,21 +21301,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":1647 + /* "mtrand.pyx":1652 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_normal, size, floc, # <<<<<<<<<<<<<< * fscale, self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_normal, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1647, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_normal, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1652, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":1642 + /* "mtrand.pyx":1647 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oloc.shape == oscale.shape == (): # <<<<<<<<<<<<<< @@ -21153,21 +21324,21 @@ */ } - /* "mtrand.pyx":1650 + /* "mtrand.pyx":1655 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_normal, size, oloc, oscale, */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_3 = 
__Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -21181,13 +21352,13 @@ } } if (!__pyx_t_2) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -21195,19 +21366,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject 
*__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oscale)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oscale)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_oscale)); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -21224,14 +21395,14 @@ } } if (!__pyx_t_6) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -21240,43 +21411,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1650, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1655, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1651 + /* "mtrand.pyx":1656 * * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_normal, size, oloc, oscale, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__68, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1651, __pyx_L1_error) + 
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__70, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1656, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 1651, __pyx_L1_error) + __PYX_ERR(0, 1656, __pyx_L1_error) - /* "mtrand.pyx":1650 + /* "mtrand.pyx":1655 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< @@ -21285,7 +21456,7 @@ */ } - /* "mtrand.pyx":1652 + /* "mtrand.pyx":1657 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_normal, size, oloc, oscale, # <<<<<<<<<<<<<< @@ -21294,7 +21465,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1653 + /* "mtrand.pyx":1658 * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_normal, size, oloc, oscale, * self.lock) # <<<<<<<<<<<<<< @@ -21304,21 +21475,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":1652 + /* "mtrand.pyx":1657 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_normal, size, oloc, oscale, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_normal, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1652, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_normal, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1657, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":1547 + /* "mtrand.pyx":1552 * return cont0_array(self.internal_state, rk_gauss, size, self.lock) * * def normal(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -21344,7 +21515,7 @@ return __pyx_r; } -/* "mtrand.pyx":1655 +/* "mtrand.pyx":1660 * self.lock) * 
* def beta(self, a, b, size=None): # <<<<<<<<<<<<<< @@ -21382,23 +21553,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_b)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_b)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("beta", 0, 2, 3, 1); __PYX_ERR(0, 1655, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("beta", 0, 2, 3, 1); __PYX_ERR(0, 1660, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "beta") < 0)) __PYX_ERR(0, 1655, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "beta") < 0)) __PYX_ERR(0, 1660, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -21416,7 +21587,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("beta", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1655, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("beta", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1660, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.beta", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -21447,14 +21618,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("beta", 0); - /* "mtrand.pyx":1696 + /* "mtrand.pyx":1701 * cdef double fa, fb * * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, 
NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * ob = PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1696, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1701, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -21462,14 +21633,14 @@ __pyx_v_oa = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":1697 + /* "mtrand.pyx":1702 * * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * ob = PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oa.shape == ob.shape == (): */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_b, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1697, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_b, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1702, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -21477,49 +21648,49 @@ __pyx_v_ob = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":1699 + /* "mtrand.pyx":1704 * ob = PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oa.shape == ob.shape == (): # <<<<<<<<<<<<<< * fa = PyFloat_AsDouble(a) * fb = PyFloat_AsDouble(b) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oa), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1699, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oa), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1704, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ob), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1699, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ob), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1704, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1699, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1704, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1699, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1704, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1699, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1704, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":1700 + /* "mtrand.pyx":1705 * * if oa.shape == ob.shape == (): * fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<< * fb = PyFloat_AsDouble(b) * */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_a); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 1700, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_a); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 1705, __pyx_L1_error) __pyx_v_fa = __pyx_t_5; - /* "mtrand.pyx":1701 + /* "mtrand.pyx":1706 * if oa.shape == ob.shape == (): * fa = PyFloat_AsDouble(a) * fb = PyFloat_AsDouble(b) # <<<<<<<<<<<<<< * * if fa <= 0: */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_b); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 1701, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_b); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 1706, __pyx_L1_error) __pyx_v_fb = 
__pyx_t_5; - /* "mtrand.pyx":1703 + /* "mtrand.pyx":1708 * fb = PyFloat_AsDouble(b) * * if fa <= 0: # <<<<<<<<<<<<<< @@ -21527,22 +21698,22 @@ * if fb <= 0: */ __pyx_t_4 = ((__pyx_v_fa <= 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1704 + /* "mtrand.pyx":1709 * * if fa <= 0: * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * if fb <= 0: * raise ValueError("b <= 0") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__69, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1704, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__71, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1709, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 1704, __pyx_L1_error) + __PYX_ERR(0, 1709, __pyx_L1_error) - /* "mtrand.pyx":1703 + /* "mtrand.pyx":1708 * fb = PyFloat_AsDouble(b) * * if fa <= 0: # <<<<<<<<<<<<<< @@ -21551,7 +21722,7 @@ */ } - /* "mtrand.pyx":1705 + /* "mtrand.pyx":1710 * if fa <= 0: * raise ValueError("a <= 0") * if fb <= 0: # <<<<<<<<<<<<<< @@ -21559,22 +21730,22 @@ * return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb, */ __pyx_t_4 = ((__pyx_v_fb <= 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1706 + /* "mtrand.pyx":1711 * raise ValueError("a <= 0") * if fb <= 0: * raise ValueError("b <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb, * self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__70, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1706, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__72, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1711, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 1706, __pyx_L1_error) + __PYX_ERR(0, 1711, __pyx_L1_error) - /* 
"mtrand.pyx":1705 + /* "mtrand.pyx":1710 * if fa <= 0: * raise ValueError("a <= 0") * if fb <= 0: # <<<<<<<<<<<<<< @@ -21583,7 +21754,7 @@ */ } - /* "mtrand.pyx":1707 + /* "mtrand.pyx":1712 * if fb <= 0: * raise ValueError("b <= 0") * return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb, # <<<<<<<<<<<<<< @@ -21592,7 +21763,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1708 + /* "mtrand.pyx":1713 * raise ValueError("b <= 0") * return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb, * self.lock) # <<<<<<<<<<<<<< @@ -21602,21 +21773,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":1707 + /* "mtrand.pyx":1712 * if fb <= 0: * raise ValueError("b <= 0") * return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_beta, __pyx_v_size, __pyx_v_fa, __pyx_v_fb, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1707, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_beta, __pyx_v_size, __pyx_v_fa, __pyx_v_fb, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1712, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":1699 + /* "mtrand.pyx":1704 * ob = PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oa.shape == ob.shape == (): # <<<<<<<<<<<<<< @@ -21625,21 +21796,21 @@ */ } - /* "mtrand.pyx":1710 + /* "mtrand.pyx":1715 * self.lock) * * if np.any(np.less_equal(oa, 0)): # <<<<<<<<<<<<<< * raise ValueError("a <= 0") * if np.any(np.less_equal(ob, 0)): */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, 
__pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -21657,7 +21828,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_oa), __pyx_int_0}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -21665,13 +21836,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_oa), __pyx_int_0}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_9 = 
PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -21682,7 +21853,7 @@ __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_int_0); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -21698,14 +21869,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -21714,43 +21885,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1710, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1715, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1711 + /* "mtrand.pyx":1716 * * if np.any(np.less_equal(oa, 0)): * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * if np.any(np.less_equal(ob, 0)): * raise ValueError("b <= 0") */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__71, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1711, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__73, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1716, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 1711, __pyx_L1_error) + __PYX_ERR(0, 1716, __pyx_L1_error) - /* "mtrand.pyx":1710 + /* "mtrand.pyx":1715 * self.lock) * * if np.any(np.less_equal(oa, 0)): # <<<<<<<<<<<<<< @@ -21759,21 
+21930,21 @@ */ } - /* "mtrand.pyx":1712 + /* "mtrand.pyx":1717 * if np.any(np.less_equal(oa, 0)): * raise ValueError("a <= 0") * if np.any(np.less_equal(ob, 0)): # <<<<<<<<<<<<<< * raise ValueError("b <= 0") * return cont2_array(self.internal_state, rk_beta, size, oa, ob, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -21791,7 +21962,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_ob), __pyx_int_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else 
@@ -21799,13 +21970,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_ob), __pyx_int_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL; @@ -21816,7 +21987,7 @@ __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, __pyx_int_0); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } @@ -21832,14 +22003,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_2 = 
__Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -21848,43 +22019,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1712, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1717, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1713 + /* "mtrand.pyx":1718 * raise ValueError("a <= 0") * if np.any(np.less_equal(ob, 0)): * raise ValueError("b <= 0") # <<<<<<<<<<<<<< * return 
cont2_array(self.internal_state, rk_beta, size, oa, ob, * self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__72, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1713, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__74, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1718, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 1713, __pyx_L1_error) + __PYX_ERR(0, 1718, __pyx_L1_error) - /* "mtrand.pyx":1712 + /* "mtrand.pyx":1717 * if np.any(np.less_equal(oa, 0)): * raise ValueError("a <= 0") * if np.any(np.less_equal(ob, 0)): # <<<<<<<<<<<<<< @@ -21893,7 +22064,7 @@ */ } - /* "mtrand.pyx":1714 + /* "mtrand.pyx":1719 * if np.any(np.less_equal(ob, 0)): * raise ValueError("b <= 0") * return cont2_array(self.internal_state, rk_beta, size, oa, ob, # <<<<<<<<<<<<<< @@ -21902,7 +22073,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1715 + /* "mtrand.pyx":1720 * raise ValueError("b <= 0") * return cont2_array(self.internal_state, rk_beta, size, oa, ob, * self.lock) # <<<<<<<<<<<<<< @@ -21912,21 +22083,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":1714 + /* "mtrand.pyx":1719 * if np.any(np.less_equal(ob, 0)): * raise ValueError("b <= 0") * return cont2_array(self.internal_state, rk_beta, size, oa, ob, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_9 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_beta, __pyx_v_size, __pyx_v_oa, __pyx_v_ob, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1714, __pyx_L1_error) + __pyx_t_9 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_beta, __pyx_v_size, __pyx_v_oa, __pyx_v_ob, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1719, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; - /* "mtrand.pyx":1655 + /* "mtrand.pyx":1660 * self.lock) * * def 
beta(self, a, b, size=None): # <<<<<<<<<<<<<< @@ -21952,7 +22123,7 @@ return __pyx_r; } -/* "mtrand.pyx":1717 +/* "mtrand.pyx":1722 * self.lock) * * def exponential(self, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -21989,18 +22160,18 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_scale); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_scale); if (value) { values[0] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "exponential") < 0)) __PYX_ERR(0, 1717, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "exponential") < 0)) __PYX_ERR(0, 1722, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -22017,7 +22188,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("exponential", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1717, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("exponential", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1722, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.exponential", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -22045,14 +22216,14 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("exponential", 0); - /* "mtrand.pyx":1765 + /* "mtrand.pyx":1770 * cdef double fscale * * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oscale.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1765, __pyx_L1_error) + __pyx_t_1 = 
PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1770, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -22060,44 +22231,44 @@ __pyx_v_oscale = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":1767 + /* "mtrand.pyx":1772 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oscale.shape == (): # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1767, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1772, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1767, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1772, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1767, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1772, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":1768 + /* "mtrand.pyx":1773 * * if oscale.shape == (): * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if np.signbit(fscale): * raise ValueError("scale < 0") */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 1768, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 1773, __pyx_L1_error) __pyx_v_fscale = __pyx_t_4; - /* "mtrand.pyx":1769 + /* 
"mtrand.pyx":1774 * if oscale.shape == (): * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont1_array_sc(self.internal_state, rk_exponential, size, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1769, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1769, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1769, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { @@ -22110,14 +22281,14 @@ } } if (!__pyx_t_6) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1769, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1774, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1769, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1774, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ 
-22126,43 +22297,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1769, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1774, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1769, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1769, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1769, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1774, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":1770 + /* "mtrand.pyx":1775 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_exponential, size, * fscale, self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__73, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1770, __pyx_L1_error) + __pyx_t_1 = 
__Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__75, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1775, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 1770, __pyx_L1_error) + __PYX_ERR(0, 1775, __pyx_L1_error) - /* "mtrand.pyx":1769 + /* "mtrand.pyx":1774 * if oscale.shape == (): * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< @@ -22171,7 +22342,7 @@ */ } - /* "mtrand.pyx":1771 + /* "mtrand.pyx":1776 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont1_array_sc(self.internal_state, rk_exponential, size, # <<<<<<<<<<<<<< @@ -22180,7 +22351,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1772 + /* "mtrand.pyx":1777 * raise ValueError("scale < 0") * return cont1_array_sc(self.internal_state, rk_exponential, size, * fscale, self.lock) # <<<<<<<<<<<<<< @@ -22190,21 +22361,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":1771 + /* "mtrand.pyx":1776 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont1_array_sc(self.internal_state, rk_exponential, size, # <<<<<<<<<<<<<< * fscale, self.lock) * */ - __pyx_t_5 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_exponential, __pyx_v_size, __pyx_v_fscale, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1771, __pyx_L1_error) + __pyx_t_5 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_exponential, __pyx_v_size, __pyx_v_fscale, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1776, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; - /* "mtrand.pyx":1767 + /* "mtrand.pyx":1772 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oscale.shape == (): # <<<<<<<<<<<<<< @@ -22213,21 +22384,21 @@ */ } - /* "mtrand.pyx":1774 + /* "mtrand.pyx":1779 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # 
<<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont1_array(self.internal_state, rk_exponential, size, oscale, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -22241,13 +22412,13 @@ } } if (!__pyx_t_2) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 
1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -22255,19 +22426,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oscale)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oscale)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_oscale)); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -22284,14 +22455,14 @@ } } if (!__pyx_t_6) { - __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_5); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; - __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) 
__PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -22300,43 +22471,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; - __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1774, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1779, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":1775 + /* "mtrand.pyx":1780 * * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * 
return cont1_array(self.internal_state, rk_exponential, size, oscale, * self.lock) */ - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__74, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1775, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__76, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(0, 1775, __pyx_L1_error) + __PYX_ERR(0, 1780, __pyx_L1_error) - /* "mtrand.pyx":1774 + /* "mtrand.pyx":1779 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< @@ -22345,7 +22516,7 @@ */ } - /* "mtrand.pyx":1776 + /* "mtrand.pyx":1781 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont1_array(self.internal_state, rk_exponential, size, oscale, # <<<<<<<<<<<<<< @@ -22354,7 +22525,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1777 + /* "mtrand.pyx":1782 * raise ValueError("scale < 0") * return cont1_array(self.internal_state, rk_exponential, size, oscale, * self.lock) # <<<<<<<<<<<<<< @@ -22364,21 +22535,21 @@ __pyx_t_5 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_5); - /* "mtrand.pyx":1776 + /* "mtrand.pyx":1781 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont1_array(self.internal_state, rk_exponential, size, oscale, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_7 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_exponential, __pyx_v_size, __pyx_v_oscale, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1776, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_exponential, __pyx_v_size, __pyx_v_oscale, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1781, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":1717 + /* "mtrand.pyx":1722 * self.lock) * * def 
exponential(self, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -22403,7 +22574,7 @@ return __pyx_r; } -/* "mtrand.pyx":1779 +/* "mtrand.pyx":1784 * self.lock) * * def standard_exponential(self, size=None): # <<<<<<<<<<<<<< @@ -22436,12 +22607,12 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[0] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_exponential") < 0)) __PYX_ERR(0, 1779, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_exponential") < 0)) __PYX_ERR(0, 1784, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -22455,7 +22626,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("standard_exponential", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1779, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("standard_exponential", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1784, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.standard_exponential", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -22475,7 +22646,7 @@ PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("standard_exponential", 0); - /* "mtrand.pyx":1807 + /* "mtrand.pyx":1812 * * """ * return cont0_array(self.internal_state, rk_standard_exponential, size, # <<<<<<<<<<<<<< @@ -22484,7 +22655,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1808 + /* "mtrand.pyx":1813 * """ * return cont0_array(self.internal_state, rk_standard_exponential, size, * self.lock) # <<<<<<<<<<<<<< @@ -22494,21 +22665,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":1807 + /* "mtrand.pyx":1812 * * """ * return 
cont0_array(self.internal_state, rk_standard_exponential, size, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_standard_exponential, __pyx_v_size, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1807, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_standard_exponential, __pyx_v_size, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1812, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":1779 + /* "mtrand.pyx":1784 * self.lock) * * def standard_exponential(self, size=None): # <<<<<<<<<<<<<< @@ -22528,7 +22699,7 @@ return __pyx_r; } -/* "mtrand.pyx":1810 +/* "mtrand.pyx":1815 * self.lock) * * def standard_gamma(self, shape, size=None): # <<<<<<<<<<<<<< @@ -22563,17 +22734,17 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_gamma") < 0)) __PYX_ERR(0, 1810, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_gamma") < 0)) __PYX_ERR(0, 1815, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -22589,7 +22760,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("standard_gamma", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1810, 
__pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("standard_gamma", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1815, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.standard_gamma", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -22617,14 +22788,14 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("standard_gamma", 0); - /* "mtrand.pyx":1882 + /* "mtrand.pyx":1887 * cdef double fshape * * oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oshape.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1882, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1887, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -22632,44 +22803,44 @@ __pyx_v_oshape = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":1884 + /* "mtrand.pyx":1889 * oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oshape.shape == (): # <<<<<<<<<<<<<< * fshape = PyFloat_AsDouble(shape) * if np.signbit(fshape): */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oshape), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1884, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oshape), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1889, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1884, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1889, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) 
__PYX_ERR(0, 1884, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1889, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":1885 + /* "mtrand.pyx":1890 * * if oshape.shape == (): * fshape = PyFloat_AsDouble(shape) # <<<<<<<<<<<<<< * if np.signbit(fshape): * raise ValueError("shape < 0") */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_shape); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 1885, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_shape); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 1890, __pyx_L1_error) __pyx_v_fshape = __pyx_t_4; - /* "mtrand.pyx":1886 + /* "mtrand.pyx":1891 * if oshape.shape == (): * fshape = PyFloat_AsDouble(shape) * if np.signbit(fshape): # <<<<<<<<<<<<<< * raise ValueError("shape < 0") * return cont1_array_sc(self.internal_state, rk_standard_gamma, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1886, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1886, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fshape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1886, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fshape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { @@ -22682,14 +22853,14 @@ } } if (!__pyx_t_6) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 1886, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1886, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1891, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -22698,43 +22869,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1886, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1891, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1886, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1886, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } 
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1886, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":1887 + /* "mtrand.pyx":1892 * fshape = PyFloat_AsDouble(shape) * if np.signbit(fshape): * raise ValueError("shape < 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_standard_gamma, * size, fshape, self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__75, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1887, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__77, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1892, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 1887, __pyx_L1_error) + __PYX_ERR(0, 1892, __pyx_L1_error) - /* "mtrand.pyx":1886 + /* "mtrand.pyx":1891 * if oshape.shape == (): * fshape = PyFloat_AsDouble(shape) * if np.signbit(fshape): # <<<<<<<<<<<<<< @@ -22743,7 +22914,7 @@ */ } - /* "mtrand.pyx":1888 + /* "mtrand.pyx":1893 * if np.signbit(fshape): * raise ValueError("shape < 0") * return cont1_array_sc(self.internal_state, rk_standard_gamma, # <<<<<<<<<<<<<< @@ -22752,7 +22923,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1889 + /* "mtrand.pyx":1894 * raise ValueError("shape < 0") * return cont1_array_sc(self.internal_state, rk_standard_gamma, * size, fshape, self.lock) # <<<<<<<<<<<<<< @@ -22762,21 +22933,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":1888 + /* "mtrand.pyx":1893 * if np.signbit(fshape): * raise ValueError("shape < 0") * return cont1_array_sc(self.internal_state, rk_standard_gamma, # <<<<<<<<<<<<<< * size, fshape, self.lock) * */ - __pyx_t_5 = 
__pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_standard_gamma, __pyx_v_size, __pyx_v_fshape, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1888, __pyx_L1_error) + __pyx_t_5 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_standard_gamma, __pyx_v_size, __pyx_v_fshape, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1893, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; - /* "mtrand.pyx":1884 + /* "mtrand.pyx":1889 * oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oshape.shape == (): # <<<<<<<<<<<<<< @@ -22785,21 +22956,21 @@ */ } - /* "mtrand.pyx":1891 + /* "mtrand.pyx":1896 * size, fshape, self.lock) * * if np.any(np.signbit(oshape)): # <<<<<<<<<<<<<< * raise ValueError("shape < 0") * return cont1_array(self.internal_state, rk_standard_gamma, size, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1896, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -22813,13 +22984,13 @@ } } if (!__pyx_t_2) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oshape)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oshape)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oshape)}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -22827,19 +22998,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oshape)}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oshape)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oshape)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_oshape)); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -22856,14 +23027,14 @@ } } if (!__pyx_t_6) { - __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_5); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; - __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -22872,43 +23043,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; - __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_1); __pyx_t_1 = 0; 
- __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1891, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 1896, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":1892 + /* "mtrand.pyx":1897 * * if np.any(np.signbit(oshape)): * raise ValueError("shape < 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_standard_gamma, size, * oshape, self.lock) */ - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__76, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1892, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__78, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1897, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(0, 1892, __pyx_L1_error) + __PYX_ERR(0, 1897, __pyx_L1_error) - /* "mtrand.pyx":1891 + /* "mtrand.pyx":1896 * size, fshape, self.lock) * * if np.any(np.signbit(oshape)): # <<<<<<<<<<<<<< @@ -22917,7 +23088,7 @@ */ } - /* "mtrand.pyx":1893 + /* "mtrand.pyx":1898 * if np.any(np.signbit(oshape)): * raise ValueError("shape < 0") * return cont1_array(self.internal_state, rk_standard_gamma, size, # <<<<<<<<<<<<<< @@ -22926,7 +23097,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1894 + /* "mtrand.pyx":1899 * raise ValueError("shape < 0") * return cont1_array(self.internal_state, rk_standard_gamma, size, * oshape, self.lock) # <<<<<<<<<<<<<< @@ -22936,21 +23107,21 @@ __pyx_t_5 = __pyx_v_self->lock; 
__Pyx_INCREF(__pyx_t_5); - /* "mtrand.pyx":1893 + /* "mtrand.pyx":1898 * if np.any(np.signbit(oshape)): * raise ValueError("shape < 0") * return cont1_array(self.internal_state, rk_standard_gamma, size, # <<<<<<<<<<<<<< * oshape, self.lock) * */ - __pyx_t_7 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_standard_gamma, __pyx_v_size, __pyx_v_oshape, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1893, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_standard_gamma, __pyx_v_size, __pyx_v_oshape, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":1810 + /* "mtrand.pyx":1815 * self.lock) * * def standard_gamma(self, shape, size=None): # <<<<<<<<<<<<<< @@ -22975,7 +23146,7 @@ return __pyx_r; } -/* "mtrand.pyx":1896 +/* "mtrand.pyx":1901 * oshape, self.lock) * * def gamma(self, shape, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -23014,23 +23185,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_scale); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_scale); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gamma") < 0)) __PYX_ERR(0, 1896, __pyx_L3_error) + 
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gamma") < 0)) __PYX_ERR(0, 1901, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -23049,7 +23220,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("gamma", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1896, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("gamma", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1901, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.gamma", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -23079,14 +23250,14 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("gamma", 0); - /* "mtrand.pyx":1972 + /* "mtrand.pyx":1977 * cdef double fshape, fscale * * oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1972, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1977, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -23094,14 +23265,14 @@ __pyx_v_oshape = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":1973 + /* "mtrand.pyx":1978 * * oshape = PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oshape.shape == oscale.shape == (): */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1973, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; 
__Pyx_INCREF(__pyx_t_1); @@ -23109,61 +23280,61 @@ __pyx_v_oscale = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":1975 + /* "mtrand.pyx":1980 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oshape.shape == oscale.shape == (): # <<<<<<<<<<<<<< * fshape = PyFloat_AsDouble(shape) * fscale = PyFloat_AsDouble(scale) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oshape), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1975, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oshape), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1980, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1975, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1980, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1975, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1980, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1975, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1980, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1975, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1980, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":1976 + /* "mtrand.pyx":1981 * * if oshape.shape == oscale.shape == (): * fshape = PyFloat_AsDouble(shape) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if np.signbit(fshape): */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_shape); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 1976, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_shape); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 1981, __pyx_L1_error) __pyx_v_fshape = __pyx_t_5; - /* "mtrand.pyx":1977 + /* "mtrand.pyx":1982 * if oshape.shape == oscale.shape == (): * fshape = PyFloat_AsDouble(shape) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if np.signbit(fshape): * raise ValueError("shape < 0") */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 1977, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 1982, __pyx_L1_error) __pyx_v_fscale = __pyx_t_5; - /* "mtrand.pyx":1978 + /* "mtrand.pyx":1983 * fshape = PyFloat_AsDouble(shape) * fscale = PyFloat_AsDouble(scale) * if np.signbit(fshape): # <<<<<<<<<<<<<< * raise ValueError("shape < 0") * if np.signbit(fscale): */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1978, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1978, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fshape); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 1978, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fshape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { @@ -23176,14 +23347,14 @@ } } if (!__pyx_t_6) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1978, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1983, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1978, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1983, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -23192,43 +23363,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1978, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1983, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1978, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 
0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1978, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1978, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1983, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1979 + /* "mtrand.pyx":1984 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fshape): * raise ValueError("shape < 0") # <<<<<<<<<<<<<< * if np.signbit(fscale): * raise ValueError("scale < 0") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__77, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1979, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__79, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1984, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 1979, __pyx_L1_error) + __PYX_ERR(0, 1984, __pyx_L1_error) - /* "mtrand.pyx":1978 + /* "mtrand.pyx":1983 * fshape = PyFloat_AsDouble(shape) * fscale = PyFloat_AsDouble(scale) * if np.signbit(fshape): # <<<<<<<<<<<<<< @@ -23237,19 +23408,19 @@ */ } - /* "mtrand.pyx":1980 + /* "mtrand.pyx":1985 * if np.signbit(fshape): * raise ValueError("shape < 0") * if np.signbit(fscale): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1980, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 1985, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_signbit); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1980, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_signbit); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1985, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1980, __pyx_L1_error) + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1985, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { @@ -23262,14 +23433,14 @@ } } if (!__pyx_t_2) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1980, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1985, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_1}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1980, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1985, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -23278,43 +23449,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_1}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1980, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1985, 
__pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1980, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1985, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1980, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1985, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1980, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1985, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1981 + /* "mtrand.pyx":1986 * raise ValueError("shape < 0") * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, * fscale, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__78, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1981, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__80, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1986, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 1981, __pyx_L1_error) + __PYX_ERR(0, 1986, __pyx_L1_error) - /* "mtrand.pyx":1980 + /* "mtrand.pyx":1985 * if np.signbit(fshape): * raise ValueError("shape < 
0") * if np.signbit(fscale): # <<<<<<<<<<<<<< @@ -23323,7 +23494,7 @@ */ } - /* "mtrand.pyx":1982 + /* "mtrand.pyx":1987 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, # <<<<<<<<<<<<<< @@ -23332,7 +23503,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1983 + /* "mtrand.pyx":1988 * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, * fscale, self.lock) # <<<<<<<<<<<<<< @@ -23342,21 +23513,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":1982 + /* "mtrand.pyx":1987 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, # <<<<<<<<<<<<<< * fscale, self.lock) * */ - __pyx_t_7 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_gamma, __pyx_v_size, __pyx_v_fshape, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1982, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_gamma, __pyx_v_size, __pyx_v_fshape, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":1975 + /* "mtrand.pyx":1980 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oshape.shape == oscale.shape == (): # <<<<<<<<<<<<<< @@ -23365,21 +23536,21 @@ */ } - /* "mtrand.pyx":1985 + /* "mtrand.pyx":1990 * fscale, self.lock) * * if np.any(np.signbit(oshape)): # <<<<<<<<<<<<<< * raise ValueError("shape < 0") * if np.any(np.signbit(oscale)): */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_signbit); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_signbit); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; @@ -23393,13 +23564,13 @@ } } if (!__pyx_t_1) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, ((PyObject *)__pyx_v_oshape)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, ((PyObject *)__pyx_v_oshape)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_1, ((PyObject *)__pyx_v_oshape)}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -23407,19 +23578,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_1, ((PyObject *)__pyx_v_oshape)}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_1); __pyx_t_1 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oshape)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oshape)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_oshape)); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -23436,14 +23607,14 @@ } } if (!__pyx_t_2) { - __pyx_t_7 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_7); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_3}; - __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -23452,43 +23623,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject 
*__pyx_temp[2] = {__pyx_t_2, __pyx_t_3}; - __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1985, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1990, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1986 + /* "mtrand.pyx":1991 * * if np.any(np.signbit(oshape)): * raise ValueError("shape < 0") # <<<<<<<<<<<<<< * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") */ - __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__79, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1986, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__81, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1991, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); 
__Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __PYX_ERR(0, 1986, __pyx_L1_error) + __PYX_ERR(0, 1991, __pyx_L1_error) - /* "mtrand.pyx":1985 + /* "mtrand.pyx":1990 * fscale, self.lock) * * if np.any(np.signbit(oshape)): # <<<<<<<<<<<<<< @@ -23497,21 +23668,21 @@ */ } - /* "mtrand.pyx":1987 + /* "mtrand.pyx":1992 * if np.any(np.signbit(oshape)): * raise ValueError("shape < 0") * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale, */ - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_any); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_any); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_signbit); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_signbit); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -23525,13 +23696,13 @@ } } if (!__pyx_t_3) { - __pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_t_2, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_t_2, 
((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_3, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_6); } else @@ -23539,19 +23710,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_3, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_6); } else #endif { - __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oscale)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oscale)); PyTuple_SET_ITEM(__pyx_t_1, 0+1, ((PyObject *)__pyx_v_oscale)); - __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } @@ -23568,14 +23739,14 @@ } } if (!__pyx_t_2) { - __pyx_t_7 = __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_6); if 
(unlikely(!__pyx_t_7)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_7); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_8)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_6}; - __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; @@ -23584,43 +23755,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_8)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_6}; - __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { - __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 0+1, __pyx_t_6); __pyx_t_6 = 0; - __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_1, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_1, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } 
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1987, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 1992, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":1988 + /* "mtrand.pyx":1993 * raise ValueError("shape < 0") * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale, * self.lock) */ - __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__80, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1988, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__82, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 1993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __PYX_ERR(0, 1988, __pyx_L1_error) + __PYX_ERR(0, 1993, __pyx_L1_error) - /* "mtrand.pyx":1987 + /* "mtrand.pyx":1992 * if np.any(np.signbit(oshape)): * raise ValueError("shape < 0") * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< @@ -23629,7 +23800,7 @@ */ } - /* "mtrand.pyx":1989 + /* "mtrand.pyx":1994 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale, # <<<<<<<<<<<<<< @@ -23638,7 +23809,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":1990 + /* "mtrand.pyx":1995 * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale, * self.lock) # <<<<<<<<<<<<<< @@ -23648,21 +23819,21 @@ __pyx_t_7 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_7); - /* "mtrand.pyx":1989 + /* "mtrand.pyx":1994 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale, # <<<<<<<<<<<<<< * self.lock) * */ - 
__pyx_t_8 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_gamma, __pyx_v_size, __pyx_v_oshape, __pyx_v_oscale, __pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1989, __pyx_L1_error) + __pyx_t_8 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_gamma, __pyx_v_size, __pyx_v_oshape, __pyx_v_oscale, __pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 1994, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_r = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L0; - /* "mtrand.pyx":1896 + /* "mtrand.pyx":1901 * oshape, self.lock) * * def gamma(self, shape, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -23688,7 +23859,7 @@ return __pyx_r; } -/* "mtrand.pyx":1992 +/* "mtrand.pyx":1997 * self.lock) * * def f(self, dfnum, dfden, size=None): # <<<<<<<<<<<<<< @@ -23698,7 +23869,7 @@ /* Python wrapper */ static PyObject *__pyx_pw_6mtrand_11RandomState_49f(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6mtrand_11RandomState_48f[] = "\n f(dfnum, dfden, size=None)\n\n Draw samples from an F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of\n freedom in denominator), where both parameters should be greater than\n zero.\n\n The random variate of the F distribution (also known as the\n Fisher distribution) is a continuous probability distribution\n that arises in ANOVA tests, and is the ratio of two chi-square\n variates.\n\n Parameters\n ----------\n dfnum : int or array_like of ints\n Degrees of freedom in numerator. Should be greater than zero.\n dfden : int or array_like of ints\n Degrees of freedom in denominator. Should be greater than zero.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. 
If size is ``None`` (default),\n a single value is returned if ``dfnum`` and ``dfden`` are both scalars.\n Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Fisher distribution.\n\n See Also\n --------\n scipy.stats.f : probability density function, distribution or\n cumulative density function, etc.\n\n Notes\n -----\n The F statistic is used to compare in-group variances to between-group\n variances. Calculating the distribution depends on the sampling, and\n so it is a function of the respective degrees of freedom in the\n problem. The variable `dfnum` is the number of samples minus one, the\n between-groups degrees of freedom, while `dfden` is the within-groups\n degrees of freedom, the sum of the number of samples in each ""group\n minus the number of groups.\n\n References\n ----------\n .. [1] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.\n .. [2] Wikipedia, \"F-distribution\",\n http://en.wikipedia.org/wiki/F-distribution\n\n Examples\n --------\n An example from Glantz[1], pp 47-40:\n\n Two groups, children of diabetics (25 people) and children from people\n without diabetes (25 controls). Fasting blood glucose was measured,\n case group had a mean value of 86.1, controls had a mean value of\n 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these\n data consistent with the null hypothesis that the parents diabetic\n status does not affect their children's blood glucose levels?\n Calculating the F statistic from the data gives a value of 36.01.\n\n Draw samples from the distribution:\n\n >>> dfnum = 1. # between group degrees of freedom\n >>> dfden = 48. 
# within groups degrees of freedom\n >>> s = np.random.f(dfnum, dfden, 1000)\n\n The lower bound for the top 1% of the samples is :\n\n >>> sort(s)[-10]\n 7.61988120985\n\n So there is about a 1% chance that the F statistic will exceed 7.62,\n the measured value is 36, so the null hypothesis is rejected at the 1%\n level.\n\n "; +static char __pyx_doc_6mtrand_11RandomState_48f[] = "\n f(dfnum, dfden, size=None)\n\n Draw samples from an F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of\n freedom in denominator), where both parameters should be greater than\n zero.\n\n The random variate of the F distribution (also known as the\n Fisher distribution) is a continuous probability distribution\n that arises in ANOVA tests, and is the ratio of two chi-square\n variates.\n\n Parameters\n ----------\n dfnum : float or array_like of floats\n Degrees of freedom in numerator, should be > 0.\n dfden : float or array_like of float\n Degrees of freedom in denominator, should be > 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``dfnum`` and ``dfden`` are both scalars.\n Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized Fisher distribution.\n\n See Also\n --------\n scipy.stats.f : probability density function, distribution or\n cumulative density function, etc.\n\n Notes\n -----\n The F statistic is used to compare in-group variances to between-group\n variances. Calculating the distribution depends on the sampling, and\n so it is a function of the respective degrees of freedom in the\n problem. 
The variable `dfnum` is the number of samples minus one, the\n between-groups degrees of freedom, while `dfden` is the within-groups\n degrees of freedom, the sum of the number of samples in each group\n minus ""the number of groups.\n\n References\n ----------\n .. [1] Glantz, Stanton A. \"Primer of Biostatistics.\", McGraw-Hill,\n Fifth Edition, 2002.\n .. [2] Wikipedia, \"F-distribution\",\n http://en.wikipedia.org/wiki/F-distribution\n\n Examples\n --------\n An example from Glantz[1], pp 47-40:\n\n Two groups, children of diabetics (25 people) and children from people\n without diabetes (25 controls). Fasting blood glucose was measured,\n case group had a mean value of 86.1, controls had a mean value of\n 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these\n data consistent with the null hypothesis that the parents diabetic\n status does not affect their children's blood glucose levels?\n Calculating the F statistic from the data gives a value of 36.01.\n\n Draw samples from the distribution:\n\n >>> dfnum = 1. # between group degrees of freedom\n >>> dfden = 48. 
# within groups degrees of freedom\n >>> s = np.random.f(dfnum, dfden, 1000)\n\n The lower bound for the top 1% of the samples is :\n\n >>> sort(s)[-10]\n 7.61988120985\n\n So there is about a 1% chance that the F statistic will exceed 7.62,\n the measured value is 36, so the null hypothesis is rejected at the 1%\n level.\n\n "; static PyObject *__pyx_pw_6mtrand_11RandomState_49f(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_dfnum = 0; PyObject *__pyx_v_dfden = 0; @@ -23726,23 +23897,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dfnum)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dfnum)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dfden)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dfden)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("f", 0, 2, 3, 1); __PYX_ERR(0, 1992, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("f", 0, 2, 3, 1); __PYX_ERR(0, 1997, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "f") < 0)) __PYX_ERR(0, 1992, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "f") < 0)) __PYX_ERR(0, 1997, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -23760,7 +23931,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("f", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1992, 
__pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("f", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 1997, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.f", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -23791,14 +23962,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("f", 0); - /* "mtrand.pyx":2078 + /* "mtrand.pyx":2083 * cdef double fdfnum, fdfden * * odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2078, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2083, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -23806,14 +23977,14 @@ __pyx_v_odfnum = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":2079 + /* "mtrand.pyx":2084 * * odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if odfnum.shape == odfden.shape == (): */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2079, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -23821,49 +23992,49 @@ __pyx_v_odfden = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":2081 + /* "mtrand.pyx":2086 * odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if odfnum.shape == odfden.shape == (): # <<<<<<<<<<<<<< * fdfnum = PyFloat_AsDouble(dfnum) * fdfden = PyFloat_AsDouble(dfden) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject 
*)__pyx_v_odfnum), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2081, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odfnum), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2086, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odfden), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2081, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odfden), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2086, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2081, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2086, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2081, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2086, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2081, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2086, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":2082 + /* "mtrand.pyx":2087 * * if odfnum.shape == odfden.shape == (): * fdfnum = PyFloat_AsDouble(dfnum) # <<<<<<<<<<<<<< * fdfden = PyFloat_AsDouble(dfden) * */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_dfnum); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2082, __pyx_L1_error) + __pyx_t_5 = 
PyFloat_AsDouble(__pyx_v_dfnum); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2087, __pyx_L1_error) __pyx_v_fdfnum = __pyx_t_5; - /* "mtrand.pyx":2083 + /* "mtrand.pyx":2088 * if odfnum.shape == odfden.shape == (): * fdfnum = PyFloat_AsDouble(dfnum) * fdfden = PyFloat_AsDouble(dfden) # <<<<<<<<<<<<<< * * if fdfnum <= 0: */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_dfden); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2083, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_dfden); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2088, __pyx_L1_error) __pyx_v_fdfden = __pyx_t_5; - /* "mtrand.pyx":2085 + /* "mtrand.pyx":2090 * fdfden = PyFloat_AsDouble(dfden) * * if fdfnum <= 0: # <<<<<<<<<<<<<< @@ -23871,22 +24042,22 @@ * if fdfden <= 0: */ __pyx_t_4 = ((__pyx_v_fdfnum <= 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":2086 + /* "mtrand.pyx":2091 * * if fdfnum <= 0: * raise ValueError("dfnum <= 0") # <<<<<<<<<<<<<< * if fdfden <= 0: * raise ValueError("dfden <= 0") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__81, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2086, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__83, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 2086, __pyx_L1_error) + __PYX_ERR(0, 2091, __pyx_L1_error) - /* "mtrand.pyx":2085 + /* "mtrand.pyx":2090 * fdfden = PyFloat_AsDouble(dfden) * * if fdfnum <= 0: # <<<<<<<<<<<<<< @@ -23895,7 +24066,7 @@ */ } - /* "mtrand.pyx":2087 + /* "mtrand.pyx":2092 * if fdfnum <= 0: * raise ValueError("dfnum <= 0") * if fdfden <= 0: # <<<<<<<<<<<<<< @@ -23903,22 +24074,22 @@ * return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, */ __pyx_t_4 = ((__pyx_v_fdfden <= 0.0) != 0); - if (__pyx_t_4) { + if 
(unlikely(__pyx_t_4)) { - /* "mtrand.pyx":2088 + /* "mtrand.pyx":2093 * raise ValueError("dfnum <= 0") * if fdfden <= 0: * raise ValueError("dfden <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, * fdfden, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__82, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2088, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__84, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2093, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 2088, __pyx_L1_error) + __PYX_ERR(0, 2093, __pyx_L1_error) - /* "mtrand.pyx":2087 + /* "mtrand.pyx":2092 * if fdfnum <= 0: * raise ValueError("dfnum <= 0") * if fdfden <= 0: # <<<<<<<<<<<<<< @@ -23927,7 +24098,7 @@ */ } - /* "mtrand.pyx":2089 + /* "mtrand.pyx":2094 * if fdfden <= 0: * raise ValueError("dfden <= 0") * return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, # <<<<<<<<<<<<<< @@ -23936,7 +24107,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2090 + /* "mtrand.pyx":2095 * raise ValueError("dfden <= 0") * return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, * fdfden, self.lock) # <<<<<<<<<<<<<< @@ -23946,21 +24117,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":2089 + /* "mtrand.pyx":2094 * if fdfden <= 0: * raise ValueError("dfden <= 0") * return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, # <<<<<<<<<<<<<< * fdfden, self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_f, __pyx_v_size, __pyx_v_fdfnum, __pyx_v_fdfden, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2089, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_f, __pyx_v_size, __pyx_v_fdfnum, __pyx_v_fdfden, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2094, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":2081 + /* "mtrand.pyx":2086 * odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if odfnum.shape == odfden.shape == (): # <<<<<<<<<<<<<< @@ -23969,21 +24140,21 @@ */ } - /* "mtrand.pyx":2092 + /* "mtrand.pyx":2097 * fdfden, self.lock) * * if np.any(np.less_equal(odfnum, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("dfnum <= 0") * if np.any(np.less_equal(odfden, 0.0)): */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -24001,7 +24172,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_odfnum), __pyx_float_0_0}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -24009,13 +24180,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_odfnum), __pyx_float_0_0}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -24026,7 +24197,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -24042,14 +24213,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if 
CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -24058,43 +24229,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2092, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2097, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":2093 + /* "mtrand.pyx":2098 * * if np.any(np.less_equal(odfnum, 0.0)): * raise ValueError("dfnum <= 0") # <<<<<<<<<<<<<< * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__83, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2093, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__85, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2098, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 2093, __pyx_L1_error) + __PYX_ERR(0, 2098, __pyx_L1_error) - /* "mtrand.pyx":2092 + /* "mtrand.pyx":2097 * fdfden, self.lock) * * if np.any(np.less_equal(odfnum, 0.0)): # <<<<<<<<<<<<<< @@ -24103,21 +24274,21 @@ */ } - /* "mtrand.pyx":2094 + /* "mtrand.pyx":2099 * if np.any(np.less_equal(odfnum, 0.0)): * raise ValueError("dfnum <= 0") * if np.any(np.less_equal(odfden, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("dfden <= 0") * return cont2_array(self.internal_state, rk_f, size, odfnum, odfden, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 
2099, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -24135,7 +24306,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_odfden), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -24143,13 +24314,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_odfden), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL; @@ -24160,7 +24331,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_1 = 
__Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } @@ -24176,14 +24347,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -24192,43 +24363,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL; 
__Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2094, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2099, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":2095 + /* "mtrand.pyx":2100 * raise ValueError("dfnum <= 0") * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_f, size, odfnum, odfden, * self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__84, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2095, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__86, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2100, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 2095, __pyx_L1_error) + __PYX_ERR(0, 2100, __pyx_L1_error) - /* "mtrand.pyx":2094 + /* "mtrand.pyx":2099 * if np.any(np.less_equal(odfnum, 0.0)): * raise ValueError("dfnum <= 0") * if np.any(np.less_equal(odfden, 0.0)): # <<<<<<<<<<<<<< @@ -24237,7 +24408,7 @@ */ } - /* "mtrand.pyx":2096 + /* "mtrand.pyx":2101 * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") * return cont2_array(self.internal_state, rk_f, size, odfnum, odfden, # <<<<<<<<<<<<<< @@ -24246,7 +24417,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2097 + /* "mtrand.pyx":2102 * raise 
ValueError("dfden <= 0") * return cont2_array(self.internal_state, rk_f, size, odfnum, odfden, * self.lock) # <<<<<<<<<<<<<< @@ -24256,21 +24427,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":2096 + /* "mtrand.pyx":2101 * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") * return cont2_array(self.internal_state, rk_f, size, odfnum, odfden, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_9 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_f, __pyx_v_size, __pyx_v_odfnum, __pyx_v_odfden, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2096, __pyx_L1_error) + __pyx_t_9 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_f, __pyx_v_size, __pyx_v_odfnum, __pyx_v_odfden, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; - /* "mtrand.pyx":1992 + /* "mtrand.pyx":1997 * self.lock) * * def f(self, dfnum, dfden, size=None): # <<<<<<<<<<<<<< @@ -24296,7 +24467,7 @@ return __pyx_r; } -/* "mtrand.pyx":2099 +/* "mtrand.pyx":2104 * self.lock) * * def noncentral_f(self, dfnum, dfden, nonc, size=None): # <<<<<<<<<<<<<< @@ -24306,7 +24477,7 @@ /* Python wrapper */ static PyObject *__pyx_pw_6mtrand_11RandomState_51noncentral_f(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6mtrand_11RandomState_50noncentral_f[] = "\n noncentral_f(dfnum, dfden, nonc, size=None)\n\n Draw samples from the noncentral F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of\n freedom in denominator), where both parameters > 1.\n `nonc` is the non-centrality parameter.\n\n Parameters\n ----------\n dfnum : int or array_like of ints\n Parameter, should be > 1.\n dfden : int or array_like of ints\n Parameter, should be > 1.\n nonc : float or 
array_like of floats\n Parameter, should be >= 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``dfnum``, ``dfden``, and ``nonc``\n are all scalars. Otherwise, ``np.broadcast(dfnum, dfden, nonc).size``\n samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized noncentral Fisher distribution.\n\n Notes\n -----\n When calculating the power of an experiment (power = probability of\n rejecting the null hypothesis when a specific alternative is true) the\n non-central F statistic becomes important. When the null hypothesis is\n true, the F statistic follows a central F distribution. When the null\n hypothesis is not true, then it follows a non-central F statistic.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Noncentral F-Distribution.\"\n From MathWorld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/NoncentralF-Distribution.html\n .. [2] Wikipedia, \"Noncentral F-distribution\",\n http://en.wikipedia.org/wiki/Noncentral_F-dis""tribution\n\n Examples\n --------\n In a study, testing for a specific alternative to the null hypothesis\n requires use of the Noncentral F distribution. We need to calculate the\n area in the tail of the distribution that exceeds the value of the F\n distribution for the null hypothesis. 
We'll plot the two probability\n distributions for comparison.\n\n >>> dfnum = 3 # between group deg of freedom\n >>> dfden = 20 # within groups degrees of freedom\n >>> nonc = 3.0\n >>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000)\n >>> NF = np.histogram(nc_vals, bins=50, normed=True)\n >>> c_vals = np.random.f(dfnum, dfden, 1000000)\n >>> F = np.histogram(c_vals, bins=50, normed=True)\n >>> plt.plot(F[1][1:], F[0])\n >>> plt.plot(NF[1][1:], NF[0])\n >>> plt.show()\n\n "; +static char __pyx_doc_6mtrand_11RandomState_50noncentral_f[] = "\n noncentral_f(dfnum, dfden, nonc, size=None)\n\n Draw samples from the noncentral F distribution.\n\n Samples are drawn from an F distribution with specified parameters,\n `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of\n freedom in denominator), where both parameters > 1.\n `nonc` is the non-centrality parameter.\n\n Parameters\n ----------\n dfnum : float or array_like of floats\n Numerator degrees of freedom, should be > 0.\n\n .. versionchanged:: 1.14.0\n Earlier NumPy versions required dfnum > 1.\n dfden : float or array_like of floats\n Denominator degrees of freedom, should be > 0.\n nonc : float or array_like of floats\n Non-centrality parameter, the sum of the squares of the numerator\n means, should be >= 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``dfnum``, ``dfden``, and ``nonc``\n are all scalars. Otherwise, ``np.broadcast(dfnum, dfden, nonc).size``\n samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized noncentral Fisher distribution.\n\n Notes\n -----\n When calculating the power of an experiment (power = probability of\n rejecting the null hypothesis when a specific alternative is true) the\n non-central F statistic becomes important. 
When the null hypothesis is\n true, the F statistic follows a central F distribution. When the null\n hypothesis is not true, then it follows a non-central F statistic.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Noncentral F-Distribution.\"\n From MathW""orld--A Wolfram Web Resource.\n http://mathworld.wolfram.com/NoncentralF-Distribution.html\n .. [2] Wikipedia, \"Noncentral F-distribution\",\n http://en.wikipedia.org/wiki/Noncentral_F-distribution\n\n Examples\n --------\n In a study, testing for a specific alternative to the null hypothesis\n requires use of the Noncentral F distribution. We need to calculate the\n area in the tail of the distribution that exceeds the value of the F\n distribution for the null hypothesis. We'll plot the two probability\n distributions for comparison.\n\n >>> dfnum = 3 # between group deg of freedom\n >>> dfden = 20 # within groups degrees of freedom\n >>> nonc = 3.0\n >>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000)\n >>> NF = np.histogram(nc_vals, bins=50, normed=True)\n >>> c_vals = np.random.f(dfnum, dfden, 1000000)\n >>> F = np.histogram(c_vals, bins=50, normed=True)\n >>> plt.plot(F[1][1:], F[0])\n >>> plt.plot(NF[1][1:], NF[0])\n >>> plt.show()\n\n "; static PyObject *__pyx_pw_6mtrand_11RandomState_51noncentral_f(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_dfnum = 0; PyObject *__pyx_v_dfden = 0; @@ -24337,29 +24508,29 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dfnum)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dfnum)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dfden)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dfden)) != 0)) kw_args--; else { - 
__Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, 1); __PYX_ERR(0, 2099, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, 1); __PYX_ERR(0, 2104, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nonc)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nonc)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, 2); __PYX_ERR(0, 2099, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, 2); __PYX_ERR(0, 2104, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "noncentral_f") < 0)) __PYX_ERR(0, 2099, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "noncentral_f") < 0)) __PYX_ERR(0, 2104, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -24379,7 +24550,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2099, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("noncentral_f", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2104, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.noncentral_f", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -24412,14 +24583,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("noncentral_f", 0); - /* "mtrand.pyx":2169 + /* "mtrand.pyx":2178 * cdef double fdfnum, fdfden, fnonc * * odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * odfden = PyArray_FROM_OTF(dfden, 
NPY_DOUBLE, NPY_ARRAY_ALIGNED) * ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2169, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -24427,14 +24598,14 @@ __pyx_v_odfnum = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":2170 + /* "mtrand.pyx":2179 * * odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2170, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -24442,14 +24613,14 @@ __pyx_v_odfden = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":2171 + /* "mtrand.pyx":2180 * odfnum = PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * odfden = PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if odfnum.shape == odfden.shape == ononc.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2171, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -24457,130 +24628,130 @@ __pyx_v_ononc = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; 
- /* "mtrand.pyx":2173 + /* "mtrand.pyx":2182 * ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if odfnum.shape == odfden.shape == ononc.shape == (): # <<<<<<<<<<<<<< * fdfnum = PyFloat_AsDouble(dfnum) * fdfden = PyFloat_AsDouble(dfden) */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odfnum), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2173, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odfnum), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odfden), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2173, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odfden), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2173, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2182, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ononc), __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2173, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ononc), __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2173, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2182, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = 
PyObject_RichCompare(__pyx_t_4, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2173, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2182, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 2173, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 2182, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_5) { - /* "mtrand.pyx":2174 + /* "mtrand.pyx":2183 * * if odfnum.shape == odfden.shape == ononc.shape == (): * fdfnum = PyFloat_AsDouble(dfnum) # <<<<<<<<<<<<<< * fdfden = PyFloat_AsDouble(dfden) * fnonc = PyFloat_AsDouble(nonc) */ - __pyx_t_6 = PyFloat_AsDouble(__pyx_v_dfnum); if (unlikely(__pyx_t_6 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2174, __pyx_L1_error) + __pyx_t_6 = PyFloat_AsDouble(__pyx_v_dfnum); if (unlikely(__pyx_t_6 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2183, __pyx_L1_error) __pyx_v_fdfnum = __pyx_t_6; - /* "mtrand.pyx":2175 + /* "mtrand.pyx":2184 * if odfnum.shape == odfden.shape == ononc.shape == (): * fdfnum = PyFloat_AsDouble(dfnum) * fdfden = PyFloat_AsDouble(dfden) # <<<<<<<<<<<<<< * fnonc = PyFloat_AsDouble(nonc) * */ - __pyx_t_6 = PyFloat_AsDouble(__pyx_v_dfden); if (unlikely(__pyx_t_6 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2175, __pyx_L1_error) + __pyx_t_6 = PyFloat_AsDouble(__pyx_v_dfden); if (unlikely(__pyx_t_6 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2184, __pyx_L1_error) __pyx_v_fdfden = __pyx_t_6; - /* "mtrand.pyx":2176 + /* "mtrand.pyx":2185 * fdfnum = PyFloat_AsDouble(dfnum) * fdfden = PyFloat_AsDouble(dfden) * fnonc = PyFloat_AsDouble(nonc) # <<<<<<<<<<<<<< * - * if fdfnum <= 1: + * if fdfnum <= 0: */ 
- __pyx_t_6 = PyFloat_AsDouble(__pyx_v_nonc); if (unlikely(__pyx_t_6 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2176, __pyx_L1_error) + __pyx_t_6 = PyFloat_AsDouble(__pyx_v_nonc); if (unlikely(__pyx_t_6 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2185, __pyx_L1_error) __pyx_v_fnonc = __pyx_t_6; - /* "mtrand.pyx":2178 + /* "mtrand.pyx":2187 * fnonc = PyFloat_AsDouble(nonc) * - * if fdfnum <= 1: # <<<<<<<<<<<<<< - * raise ValueError("dfnum <= 1") + * if fdfnum <= 0: # <<<<<<<<<<<<<< + * raise ValueError("dfnum <= 0") * if fdfden <= 0: */ - __pyx_t_5 = ((__pyx_v_fdfnum <= 1.0) != 0); - if (__pyx_t_5) { + __pyx_t_5 = ((__pyx_v_fdfnum <= 0.0) != 0); + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":2179 + /* "mtrand.pyx":2188 * - * if fdfnum <= 1: - * raise ValueError("dfnum <= 1") # <<<<<<<<<<<<<< + * if fdfnum <= 0: + * raise ValueError("dfnum <= 0") # <<<<<<<<<<<<<< * if fdfden <= 0: * raise ValueError("dfden <= 0") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__85, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2179, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__87, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2188, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 2179, __pyx_L1_error) + __PYX_ERR(0, 2188, __pyx_L1_error) - /* "mtrand.pyx":2178 + /* "mtrand.pyx":2187 * fnonc = PyFloat_AsDouble(nonc) * - * if fdfnum <= 1: # <<<<<<<<<<<<<< - * raise ValueError("dfnum <= 1") + * if fdfnum <= 0: # <<<<<<<<<<<<<< + * raise ValueError("dfnum <= 0") * if fdfden <= 0: */ } - /* "mtrand.pyx":2180 - * if fdfnum <= 1: - * raise ValueError("dfnum <= 1") + /* "mtrand.pyx":2189 + * if fdfnum <= 0: + * raise ValueError("dfnum <= 0") * if fdfden <= 0: # <<<<<<<<<<<<<< * raise ValueError("dfden <= 0") * if fnonc < 0: */ __pyx_t_5 = ((__pyx_v_fdfden <= 0.0) != 0); - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* 
"mtrand.pyx":2181 - * raise ValueError("dfnum <= 1") + /* "mtrand.pyx":2190 + * raise ValueError("dfnum <= 0") * if fdfden <= 0: * raise ValueError("dfden <= 0") # <<<<<<<<<<<<<< * if fnonc < 0: * raise ValueError("nonc < 0") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__86, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2181, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__88, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 2181, __pyx_L1_error) + __PYX_ERR(0, 2190, __pyx_L1_error) - /* "mtrand.pyx":2180 - * if fdfnum <= 1: - * raise ValueError("dfnum <= 1") + /* "mtrand.pyx":2189 + * if fdfnum <= 0: + * raise ValueError("dfnum <= 0") * if fdfden <= 0: # <<<<<<<<<<<<<< * raise ValueError("dfden <= 0") * if fnonc < 0: */ } - /* "mtrand.pyx":2182 + /* "mtrand.pyx":2191 * if fdfden <= 0: * raise ValueError("dfden <= 0") * if fnonc < 0: # <<<<<<<<<<<<<< @@ -24588,22 +24759,22 @@ * return cont3_array_sc(self.internal_state, rk_noncentral_f, size, */ __pyx_t_5 = ((__pyx_v_fnonc < 0.0) != 0); - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":2183 + /* "mtrand.pyx":2192 * raise ValueError("dfden <= 0") * if fnonc < 0: * raise ValueError("nonc < 0") # <<<<<<<<<<<<<< * return cont3_array_sc(self.internal_state, rk_noncentral_f, size, * fdfnum, fdfden, fnonc, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__87, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2183, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__89, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 2183, __pyx_L1_error) + __PYX_ERR(0, 2192, __pyx_L1_error) - /* "mtrand.pyx":2182 + /* 
"mtrand.pyx":2191 * if fdfden <= 0: * raise ValueError("dfden <= 0") * if fnonc < 0: # <<<<<<<<<<<<<< @@ -24612,7 +24783,7 @@ */ } - /* "mtrand.pyx":2184 + /* "mtrand.pyx":2193 * if fnonc < 0: * raise ValueError("nonc < 0") * return cont3_array_sc(self.internal_state, rk_noncentral_f, size, # <<<<<<<<<<<<<< @@ -24621,31 +24792,31 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2185 + /* "mtrand.pyx":2194 * raise ValueError("nonc < 0") * return cont3_array_sc(self.internal_state, rk_noncentral_f, size, * fdfnum, fdfden, fnonc, self.lock) # <<<<<<<<<<<<<< * - * if np.any(np.less_equal(odfnum, 1.0)): + * if np.any(np.less_equal(odfnum, 0.0)): */ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":2184 + /* "mtrand.pyx":2193 * if fnonc < 0: * raise ValueError("nonc < 0") * return cont3_array_sc(self.internal_state, rk_noncentral_f, size, # <<<<<<<<<<<<<< * fdfnum, fdfden, fnonc, self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_cont3_array_sc(__pyx_v_self->internal_state, rk_noncentral_f, __pyx_v_size, __pyx_v_fdfnum, __pyx_v_fdfden, __pyx_v_fnonc, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2184, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_cont3_array_sc(__pyx_v_self->internal_state, rk_noncentral_f, __pyx_v_size, __pyx_v_fdfnum, __pyx_v_fdfden, __pyx_v_fnonc, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2193, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":2173 + /* "mtrand.pyx":2182 * ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if odfnum.shape == odfden.shape == ononc.shape == (): # <<<<<<<<<<<<<< @@ -24654,21 +24825,21 @@ */ } - /* "mtrand.pyx":2187 + /* "mtrand.pyx":2196 * fdfnum, fdfden, fnonc, self.lock) * - * if np.any(np.less_equal(odfnum, 1.0)): # <<<<<<<<<<<<<< - * raise ValueError("dfnum <= 1") + * if np.any(np.less_equal(odfnum, 0.0)): # <<<<<<<<<<<<<< + * raise ValueError("dfnum <= 0") * if 
np.any(np.less_equal(odfden, 0.0)): */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; @@ -24685,22 +24856,22 @@ } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { - PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)__pyx_v_odfnum), __pyx_float_1_0}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2187, __pyx_L1_error) + PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)__pyx_v_odfnum), __pyx_float_0_0}; + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { - PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject 
*)__pyx_v_odfnum), __pyx_float_1_0}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2187, __pyx_L1_error) + PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)__pyx_v_odfnum), __pyx_float_0_0}; + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_4); __pyx_t_4 = NULL; @@ -24708,10 +24879,10 @@ __Pyx_INCREF(((PyObject *)__pyx_v_odfnum)); __Pyx_GIVEREF(((PyObject *)__pyx_v_odfnum)); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, ((PyObject *)__pyx_v_odfnum)); - __Pyx_INCREF(__pyx_float_1_0); - __Pyx_GIVEREF(__pyx_float_1_0); - PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_1_0); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2187, __pyx_L1_error) + __Pyx_INCREF(__pyx_float_0_0); + __Pyx_GIVEREF(__pyx_float_0_0); + PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_0_0); + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -24727,14 +24898,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if 
(PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -24743,66 +24914,66 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 2187, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 2196, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; 
- if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":2188 + /* "mtrand.pyx":2197 * - * if np.any(np.less_equal(odfnum, 1.0)): - * raise ValueError("dfnum <= 1") # <<<<<<<<<<<<<< + * if np.any(np.less_equal(odfnum, 0.0)): + * raise ValueError("dfnum <= 0") # <<<<<<<<<<<<<< * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__88, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2188, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__90, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2197, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 2188, __pyx_L1_error) + __PYX_ERR(0, 2197, __pyx_L1_error) - /* "mtrand.pyx":2187 + /* "mtrand.pyx":2196 * fdfnum, fdfden, fnonc, self.lock) * - * if np.any(np.less_equal(odfnum, 1.0)): # <<<<<<<<<<<<<< - * raise ValueError("dfnum <= 1") + * if np.any(np.less_equal(odfnum, 0.0)): # <<<<<<<<<<<<<< + * raise ValueError("dfnum <= 0") * if np.any(np.less_equal(odfden, 0.0)): */ } - /* "mtrand.pyx":2189 - * if np.any(np.less_equal(odfnum, 1.0)): - * raise ValueError("dfnum <= 1") + /* "mtrand.pyx":2198 + * if np.any(np.less_equal(odfnum, 0.0)): + * raise ValueError("dfnum <= 0") * if np.any(np.less_equal(odfden, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("dfden <= 0") * if np.any(np.less(ononc, 0.0)): */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2198, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -24820,7 +24991,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_odfden), __pyx_float_0_0}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -24828,13 +24999,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_odfden), __pyx_float_0_0}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2198, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_4); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = NULL; @@ -24845,7 +25016,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } @@ -24861,14 +25032,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -24877,66 +25048,66 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_4 = 
PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 2189, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 2198, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":2190 - * raise ValueError("dfnum <= 1") + /* "mtrand.pyx":2199 + * raise ValueError("dfnum <= 0") * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") # <<<<<<<<<<<<<< * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__89, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2190, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__91, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 2190, __pyx_L1_error) + __PYX_ERR(0, 2199, __pyx_L1_error) - /* "mtrand.pyx":2189 - * if np.any(np.less_equal(odfnum, 1.0)): - * raise ValueError("dfnum <= 1") + /* "mtrand.pyx":2198 + * if np.any(np.less_equal(odfnum, 0.0)): + * raise ValueError("dfnum <= 0") * if 
np.any(np.less_equal(odfden, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("dfden <= 0") * if np.any(np.less(ononc, 0.0)): */ } - /* "mtrand.pyx":2191 + /* "mtrand.pyx":2200 * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") * if np.any(np.less(ononc, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("nonc < 0") * return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum, */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_any); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_any); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -24954,7 +25125,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_2, ((PyObject *)__pyx_v_ononc), __pyx_float_0_0}; - __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_9); } else @@ -24962,13 +25133,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_2, ((PyObject *)__pyx_v_ononc), __pyx_float_0_0}; - __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_9); } else #endif { - __pyx_t_3 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = NULL; @@ -24979,7 +25150,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } @@ -24995,14 +25166,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_9}; - __pyx_t_1 = 
__Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -25011,43 +25182,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_9}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else #endif { - __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_9); __pyx_t_9 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 2191, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 2200, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":2192 + /* "mtrand.pyx":2201 * 
raise ValueError("dfden <= 0") * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") # <<<<<<<<<<<<<< * return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum, * odfden, ononc, self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__90, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2192, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__92, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 2192, __pyx_L1_error) + __PYX_ERR(0, 2201, __pyx_L1_error) - /* "mtrand.pyx":2191 + /* "mtrand.pyx":2200 * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") * if np.any(np.less(ononc, 0.0)): # <<<<<<<<<<<<<< @@ -25056,7 +25227,7 @@ */ } - /* "mtrand.pyx":2193 + /* "mtrand.pyx":2202 * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") * return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum, # <<<<<<<<<<<<<< @@ -25065,7 +25236,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2194 + /* "mtrand.pyx":2203 * raise ValueError("nonc < 0") * return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum, * odfden, ononc, self.lock) # <<<<<<<<<<<<<< @@ -25075,21 +25246,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":2193 + /* "mtrand.pyx":2202 * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") * return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum, # <<<<<<<<<<<<<< * odfden, ononc, self.lock) * */ - __pyx_t_4 = __pyx_f_6mtrand_cont3_array(__pyx_v_self->internal_state, rk_noncentral_f, __pyx_v_size, __pyx_v_odfnum, __pyx_v_odfden, __pyx_v_ononc, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2193, __pyx_L1_error) + __pyx_t_4 = __pyx_f_6mtrand_cont3_array(__pyx_v_self->internal_state, rk_noncentral_f, __pyx_v_size, __pyx_v_odfnum, 
__pyx_v_odfden, __pyx_v_ononc, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 2202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; - /* "mtrand.pyx":2099 + /* "mtrand.pyx":2104 * self.lock) * * def noncentral_f(self, dfnum, dfden, nonc, size=None): # <<<<<<<<<<<<<< @@ -25116,7 +25287,7 @@ return __pyx_r; } -/* "mtrand.pyx":2196 +/* "mtrand.pyx":2205 * odfden, ononc, self.lock) * * def chisquare(self, df, size=None): # <<<<<<<<<<<<<< @@ -25126,7 +25297,7 @@ /* Python wrapper */ static PyObject *__pyx_pw_6mtrand_11RandomState_53chisquare(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6mtrand_11RandomState_52chisquare[] = "\n chisquare(df, size=None)\n\n Draw samples from a chi-square distribution.\n\n When `df` independent random variables, each with standard normal\n distributions (mean 0, variance 1), are squared and summed, the\n resulting distribution is chi-square (see Notes). This distribution\n is often used in hypothesis testing.\n\n Parameters\n ----------\n df : int or array_like of ints\n Number of degrees of freedom.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``df`` is a scalar. Otherwise,\n ``np.array(df).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized chi-square distribution.\n\n Raises\n ------\n ValueError\n When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)\n is given.\n\n Notes\n -----\n The variable obtained by summing the squares of `df` independent,\n standard normally distributed random variables:\n\n .. math:: Q = \\sum_{i=0}^{\\mathtt{df}} X^2_i\n\n is chi-square distributed, denoted\n\n .. 
math:: Q \\sim \\chi^2_k.\n\n The probability density function of the chi-squared distribution is\n\n .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}\n x^{k/2 - 1} e^{-x/2},\n\n where :math:`\\Gamma` is the gamma function,\n\n .. math:: \\Gamma(x) = \\int_0^{-\\infty} t^{x - 1} e^{-t} dt.\n\n References\n ----------\n .. [1] NIST \"Engineering Statistics Handbook\"\n http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n\n Examples\n --------\n >>> np.random.chisquare(""2,4)\n array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272])\n\n "; +static char __pyx_doc_6mtrand_11RandomState_52chisquare[] = "\n chisquare(df, size=None)\n\n Draw samples from a chi-square distribution.\n\n When `df` independent random variables, each with standard normal\n distributions (mean 0, variance 1), are squared and summed, the\n resulting distribution is chi-square (see Notes). This distribution\n is often used in hypothesis testing.\n\n Parameters\n ----------\n df : float or array_like of floats\n Number of degrees of freedom, should be > 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``df`` is a scalar. Otherwise,\n ``np.array(df).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized chi-square distribution.\n\n Raises\n ------\n ValueError\n When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)\n is given.\n\n Notes\n -----\n The variable obtained by summing the squares of `df` independent,\n standard normally distributed random variables:\n\n .. math:: Q = \\sum_{i=0}^{\\mathtt{df}} X^2_i\n\n is chi-square distributed, denoted\n\n .. math:: Q \\sim \\chi^2_k.\n\n The probability density function of the chi-squared distribution is\n\n .. 
math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}\n x^{k/2 - 1} e^{-x/2},\n\n where :math:`\\Gamma` is the gamma function,\n\n .. math:: \\Gamma(x) = \\int_0^{-\\infty} t^{x - 1} e^{-t} dt.\n\n References\n ----------\n .. [1] NIST \"Engineering Statistics Handbook\"\n http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm\n\n Examples\n --------\n >>> n""p.random.chisquare(2,4)\n array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272])\n\n "; static PyObject *__pyx_pw_6mtrand_11RandomState_53chisquare(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_df = 0; PyObject *__pyx_v_size = 0; @@ -25151,17 +25322,17 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_df)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_df)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "chisquare") < 0)) __PYX_ERR(0, 2196, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "chisquare") < 0)) __PYX_ERR(0, 2205, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -25177,7 +25348,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("chisquare", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2196, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("chisquare", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2205, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename); 
__Pyx_RefNannyFinishContext(); @@ -25206,14 +25377,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("chisquare", 0); - /* "mtrand.pyx":2262 + /* "mtrand.pyx":2271 * cdef double fdf * * odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if odf.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2262, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -25221,32 +25392,32 @@ __pyx_v_odf = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":2264 + /* "mtrand.pyx":2273 * odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if odf.shape == (): # <<<<<<<<<<<<<< * fdf = PyFloat_AsDouble(df) * */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odf), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2264, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odf), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2264, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2273, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2264, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2273, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":2265 + /* "mtrand.pyx":2274 * * if odf.shape == (): * fdf = PyFloat_AsDouble(df) # <<<<<<<<<<<<<< * * if fdf <= 
0: */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_df); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2265, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_df); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2274, __pyx_L1_error) __pyx_v_fdf = __pyx_t_4; - /* "mtrand.pyx":2267 + /* "mtrand.pyx":2276 * fdf = PyFloat_AsDouble(df) * * if fdf <= 0: # <<<<<<<<<<<<<< @@ -25254,22 +25425,22 @@ * return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf, */ __pyx_t_3 = ((__pyx_v_fdf <= 0.0) != 0); - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":2268 + /* "mtrand.pyx":2277 * * if fdf <= 0: * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__91, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2268, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__93, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2277, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 2268, __pyx_L1_error) + __PYX_ERR(0, 2277, __pyx_L1_error) - /* "mtrand.pyx":2267 + /* "mtrand.pyx":2276 * fdf = PyFloat_AsDouble(df) * * if fdf <= 0: # <<<<<<<<<<<<<< @@ -25278,7 +25449,7 @@ */ } - /* "mtrand.pyx":2269 + /* "mtrand.pyx":2278 * if fdf <= 0: * raise ValueError("df <= 0") * return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf, # <<<<<<<<<<<<<< @@ -25287,7 +25458,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2270 + /* "mtrand.pyx":2279 * raise ValueError("df <= 0") * return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf, * self.lock) # <<<<<<<<<<<<<< @@ -25297,21 +25468,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":2269 + /* "mtrand.pyx":2278 * if fdf <= 0: * raise ValueError("df <= 0") * return 
cont1_array_sc(self.internal_state, rk_chisquare, size, fdf, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_chisquare, __pyx_v_size, __pyx_v_fdf, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2269, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_chisquare, __pyx_v_size, __pyx_v_fdf, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":2264 + /* "mtrand.pyx":2273 * odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if odf.shape == (): # <<<<<<<<<<<<<< @@ -25320,21 +25491,21 @@ */ } - /* "mtrand.pyx":2272 + /* "mtrand.pyx":2281 * self.lock) * * if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("df <= 0") * return cont1_array(self.internal_state, rk_chisquare, size, odf, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_7 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -25352,7 +25523,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_odf), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -25360,13 +25531,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_odf), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -25377,7 +25548,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -25393,14 +25564,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -25409,43 +25580,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2272, __pyx_L1_error) 
+ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2272, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2281, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":2273 + /* "mtrand.pyx":2282 * * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_chisquare, size, odf, * self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__92, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2273, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__94, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 2273, __pyx_L1_error) + __PYX_ERR(0, 2282, __pyx_L1_error) - /* "mtrand.pyx":2272 + /* "mtrand.pyx":2281 * self.lock) * * if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<< @@ -25454,7 +25625,7 @@ */ } - /* "mtrand.pyx":2274 + /* "mtrand.pyx":2283 * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") * return cont1_array(self.internal_state, rk_chisquare, size, odf, # <<<<<<<<<<<<<< @@ -25463,7 +25634,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2275 + /* "mtrand.pyx":2284 * raise ValueError("df <= 0") * return cont1_array(self.internal_state, rk_chisquare, size, odf, * self.lock) # <<<<<<<<<<<<<< @@ -25473,21 +25644,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":2274 + /* "mtrand.pyx":2283 * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") * return 
cont1_array(self.internal_state, rk_chisquare, size, odf, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_5 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_chisquare, __pyx_v_size, __pyx_v_odf, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2274, __pyx_L1_error) + __pyx_t_5 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_chisquare, __pyx_v_size, __pyx_v_odf, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; - /* "mtrand.pyx":2196 + /* "mtrand.pyx":2205 * odfden, ononc, self.lock) * * def chisquare(self, df, size=None): # <<<<<<<<<<<<<< @@ -25512,7 +25683,7 @@ return __pyx_r; } -/* "mtrand.pyx":2277 +/* "mtrand.pyx":2286 * self.lock) * * def noncentral_chisquare(self, df, nonc, size=None): # <<<<<<<<<<<<<< @@ -25522,7 +25693,7 @@ /* Python wrapper */ static PyObject *__pyx_pw_6mtrand_11RandomState_55noncentral_chisquare(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6mtrand_11RandomState_54noncentral_chisquare[] = "\n noncentral_chisquare(df, nonc, size=None)\n\n Draw samples from a noncentral chi-square distribution.\n\n The noncentral :math:`\\chi^2` distribution is a generalisation of\n the :math:`\\chi^2` distribution.\n\n Parameters\n ----------\n df : int or array_like of ints\n Degrees of freedom, should be > 0 as of NumPy 1.10.0,\n should be > 1 for earlier versions.\n nonc : float or array_like of floats\n Non-centrality, should be non-negative.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. 
If size is ``None`` (default),\n a single value is returned if ``df`` and ``nonc`` are both scalars.\n Otherwise, ``np.broadcast(df, nonc).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized noncentral chi-square distribution.\n\n Notes\n -----\n The probability density function for the noncentral Chi-square\n distribution is\n\n .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}\n \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}\n \\P_{Y_{df+2i}}(x),\n\n where :math:`Y_{q}` is the Chi-square with q degrees of freedom.\n\n In Delhi (2007), it is noted that the noncentral chi-square is\n useful in bombing and coverage problems, the probability of\n killing the point target given by the noncentral chi-squared\n distribution.\n\n References\n ----------\n .. [1] Delhi, M.S. Holla, \"On a noncentral chi-square distribution in\n the analysis of weapon systems effectiveness\", Metrika,\n Volume 15, Number 1 / December, 1970.\n .. [2] Wikipedia, \"Noncentral chi-square distribution\"\n ""http://en.wikipedia.org/wiki/Noncentral_chi-square_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram\n\n >>> import matplotlib.pyplot as plt\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... bins=200, normed=True)\n >>> plt.show()\n\n Draw values from a noncentral chisquare with very small noncentrality,\n and compare to a chisquare.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000),\n ... bins=np.arange(0., 25, .1), normed=True)\n >>> values2 = plt.hist(np.random.chisquare(3, 100000),\n ... bins=np.arange(0., 25, .1), normed=True)\n >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')\n >>> plt.show()\n\n Demonstrate how large values of non-centrality lead to a more symmetric\n distribution.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... 
bins=200, normed=True)\n >>> plt.show()\n\n "; +static char __pyx_doc_6mtrand_11RandomState_54noncentral_chisquare[] = "\n noncentral_chisquare(df, nonc, size=None)\n\n Draw samples from a noncentral chi-square distribution.\n\n The noncentral :math:`\\chi^2` distribution is a generalisation of\n the :math:`\\chi^2` distribution.\n\n Parameters\n ----------\n df : float or array_like of floats\n Degrees of freedom, should be > 0.\n\n .. versionchanged:: 1.10.0\n Earlier NumPy versions required dfnum > 1.\n nonc : float or array_like of floats\n Non-centrality, should be non-negative.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``df`` and ``nonc`` are both scalars.\n Otherwise, ``np.broadcast(df, nonc).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized noncentral chi-square distribution.\n\n Notes\n -----\n The probability density function for the noncentral Chi-square\n distribution is\n\n .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}\n \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}\n \\P_{Y_{df+2i}}(x),\n\n where :math:`Y_{q}` is the Chi-square with q degrees of freedom.\n\n In Delhi (2007), it is noted that the noncentral chi-square is\n useful in bombing and coverage problems, the probability of\n killing the point target given by the noncentral chi-squared\n distribution.\n\n References\n ----------\n .. [1] Delhi, M.S. Holla, \"On a noncentral chi-square distribution in\n the analysis of weapon systems effectiveness\", Metrika,\n Volume 15, Number 1 / December, 1970.\n .. 
[2] Wikipedia, \"Noncentral chi-s""quare distribution\"\n http://en.wikipedia.org/wiki/Noncentral_chi-square_distribution\n\n Examples\n --------\n Draw values from the distribution and plot the histogram\n\n >>> import matplotlib.pyplot as plt\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... bins=200, normed=True)\n >>> plt.show()\n\n Draw values from a noncentral chisquare with very small noncentrality,\n and compare to a chisquare.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000),\n ... bins=np.arange(0., 25, .1), normed=True)\n >>> values2 = plt.hist(np.random.chisquare(3, 100000),\n ... bins=np.arange(0., 25, .1), normed=True)\n >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')\n >>> plt.show()\n\n Demonstrate how large values of non-centrality lead to a more symmetric\n distribution.\n\n >>> plt.figure()\n >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),\n ... bins=200, normed=True)\n >>> plt.show()\n\n "; static PyObject *__pyx_pw_6mtrand_11RandomState_55noncentral_chisquare(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_df = 0; PyObject *__pyx_v_nonc = 0; @@ -25550,23 +25721,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_df)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_df)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nonc)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nonc)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("noncentral_chisquare", 0, 2, 3, 1); __PYX_ERR(0, 2277, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("noncentral_chisquare", 0, 2, 3, 1); __PYX_ERR(0, 2286, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) 
{ - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "noncentral_chisquare") < 0)) __PYX_ERR(0, 2277, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "noncentral_chisquare") < 0)) __PYX_ERR(0, 2286, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -25584,7 +25755,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("noncentral_chisquare", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2277, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("noncentral_chisquare", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2286, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.noncentral_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -25615,14 +25786,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("noncentral_chisquare", 0); - /* "mtrand.pyx":2360 + /* "mtrand.pyx":2371 * cdef double fdf, fnonc * * odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2360, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2371, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -25630,14 +25801,14 @@ __pyx_v_odf = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":2361 + /* "mtrand.pyx":2372 * * odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, 
NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if odf.shape == ononc.shape == (): */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2361, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2372, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -25645,49 +25816,49 @@ __pyx_v_ononc = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":2363 + /* "mtrand.pyx":2374 * ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if odf.shape == ononc.shape == (): # <<<<<<<<<<<<<< * fdf = PyFloat_AsDouble(df) * fnonc = PyFloat_AsDouble(nonc) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odf), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2363, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odf), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2374, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ononc), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2363, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ononc), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2374, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2363, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2374, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2363, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); 
__Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2374, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2363, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2374, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":2364 + /* "mtrand.pyx":2375 * * if odf.shape == ononc.shape == (): * fdf = PyFloat_AsDouble(df) # <<<<<<<<<<<<<< * fnonc = PyFloat_AsDouble(nonc) * */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_df); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2364, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_df); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2375, __pyx_L1_error) __pyx_v_fdf = __pyx_t_5; - /* "mtrand.pyx":2365 + /* "mtrand.pyx":2376 * if odf.shape == ononc.shape == (): * fdf = PyFloat_AsDouble(df) * fnonc = PyFloat_AsDouble(nonc) # <<<<<<<<<<<<<< * * if fdf <= 0: */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_nonc); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2365, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_nonc); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2376, __pyx_L1_error) __pyx_v_fnonc = __pyx_t_5; - /* "mtrand.pyx":2367 + /* "mtrand.pyx":2378 * fnonc = PyFloat_AsDouble(nonc) * * if fdf <= 0: # <<<<<<<<<<<<<< @@ -25695,22 +25866,22 @@ * if fnonc < 0: */ __pyx_t_4 = ((__pyx_v_fdf <= 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":2368 + /* "mtrand.pyx":2379 * * if fdf <= 0: * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * if fnonc < 0: * raise ValueError("nonc < 0") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__93, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2368, __pyx_L1_error) + __pyx_t_3 = 
__Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__95, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2379, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 2368, __pyx_L1_error) + __PYX_ERR(0, 2379, __pyx_L1_error) - /* "mtrand.pyx":2367 + /* "mtrand.pyx":2378 * fnonc = PyFloat_AsDouble(nonc) * * if fdf <= 0: # <<<<<<<<<<<<<< @@ -25719,7 +25890,7 @@ */ } - /* "mtrand.pyx":2369 + /* "mtrand.pyx":2380 * if fdf <= 0: * raise ValueError("df <= 0") * if fnonc < 0: # <<<<<<<<<<<<<< @@ -25727,22 +25898,22 @@ * return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, */ __pyx_t_4 = ((__pyx_v_fnonc < 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":2370 + /* "mtrand.pyx":2381 * raise ValueError("df <= 0") * if fnonc < 0: * raise ValueError("nonc < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, * size, fdf, fnonc, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__94, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2370, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__96, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 2370, __pyx_L1_error) + __PYX_ERR(0, 2381, __pyx_L1_error) - /* "mtrand.pyx":2369 + /* "mtrand.pyx":2380 * if fdf <= 0: * raise ValueError("df <= 0") * if fnonc < 0: # <<<<<<<<<<<<<< @@ -25751,7 +25922,7 @@ */ } - /* "mtrand.pyx":2371 + /* "mtrand.pyx":2382 * if fnonc < 0: * raise ValueError("nonc < 0") * return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, # <<<<<<<<<<<<<< @@ -25760,7 +25931,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2372 + /* "mtrand.pyx":2383 * raise ValueError("nonc < 0") * return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, * 
size, fdf, fnonc, self.lock) # <<<<<<<<<<<<<< @@ -25770,21 +25941,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":2371 + /* "mtrand.pyx":2382 * if fnonc < 0: * raise ValueError("nonc < 0") * return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, # <<<<<<<<<<<<<< * size, fdf, fnonc, self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_noncentral_chisquare, __pyx_v_size, __pyx_v_fdf, __pyx_v_fnonc, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2371, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_noncentral_chisquare, __pyx_v_size, __pyx_v_fdf, __pyx_v_fnonc, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":2363 + /* "mtrand.pyx":2374 * ononc = PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if odf.shape == ononc.shape == (): # <<<<<<<<<<<<<< @@ -25793,21 +25964,21 @@ */ } - /* "mtrand.pyx":2374 + /* "mtrand.pyx":2385 * size, fdf, fnonc, self.lock) * * if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("df <= 0") * if np.any(np.less(ononc, 0.0)): */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_6 = 
__Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -25825,7 +25996,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_odf), __pyx_float_0_0}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -25833,13 +26004,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_odf), __pyx_float_0_0}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -25850,7 +26021,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); 
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -25866,14 +26037,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -25882,43 +26053,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); 
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2374, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2385, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":2375 + /* "mtrand.pyx":2386 * * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__95, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2375, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__97, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 2375, __pyx_L1_error) + __PYX_ERR(0, 2386, __pyx_L1_error) - /* "mtrand.pyx":2374 + /* "mtrand.pyx":2385 * size, fdf, fnonc, self.lock) * * if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<< @@ -25927,21 +26098,21 @@ */ } - /* "mtrand.pyx":2376 + /* "mtrand.pyx":2387 * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") * if np.any(np.less(ononc, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("nonc < 0") * return cont2_array(self.internal_state, rk_noncentral_chisquare, size, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -25959,7 +26130,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_ononc), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -25967,13 +26138,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_ononc), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2376, __pyx_L1_error) + 
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL; @@ -25984,7 +26155,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } @@ -26000,14 +26171,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -26016,43 +26187,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = 
{__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2376, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2387, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":2377 + /* "mtrand.pyx":2388 * raise ValueError("df <= 0") * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_noncentral_chisquare, size, * odf, ononc, self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__96, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2377, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__98, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 
2388, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 2377, __pyx_L1_error) + __PYX_ERR(0, 2388, __pyx_L1_error) - /* "mtrand.pyx":2376 + /* "mtrand.pyx":2387 * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") * if np.any(np.less(ononc, 0.0)): # <<<<<<<<<<<<<< @@ -26061,7 +26232,7 @@ */ } - /* "mtrand.pyx":2378 + /* "mtrand.pyx":2389 * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") * return cont2_array(self.internal_state, rk_noncentral_chisquare, size, # <<<<<<<<<<<<<< @@ -26070,7 +26241,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2379 + /* "mtrand.pyx":2390 * raise ValueError("nonc < 0") * return cont2_array(self.internal_state, rk_noncentral_chisquare, size, * odf, ononc, self.lock) # <<<<<<<<<<<<<< @@ -26080,21 +26251,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":2378 + /* "mtrand.pyx":2389 * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") * return cont2_array(self.internal_state, rk_noncentral_chisquare, size, # <<<<<<<<<<<<<< * odf, ononc, self.lock) * */ - __pyx_t_9 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_noncentral_chisquare, __pyx_v_size, __pyx_v_odf, __pyx_v_ononc, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2378, __pyx_L1_error) + __pyx_t_9 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_noncentral_chisquare, __pyx_v_size, __pyx_v_odf, __pyx_v_ononc, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; - /* "mtrand.pyx":2277 + /* "mtrand.pyx":2286 * self.lock) * * def noncentral_chisquare(self, df, nonc, size=None): # <<<<<<<<<<<<<< @@ -26120,7 +26291,7 @@ return __pyx_r; } -/* "mtrand.pyx":2381 +/* "mtrand.pyx":2392 * odf, ononc, self.lock) * * def standard_cauchy(self, size=None): # <<<<<<<<<<<<<< @@ 
-26153,12 +26324,12 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[0] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_cauchy") < 0)) __PYX_ERR(0, 2381, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_cauchy") < 0)) __PYX_ERR(0, 2392, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -26172,7 +26343,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("standard_cauchy", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2381, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("standard_cauchy", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2392, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.standard_cauchy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -26192,7 +26363,7 @@ PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("standard_cauchy", 0); - /* "mtrand.pyx":2442 + /* "mtrand.pyx":2453 * * """ * return cont0_array(self.internal_state, rk_standard_cauchy, size, # <<<<<<<<<<<<<< @@ -26201,7 +26372,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2443 + /* "mtrand.pyx":2454 * """ * return cont0_array(self.internal_state, rk_standard_cauchy, size, * self.lock) # <<<<<<<<<<<<<< @@ -26211,21 +26382,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":2442 + /* "mtrand.pyx":2453 * * """ * return cont0_array(self.internal_state, rk_standard_cauchy, size, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_standard_cauchy, __pyx_v_size, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2442, __pyx_L1_error) + 
__pyx_t_2 = __pyx_f_6mtrand_cont0_array(__pyx_v_self->internal_state, rk_standard_cauchy, __pyx_v_size, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2453, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":2381 + /* "mtrand.pyx":2392 * odf, ononc, self.lock) * * def standard_cauchy(self, size=None): # <<<<<<<<<<<<<< @@ -26245,7 +26416,7 @@ return __pyx_r; } -/* "mtrand.pyx":2445 +/* "mtrand.pyx":2456 * self.lock) * * def standard_t(self, df, size=None): # <<<<<<<<<<<<<< @@ -26255,7 +26426,7 @@ /* Python wrapper */ static PyObject *__pyx_pw_6mtrand_11RandomState_59standard_t(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6mtrand_11RandomState_58standard_t[] = "\n standard_t(df, size=None)\n\n Draw samples from a standard Student's t distribution with `df` degrees\n of freedom.\n\n A special case of the hyperbolic distribution. As `df` gets\n large, the result resembles that of the standard normal\n distribution (`standard_normal`).\n\n Parameters\n ----------\n df : int or array_like of ints\n Degrees of freedom, should be > 0.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``df`` is a scalar. Otherwise,\n ``np.array(df).size`` samples are drawn.\n\n Returns\n -------\n out : ndarray or scalar\n Drawn samples from the parameterized standard Student's t distribution.\n\n Notes\n -----\n The probability density function for the t distribution is\n\n .. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df}\n \\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2}\n\n The t test is based on an assumption that the data come from a\n Normal distribution. 
The t test provides a way to test whether\n the sample mean (that is the mean calculated from the data) is\n a good estimate of the true mean.\n\n The derivation of the t-distribution was first published in\n 1908 by William Gosset while working for the Guinness Brewery\n in Dublin. Due to proprietary issues, he had to publish under\n a pseudonym, and so he used the name Student.\n\n References\n ----------\n .. [1] Dalgaard, Peter, \"Introductory Statistics With R\",\n Springer, 2002.\n .. [2] Wikipedia, \"Student's t-distribution\"\n http://en.wikipedia.org/wiki/Student's_t-distributio""n\n\n Examples\n --------\n From Dalgaard page 83 [1]_, suppose the daily energy intake for 11\n women in Kj is:\n\n >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\\n ... 7515, 8230, 8770])\n\n Does their energy intake deviate systematically from the recommended\n value of 7725 kJ?\n\n We have 10 degrees of freedom, so is the sample mean within 95% of the\n recommended value?\n\n >>> s = np.random.standard_t(10, size=100000)\n >>> np.mean(intake)\n 6753.636363636364\n >>> intake.std(ddof=1)\n 1142.1232221373727\n\n Calculate the t statistic, setting the ddof parameter to the unbiased\n value so the divisor in the standard deviation will be degrees of\n freedom, N-1.\n\n >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))\n >>> import matplotlib.pyplot as plt\n >>> h = plt.hist(s, bins=100, normed=True)\n\n For a one-sided t-test, how far out in the distribution does the t\n statistic appear?\n\n >>> np.sum(s>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\\n ... 
7515, 8230, 8770])\n\n Does their energy intake deviate systematically from the recommended\n value of 7725 kJ?\n\n We have 10 degrees of freedom, so is the sample mean within 95% of the\n recommended value?\n\n >>> s = np.random.standard_t(10, size=100000)\n >>> np.mean(intake)\n 6753.636363636364\n >>> intake.std(ddof=1)\n 1142.1232221373727\n\n Calculate the t statistic, setting the ddof parameter to the unbiased\n value so the divisor in the standard deviation will be degrees of\n freedom, N-1.\n\n >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))\n >>> import matplotlib.pyplot as plt\n >>> h = plt.hist(s, bins=100, normed=True)\n\n For a one-sided t-test, how far out in the distribution does the t\n statistic appear?\n\n >>> np.sum(s 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_t") < 0)) __PYX_ERR(0, 2445, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "standard_t") < 0)) __PYX_ERR(0, 2456, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -26306,7 +26477,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("standard_t", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2445, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("standard_t", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2456, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.standard_t", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -26335,14 +26506,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("standard_t", 0); - /* "mtrand.pyx":2536 + /* "mtrand.pyx":2547 * cdef double fdf * * odf = 
PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if odf.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2536, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_df, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2547, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -26350,32 +26521,32 @@ __pyx_v_odf = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":2538 + /* "mtrand.pyx":2549 * odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if odf.shape == (): # <<<<<<<<<<<<<< * fdf = PyFloat_AsDouble(df) * */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odf), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2538, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_odf), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2538, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2538, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":2539 + /* "mtrand.pyx":2550 * * if odf.shape == (): * fdf = PyFloat_AsDouble(df) # <<<<<<<<<<<<<< * * if fdf <= 0: */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_df); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2539, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_df); if 
(unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2550, __pyx_L1_error) __pyx_v_fdf = __pyx_t_4; - /* "mtrand.pyx":2541 + /* "mtrand.pyx":2552 * fdf = PyFloat_AsDouble(df) * * if fdf <= 0: # <<<<<<<<<<<<<< @@ -26383,22 +26554,22 @@ * return cont1_array_sc(self.internal_state, rk_standard_t, size, */ __pyx_t_3 = ((__pyx_v_fdf <= 0.0) != 0); - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":2542 + /* "mtrand.pyx":2553 * * if fdf <= 0: * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_standard_t, size, * fdf, self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__97, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2542, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__99, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2553, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 2542, __pyx_L1_error) + __PYX_ERR(0, 2553, __pyx_L1_error) - /* "mtrand.pyx":2541 + /* "mtrand.pyx":2552 * fdf = PyFloat_AsDouble(df) * * if fdf <= 0: # <<<<<<<<<<<<<< @@ -26407,7 +26578,7 @@ */ } - /* "mtrand.pyx":2543 + /* "mtrand.pyx":2554 * if fdf <= 0: * raise ValueError("df <= 0") * return cont1_array_sc(self.internal_state, rk_standard_t, size, # <<<<<<<<<<<<<< @@ -26416,7 +26587,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2544 + /* "mtrand.pyx":2555 * raise ValueError("df <= 0") * return cont1_array_sc(self.internal_state, rk_standard_t, size, * fdf, self.lock) # <<<<<<<<<<<<<< @@ -26426,21 +26597,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":2543 + /* "mtrand.pyx":2554 * if fdf <= 0: * raise ValueError("df <= 0") * return cont1_array_sc(self.internal_state, rk_standard_t, size, # <<<<<<<<<<<<<< * fdf, self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_standard_t, __pyx_v_size, 
__pyx_v_fdf, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2543, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_standard_t, __pyx_v_size, __pyx_v_fdf, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":2538 + /* "mtrand.pyx":2549 * odf = PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if odf.shape == (): # <<<<<<<<<<<<<< @@ -26449,21 +26620,21 @@ */ } - /* "mtrand.pyx":2546 + /* "mtrand.pyx":2557 * fdf, self.lock) * * if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("df <= 0") * return cont1_array(self.internal_state, rk_standard_t, size, odf, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -26481,7 
+26652,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_odf), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -26489,13 +26660,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_odf), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -26506,7 +26677,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -26522,14 +26693,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -26538,43 +26709,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } 
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2546, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2557, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":2547 + /* "mtrand.pyx":2558 * * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_standard_t, size, odf, * self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__98, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2547, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__100, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 2547, __pyx_L1_error) + __PYX_ERR(0, 2558, __pyx_L1_error) - /* "mtrand.pyx":2546 + /* "mtrand.pyx":2557 * fdf, self.lock) * * if np.any(np.less_equal(odf, 0.0)): # <<<<<<<<<<<<<< @@ -26583,7 +26754,7 @@ */ } - /* "mtrand.pyx":2548 + /* "mtrand.pyx":2559 * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") * return cont1_array(self.internal_state, rk_standard_t, size, odf, # <<<<<<<<<<<<<< @@ -26592,7 +26763,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2549 + /* "mtrand.pyx":2560 * raise ValueError("df <= 0") * return cont1_array(self.internal_state, rk_standard_t, size, odf, * self.lock) # <<<<<<<<<<<<<< @@ -26602,21 +26773,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":2548 + /* "mtrand.pyx":2559 * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") * return cont1_array(self.internal_state, rk_standard_t, size, odf, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_5 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_standard_t, 
__pyx_v_size, __pyx_v_odf, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2548, __pyx_L1_error) + __pyx_t_5 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_standard_t, __pyx_v_size, __pyx_v_odf, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; - /* "mtrand.pyx":2445 + /* "mtrand.pyx":2456 * self.lock) * * def standard_t(self, df, size=None): # <<<<<<<<<<<<<< @@ -26641,7 +26812,7 @@ return __pyx_r; } -/* "mtrand.pyx":2551 +/* "mtrand.pyx":2562 * self.lock) * * def vonmises(self, mu, kappa, size=None): # <<<<<<<<<<<<<< @@ -26679,23 +26850,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mu)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mu)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_kappa)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kappa)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("vonmises", 0, 2, 3, 1); __PYX_ERR(0, 2551, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("vonmises", 0, 2, 3, 1); __PYX_ERR(0, 2562, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vonmises") < 0)) __PYX_ERR(0, 2551, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "vonmises") < 0)) __PYX_ERR(0, 2562, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ 
-26713,7 +26884,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("vonmises", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2551, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("vonmises", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2562, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.vonmises", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -26744,14 +26915,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("vonmises", 0); - /* "mtrand.pyx":2632 + /* "mtrand.pyx":2643 * cdef double fmu, fkappa * * omu = PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * okappa = PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2632, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2643, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -26759,14 +26930,14 @@ __pyx_v_omu = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":2633 + /* "mtrand.pyx":2644 * * omu = PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * okappa = PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if omu.shape == okappa.shape == (): */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2633, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -26774,49 +26945,49 @@ __pyx_v_okappa = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":2635 + /* "mtrand.pyx":2646 * okappa = PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * 
* if omu.shape == okappa.shape == (): # <<<<<<<<<<<<<< * fmu = PyFloat_AsDouble(mu) * fkappa = PyFloat_AsDouble(kappa) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_omu), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2635, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_omu), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_okappa), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2635, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_okappa), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2635, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2646, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2635, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2646, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2635, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2646, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":2636 + /* "mtrand.pyx":2647 * * if omu.shape == okappa.shape == (): * fmu = PyFloat_AsDouble(mu) # <<<<<<<<<<<<<< * fkappa = PyFloat_AsDouble(kappa) * */ - __pyx_t_5 = 
PyFloat_AsDouble(__pyx_v_mu); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2636, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_mu); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2647, __pyx_L1_error) __pyx_v_fmu = __pyx_t_5; - /* "mtrand.pyx":2637 + /* "mtrand.pyx":2648 * if omu.shape == okappa.shape == (): * fmu = PyFloat_AsDouble(mu) * fkappa = PyFloat_AsDouble(kappa) # <<<<<<<<<<<<<< * * if fkappa < 0: */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_kappa); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2637, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_kappa); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2648, __pyx_L1_error) __pyx_v_fkappa = __pyx_t_5; - /* "mtrand.pyx":2639 + /* "mtrand.pyx":2650 * fkappa = PyFloat_AsDouble(kappa) * * if fkappa < 0: # <<<<<<<<<<<<<< @@ -26824,22 +26995,22 @@ * return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, */ __pyx_t_4 = ((__pyx_v_fkappa < 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":2640 + /* "mtrand.pyx":2651 * * if fkappa < 0: * raise ValueError("kappa < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, * fkappa, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__99, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2640, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__101, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 2640, __pyx_L1_error) + __PYX_ERR(0, 2651, __pyx_L1_error) - /* "mtrand.pyx":2639 + /* "mtrand.pyx":2650 * fkappa = PyFloat_AsDouble(kappa) * * if fkappa < 0: # <<<<<<<<<<<<<< @@ -26848,7 +27019,7 @@ */ } - /* "mtrand.pyx":2641 + /* "mtrand.pyx":2652 * if fkappa < 0: * raise ValueError("kappa 
< 0") * return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, # <<<<<<<<<<<<<< @@ -26857,7 +27028,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2642 + /* "mtrand.pyx":2653 * raise ValueError("kappa < 0") * return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, * fkappa, self.lock) # <<<<<<<<<<<<<< @@ -26867,21 +27038,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":2641 + /* "mtrand.pyx":2652 * if fkappa < 0: * raise ValueError("kappa < 0") * return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, # <<<<<<<<<<<<<< * fkappa, self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_vonmises, __pyx_v_size, __pyx_v_fmu, __pyx_v_fkappa, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2641, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_vonmises, __pyx_v_size, __pyx_v_fmu, __pyx_v_fkappa, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2652, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":2635 + /* "mtrand.pyx":2646 * okappa = PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if omu.shape == okappa.shape == (): # <<<<<<<<<<<<<< @@ -26890,21 +27061,21 @@ */ } - /* "mtrand.pyx":2644 + /* "mtrand.pyx":2655 * fkappa, self.lock) * * if np.any(np.less(okappa, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("kappa < 0") * return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa, */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_1 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -26922,7 +27093,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_okappa), __pyx_float_0_0}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -26930,13 +27101,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_okappa), __pyx_float_0_0}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_9 = 
PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -26947,7 +27118,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -26963,14 +27134,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -26979,43 +27150,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; 
__Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2644, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 2655, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":2645 + /* "mtrand.pyx":2656 * * if np.any(np.less(okappa, 0.0)): * raise ValueError("kappa < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa, * self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__100, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2645, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__102, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2656, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 2645, __pyx_L1_error) + __PYX_ERR(0, 2656, __pyx_L1_error) - /* "mtrand.pyx":2644 + /* "mtrand.pyx":2655 * fkappa, self.lock) * * if np.any(np.less(okappa, 0.0)): # <<<<<<<<<<<<<< @@ -27024,7 +27195,7 @@ */ } - /* "mtrand.pyx":2646 + /* 
"mtrand.pyx":2657 * if np.any(np.less(okappa, 0.0)): * raise ValueError("kappa < 0") * return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa, # <<<<<<<<<<<<<< @@ -27033,7 +27204,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2647 + /* "mtrand.pyx":2658 * raise ValueError("kappa < 0") * return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa, * self.lock) # <<<<<<<<<<<<<< @@ -27043,21 +27214,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":2646 + /* "mtrand.pyx":2657 * if np.any(np.less(okappa, 0.0)): * raise ValueError("kappa < 0") * return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_vonmises, __pyx_v_size, __pyx_v_omu, __pyx_v_okappa, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2646, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_vonmises, __pyx_v_size, __pyx_v_omu, __pyx_v_okappa, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2657, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":2551 + /* "mtrand.pyx":2562 * self.lock) * * def vonmises(self, mu, kappa, size=None): # <<<<<<<<<<<<<< @@ -27083,7 +27254,7 @@ return __pyx_r; } -/* "mtrand.pyx":2649 +/* "mtrand.pyx":2660 * self.lock) * * def pareto(self, a, size=None): # <<<<<<<<<<<<<< @@ -27118,17 +27289,17 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if 
(value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pareto") < 0)) __PYX_ERR(0, 2649, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pareto") < 0)) __PYX_ERR(0, 2660, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -27144,7 +27315,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("pareto", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2649, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("pareto", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2660, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.pareto", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -27173,14 +27344,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("pareto", 0); - /* "mtrand.pyx":2745 + /* "mtrand.pyx":2756 * cdef double fa * * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oa.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2745, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2756, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -27188,32 +27359,32 @@ __pyx_v_oa = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":2747 + /* "mtrand.pyx":2758 * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oa.shape == (): # <<<<<<<<<<<<<< * fa = PyFloat_AsDouble(a) * */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oa), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2747, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oa), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 2758, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2747, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2758, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2747, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2758, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":2748 + /* "mtrand.pyx":2759 * * if oa.shape == (): * fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<< * * if fa <= 0: */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_a); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2748, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_a); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2759, __pyx_L1_error) __pyx_v_fa = __pyx_t_4; - /* "mtrand.pyx":2750 + /* "mtrand.pyx":2761 * fa = PyFloat_AsDouble(a) * * if fa <= 0: # <<<<<<<<<<<<<< @@ -27221,22 +27392,22 @@ * return cont1_array_sc(self.internal_state, rk_pareto, size, fa, */ __pyx_t_3 = ((__pyx_v_fa <= 0.0) != 0); - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":2751 + /* "mtrand.pyx":2762 * * if fa <= 0: * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_pareto, size, fa, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__101, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2751, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__103, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 
0; - __PYX_ERR(0, 2751, __pyx_L1_error) + __PYX_ERR(0, 2762, __pyx_L1_error) - /* "mtrand.pyx":2750 + /* "mtrand.pyx":2761 * fa = PyFloat_AsDouble(a) * * if fa <= 0: # <<<<<<<<<<<<<< @@ -27245,7 +27416,7 @@ */ } - /* "mtrand.pyx":2752 + /* "mtrand.pyx":2763 * if fa <= 0: * raise ValueError("a <= 0") * return cont1_array_sc(self.internal_state, rk_pareto, size, fa, # <<<<<<<<<<<<<< @@ -27254,7 +27425,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2753 + /* "mtrand.pyx":2764 * raise ValueError("a <= 0") * return cont1_array_sc(self.internal_state, rk_pareto, size, fa, * self.lock) # <<<<<<<<<<<<<< @@ -27264,21 +27435,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":2752 + /* "mtrand.pyx":2763 * if fa <= 0: * raise ValueError("a <= 0") * return cont1_array_sc(self.internal_state, rk_pareto, size, fa, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_pareto, __pyx_v_size, __pyx_v_fa, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2752, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_pareto, __pyx_v_size, __pyx_v_fa, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2763, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":2747 + /* "mtrand.pyx":2758 * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oa.shape == (): # <<<<<<<<<<<<<< @@ -27287,21 +27458,21 @@ */ } - /* "mtrand.pyx":2755 + /* "mtrand.pyx":2766 * self.lock) * * if np.any(np.less_equal(oa, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("a <= 0") * return cont1_array(self.internal_state, rk_pareto, size, oa, self.lock) */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2766, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -27319,7 +27490,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_oa), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -27327,13 +27498,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_oa), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); 
__pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -27344,7 +27515,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -27360,14 +27531,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -27376,43 +27547,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_2 = 
__Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2755, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2766, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":2756 + /* "mtrand.pyx":2767 * * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_pareto, size, oa, self.lock) * */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__102, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2756, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__104, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2767, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 2756, __pyx_L1_error) + __PYX_ERR(0, 2767, __pyx_L1_error) - /* "mtrand.pyx":2755 + /* 
"mtrand.pyx":2766 * self.lock) * * if np.any(np.less_equal(oa, 0.0)): # <<<<<<<<<<<<<< @@ -27421,7 +27592,7 @@ */ } - /* "mtrand.pyx":2757 + /* "mtrand.pyx":2768 * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") * return cont1_array(self.internal_state, rk_pareto, size, oa, self.lock) # <<<<<<<<<<<<<< @@ -27431,14 +27602,14 @@ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - __pyx_t_5 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_pareto, __pyx_v_size, __pyx_v_oa, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2757, __pyx_L1_error) + __pyx_t_5 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_pareto, __pyx_v_size, __pyx_v_oa, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2768, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; - /* "mtrand.pyx":2649 + /* "mtrand.pyx":2660 * self.lock) * * def pareto(self, a, size=None): # <<<<<<<<<<<<<< @@ -27463,7 +27634,7 @@ return __pyx_r; } -/* "mtrand.pyx":2759 +/* "mtrand.pyx":2770 * return cont1_array(self.internal_state, rk_pareto, size, oa, self.lock) * * def weibull(self, a, size=None): # <<<<<<<<<<<<<< @@ -27498,17 +27669,17 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "weibull") < 0)) __PYX_ERR(0, 2759, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, 
__pyx_pyargnames, 0, values, pos_args, "weibull") < 0)) __PYX_ERR(0, 2770, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -27524,7 +27695,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("weibull", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2759, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("weibull", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2770, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.weibull", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -27552,14 +27723,14 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("weibull", 0); - /* "mtrand.pyx":2855 + /* "mtrand.pyx":2866 * cdef double fa * * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oa.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2855, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2866, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -27567,44 +27738,44 @@ __pyx_v_oa = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":2857 + /* "mtrand.pyx":2868 * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oa.shape == (): # <<<<<<<<<<<<<< * fa = PyFloat_AsDouble(a) * if np.signbit(fa): */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oa), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2857, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oa), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2857, __pyx_L1_error) + __pyx_t_1 = 
PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2868, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2857, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2868, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":2858 + /* "mtrand.pyx":2869 * * if oa.shape == (): * fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<< * if np.signbit(fa): * raise ValueError("a < 0") */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_a); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2858, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_a); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2869, __pyx_L1_error) __pyx_v_fa = __pyx_t_4; - /* "mtrand.pyx":2859 + /* "mtrand.pyx":2870 * if oa.shape == (): * fa = PyFloat_AsDouble(a) * if np.signbit(fa): # <<<<<<<<<<<<<< * raise ValueError("a < 0") * return cont1_array_sc(self.internal_state, rk_weibull, size, fa, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2859, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2859, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fa); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2859, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fa); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = 
NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { @@ -27617,14 +27788,14 @@ } } if (!__pyx_t_6) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2859, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2870, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2859, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2870, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -27633,43 +27804,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2859, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2870, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2859, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2859, __pyx_L1_error) + __pyx_t_1 = 
__Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2859, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2870, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":2860 + /* "mtrand.pyx":2871 * fa = PyFloat_AsDouble(a) * if np.signbit(fa): * raise ValueError("a < 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_weibull, size, fa, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__103, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2860, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__105, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2871, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 2860, __pyx_L1_error) + __PYX_ERR(0, 2871, __pyx_L1_error) - /* "mtrand.pyx":2859 + /* "mtrand.pyx":2870 * if oa.shape == (): * fa = PyFloat_AsDouble(a) * if np.signbit(fa): # <<<<<<<<<<<<<< @@ -27678,7 +27849,7 @@ */ } - /* "mtrand.pyx":2861 + /* "mtrand.pyx":2872 * if np.signbit(fa): * raise ValueError("a < 0") * return cont1_array_sc(self.internal_state, rk_weibull, size, fa, # <<<<<<<<<<<<<< @@ -27687,7 +27858,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2862 + /* "mtrand.pyx":2873 * raise ValueError("a < 0") * return cont1_array_sc(self.internal_state, rk_weibull, size, fa, * self.lock) # <<<<<<<<<<<<<< @@ -27697,21 +27868,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":2861 + /* "mtrand.pyx":2872 * if np.signbit(fa): * raise ValueError("a < 0") * return 
cont1_array_sc(self.internal_state, rk_weibull, size, fa, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_5 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_weibull, __pyx_v_size, __pyx_v_fa, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2861, __pyx_L1_error) + __pyx_t_5 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_weibull, __pyx_v_size, __pyx_v_fa, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2872, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; - /* "mtrand.pyx":2857 + /* "mtrand.pyx":2868 * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oa.shape == (): # <<<<<<<<<<<<<< @@ -27720,21 +27891,21 @@ */ } - /* "mtrand.pyx":2864 + /* "mtrand.pyx":2875 * self.lock) * * if np.any(np.signbit(oa)): # <<<<<<<<<<<<<< * raise ValueError("a < 0") * return cont1_array(self.internal_state, rk_weibull, size, oa, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if 
(unlikely(!__pyx_t_6)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -27748,13 +27919,13 @@ } } if (!__pyx_t_2) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oa)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oa)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oa)}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -27762,19 +27933,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oa)}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oa)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oa)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_oa)); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -27791,14 +27962,14 @@ } } if (!__pyx_t_6) { - __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_5); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; - __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -27807,43 +27978,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; - __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+1, 
__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2864, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2875, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":2865 + /* "mtrand.pyx":2876 * * if np.any(np.signbit(oa)): * raise ValueError("a < 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_weibull, size, oa, * self.lock) */ - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__104, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2865, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__106, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2876, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(0, 2865, __pyx_L1_error) + __PYX_ERR(0, 2876, __pyx_L1_error) - /* "mtrand.pyx":2864 + /* "mtrand.pyx":2875 * self.lock) * * if np.any(np.signbit(oa)): # <<<<<<<<<<<<<< @@ -27852,7 +28023,7 @@ */ } - /* "mtrand.pyx":2866 + /* "mtrand.pyx":2877 * if np.any(np.signbit(oa)): * raise ValueError("a < 0") * return cont1_array(self.internal_state, rk_weibull, size, oa, # <<<<<<<<<<<<<< @@ -27861,7 +28032,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2867 + /* "mtrand.pyx":2878 * raise ValueError("a < 0") * return cont1_array(self.internal_state, rk_weibull, size, oa, * self.lock) # <<<<<<<<<<<<<< @@ -27871,21 +28042,21 @@ __pyx_t_5 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_5); - /* 
"mtrand.pyx":2866 + /* "mtrand.pyx":2877 * if np.any(np.signbit(oa)): * raise ValueError("a < 0") * return cont1_array(self.internal_state, rk_weibull, size, oa, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_7 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_weibull, __pyx_v_size, __pyx_v_oa, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2866, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_weibull, __pyx_v_size, __pyx_v_oa, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2877, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":2759 + /* "mtrand.pyx":2770 * return cont1_array(self.internal_state, rk_pareto, size, oa, self.lock) * * def weibull(self, a, size=None): # <<<<<<<<<<<<<< @@ -27910,7 +28081,7 @@ return __pyx_r; } -/* "mtrand.pyx":2869 +/* "mtrand.pyx":2880 * self.lock) * * def power(self, a, size=None): # <<<<<<<<<<<<<< @@ -27945,17 +28116,17 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "power") < 0)) __PYX_ERR(0, 2869, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "power") < 0)) __PYX_ERR(0, 2880, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -27971,7 +28142,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; 
- __Pyx_RaiseArgtupleInvalid("power", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2869, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("power", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2880, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.power", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -27999,14 +28170,14 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("power", 0); - /* "mtrand.pyx":2967 + /* "mtrand.pyx":2978 * cdef double fa * * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oa.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2967, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -28014,44 +28185,44 @@ __pyx_v_oa = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":2969 + /* "mtrand.pyx":2980 * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oa.shape == (): # <<<<<<<<<<<<<< * fa = PyFloat_AsDouble(a) * if np.signbit(fa): */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oa), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2969, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oa), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2980, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2969, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2980, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 
0)) __PYX_ERR(0, 2969, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2980, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":2970 + /* "mtrand.pyx":2981 * * if oa.shape == (): * fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<< * if np.signbit(fa): * raise ValueError("a < 0") */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_a); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 2970, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_a); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 2981, __pyx_L1_error) __pyx_v_fa = __pyx_t_4; - /* "mtrand.pyx":2971 + /* "mtrand.pyx":2982 * if oa.shape == (): * fa = PyFloat_AsDouble(a) * if np.signbit(fa): # <<<<<<<<<<<<<< * raise ValueError("a < 0") * return cont1_array_sc(self.internal_state, rk_power, size, fa, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2971, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2982, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2971, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2982, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fa); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2971, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fa); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2982, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { @@ -28064,14 +28235,14 @@ } } if (!__pyx_t_6) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2971, __pyx_L1_error) + __pyx_t_1 = 
__Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2982, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2971, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2982, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -28080,43 +28251,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2971, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2982, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2971, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2982, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2971, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2982, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if 
(unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2971, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2982, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":2972 + /* "mtrand.pyx":2983 * fa = PyFloat_AsDouble(a) * if np.signbit(fa): * raise ValueError("a < 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_power, size, fa, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__105, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2972, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__107, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 2972, __pyx_L1_error) + __PYX_ERR(0, 2983, __pyx_L1_error) - /* "mtrand.pyx":2971 + /* "mtrand.pyx":2982 * if oa.shape == (): * fa = PyFloat_AsDouble(a) * if np.signbit(fa): # <<<<<<<<<<<<<< @@ -28125,7 +28296,7 @@ */ } - /* "mtrand.pyx":2973 + /* "mtrand.pyx":2984 * if np.signbit(fa): * raise ValueError("a < 0") * return cont1_array_sc(self.internal_state, rk_power, size, fa, # <<<<<<<<<<<<<< @@ -28134,7 +28305,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":2974 + /* "mtrand.pyx":2985 * raise ValueError("a < 0") * return cont1_array_sc(self.internal_state, rk_power, size, fa, * self.lock) # <<<<<<<<<<<<<< @@ -28144,21 +28315,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":2973 + /* "mtrand.pyx":2984 * if np.signbit(fa): * raise ValueError("a < 0") * return cont1_array_sc(self.internal_state, rk_power, size, fa, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_5 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_power, __pyx_v_size, __pyx_v_fa, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2973, __pyx_L1_error) + __pyx_t_5 = 
__pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_power, __pyx_v_size, __pyx_v_fa, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2984, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; - /* "mtrand.pyx":2969 + /* "mtrand.pyx":2980 * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oa.shape == (): # <<<<<<<<<<<<<< @@ -28167,21 +28338,21 @@ */ } - /* "mtrand.pyx":2976 + /* "mtrand.pyx":2987 * self.lock) * * if np.any(np.signbit(oa)): # <<<<<<<<<<<<<< * raise ValueError("a < 0") * return cont1_array(self.internal_state, rk_power, size, oa, self.lock) */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -28195,13 +28366,13 @@ } } if (!__pyx_t_2) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oa)); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oa)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oa)}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -28209,19 +28380,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oa)}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oa)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oa)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_oa)); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -28238,14 +28409,14 @@ } } if 
(!__pyx_t_6) { - __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_5); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; - __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -28254,43 +28425,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; - __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2987, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2976, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 2987, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":2977 + /* "mtrand.pyx":2988 * * if np.any(np.signbit(oa)): * raise ValueError("a < 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_power, size, oa, self.lock) * */ - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__106, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2977, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__108, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 2988, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(0, 2977, __pyx_L1_error) + __PYX_ERR(0, 2988, __pyx_L1_error) - /* "mtrand.pyx":2976 + /* "mtrand.pyx":2987 * self.lock) * * if np.any(np.signbit(oa)): # <<<<<<<<<<<<<< @@ -28299,7 +28470,7 @@ */ } - /* "mtrand.pyx":2978 + /* "mtrand.pyx":2989 * if np.any(np.signbit(oa)): * raise ValueError("a < 0") * return cont1_array(self.internal_state, rk_power, size, oa, self.lock) # <<<<<<<<<<<<<< @@ -28309,14 +28480,14 @@ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_5); - __pyx_t_7 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_power, __pyx_v_size, __pyx_v_oa, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2978, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_power, __pyx_v_size, __pyx_v_oa, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; 
- /* "mtrand.pyx":2869 + /* "mtrand.pyx":2880 * self.lock) * * def power(self, a, size=None): # <<<<<<<<<<<<<< @@ -28341,7 +28512,7 @@ return __pyx_r; } -/* "mtrand.pyx":2980 +/* "mtrand.pyx":2991 * return cont1_array(self.internal_state, rk_power, size, oa, self.lock) * * def laplace(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -28382,24 +28553,24 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_loc); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_loc); if (value) { values[0] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_scale); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_scale); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "laplace") < 0)) __PYX_ERR(0, 2980, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "laplace") < 0)) __PYX_ERR(0, 2991, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -28419,7 +28590,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("laplace", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2980, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("laplace", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 2991, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.laplace", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -28449,87 +28620,87 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("laplace", 0); - /* 
"mtrand.pyx":3062 + /* "mtrand.pyx":3073 * cdef double floc, fscale * * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3062, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3073, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3062, __pyx_L1_error) + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3073, __pyx_L1_error) __pyx_v_oloc = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3063 + /* "mtrand.pyx":3074 * * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oloc.shape == oscale.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3063, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3074, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3063, __pyx_L1_error) + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3074, __pyx_L1_error) __pyx_v_oscale = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3065 + /* "mtrand.pyx":3076 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oloc.shape == oscale.shape == (): # <<<<<<<<<<<<<< * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) */ - 
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oloc), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3065, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oloc), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3076, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3065, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3076, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3065, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3076, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3065, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3076, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3065, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3076, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":3066 + /* "mtrand.pyx":3077 * * if oloc.shape == oscale.shape == (): * floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_loc); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) 
__PYX_ERR(0, 3066, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_loc); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3077, __pyx_L1_error) __pyx_v_floc = __pyx_t_5; - /* "mtrand.pyx":3067 + /* "mtrand.pyx":3078 * if oloc.shape == oscale.shape == (): * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if np.signbit(fscale): * raise ValueError("scale < 0") */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3067, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3078, __pyx_L1_error) __pyx_v_fscale = __pyx_t_5; - /* "mtrand.pyx":3068 + /* "mtrand.pyx":3079 * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_laplace, size, floc, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3068, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3068, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3068, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { @@ -28542,14 +28713,14 @@ } } if (!__pyx_t_6) { - __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3068, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3079, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3068, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3079, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -28558,43 +28729,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3068, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3079, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3068, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3068, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3068, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3079, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3069 + /* "mtrand.pyx":3080 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_laplace, size, floc, * fscale, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__107, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3069, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__109, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3080, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3069, __pyx_L1_error) + __PYX_ERR(0, 3080, __pyx_L1_error) - /* "mtrand.pyx":3068 + /* "mtrand.pyx":3079 * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< @@ -28603,7 +28774,7 @@ */ } - /* "mtrand.pyx":3070 + /* "mtrand.pyx":3081 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_laplace, size, floc, # <<<<<<<<<<<<<< @@ -28612,7 +28783,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3071 + /* "mtrand.pyx":3082 * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_laplace, size, floc, * fscale, self.lock) # <<<<<<<<<<<<<< @@ -28622,21 +28793,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":3070 + /* "mtrand.pyx":3081 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_laplace, size, floc, # <<<<<<<<<<<<<< * fscale, 
self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_laplace, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3070, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_laplace, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3081, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":3065 + /* "mtrand.pyx":3076 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oloc.shape == oscale.shape == (): # <<<<<<<<<<<<<< @@ -28645,21 +28816,21 @@ */ } - /* "mtrand.pyx":3073 + /* "mtrand.pyx":3084 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale, */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, 
__pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -28673,13 +28844,13 @@ } } if (!__pyx_t_2) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -28687,19 +28858,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oscale)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oscale)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_oscale)); - __pyx_t_3 = 
__Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -28716,14 +28887,14 @@ } } if (!__pyx_t_6) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -28732,43 +28903,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; 
__Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3073, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3084, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3074 + /* "mtrand.pyx":3085 * * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__108, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3074, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__110, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3085, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3074, __pyx_L1_error) + __PYX_ERR(0, 3085, __pyx_L1_error) - /* "mtrand.pyx":3073 + /* "mtrand.pyx":3084 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< @@ -28777,7 +28948,7 @@ */ } - /* "mtrand.pyx":3075 + /* "mtrand.pyx":3086 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale, # <<<<<<<<<<<<<< @@ -28786,7 +28957,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3076 + /* "mtrand.pyx":3087 * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale, * 
self.lock) # <<<<<<<<<<<<<< @@ -28796,21 +28967,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":3075 + /* "mtrand.pyx":3086 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_laplace, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3075, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_laplace, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3086, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":2980 + /* "mtrand.pyx":2991 * return cont1_array(self.internal_state, rk_power, size, oa, self.lock) * * def laplace(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -28836,7 +29007,7 @@ return __pyx_r; } -/* "mtrand.pyx":3078 +/* "mtrand.pyx":3089 * self.lock) * * def gumbel(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -28877,24 +29048,24 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_loc); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_loc); if (value) { values[0] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_scale); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_scale); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gumbel") < 0)) __PYX_ERR(0, 3078, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gumbel") < 0)) __PYX_ERR(0, 3089, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -28914,7 +29085,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("gumbel", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3078, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("gumbel", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3089, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.gumbel", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -28944,87 +29115,87 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("gumbel", 0); - /* "mtrand.pyx":3193 + /* "mtrand.pyx":3204 * cdef double floc, fscale * * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3193, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3204, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3193, __pyx_L1_error) + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3204, __pyx_L1_error) __pyx_v_oloc = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3194 + /* "mtrand.pyx":3205 * * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oloc.shape == oscale.shape == (): 
*/ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3194, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3205, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3194, __pyx_L1_error) + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3205, __pyx_L1_error) __pyx_v_oscale = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3196 + /* "mtrand.pyx":3207 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oloc.shape == oscale.shape == (): # <<<<<<<<<<<<<< * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oloc), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3196, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oloc), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3196, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3196, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3207, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); 
__Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3196, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3207, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3196, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":3197 + /* "mtrand.pyx":3208 * * if oloc.shape == oscale.shape == (): * floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_loc); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3197, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_loc); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3208, __pyx_L1_error) __pyx_v_floc = __pyx_t_5; - /* "mtrand.pyx":3198 + /* "mtrand.pyx":3209 * if oloc.shape == oscale.shape == (): * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if np.signbit(fscale): * raise ValueError("scale < 0") */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3198, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3209, __pyx_L1_error) __pyx_v_fscale = __pyx_t_5; - /* "mtrand.pyx":3199 + /* "mtrand.pyx":3210 * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 3199, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3210, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3199, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3210, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3199, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3210, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { @@ -29037,14 +29208,14 @@ } } if (!__pyx_t_6) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3199, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3210, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3199, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3210, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -29053,43 +29224,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3199, __pyx_L1_error) + __pyx_t_3 = 
__Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3210, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3199, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3210, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3199, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3210, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3199, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3210, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3200 + /* "mtrand.pyx":3211 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, * fscale, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__109, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3200, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__111, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3211, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3200, __pyx_L1_error) + __PYX_ERR(0, 3211, 
__pyx_L1_error) - /* "mtrand.pyx":3199 + /* "mtrand.pyx":3210 * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< @@ -29098,7 +29269,7 @@ */ } - /* "mtrand.pyx":3201 + /* "mtrand.pyx":3212 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, # <<<<<<<<<<<<<< @@ -29107,7 +29278,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3202 + /* "mtrand.pyx":3213 * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, * fscale, self.lock) # <<<<<<<<<<<<<< @@ -29117,21 +29288,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":3201 + /* "mtrand.pyx":3212 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, # <<<<<<<<<<<<<< * fscale, self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_gumbel, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3201, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_gumbel, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":3196 + /* "mtrand.pyx":3207 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oloc.shape == oscale.shape == (): # <<<<<<<<<<<<<< @@ -29140,21 +29311,21 @@ */ } - /* "mtrand.pyx":3204 + /* "mtrand.pyx":3215 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale, */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_3 = 
__Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -29168,13 +29339,13 @@ } } if (!__pyx_t_2) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -29182,19 +29353,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject 
*__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oscale)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oscale)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_oscale)); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -29211,14 +29382,14 @@ } } if (!__pyx_t_6) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -29227,43 +29398,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3204, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3215, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3205 + /* "mtrand.pyx":3216 * * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__110, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3205, __pyx_L1_error) + 
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__112, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3216, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3205, __pyx_L1_error) + __PYX_ERR(0, 3216, __pyx_L1_error) - /* "mtrand.pyx":3204 + /* "mtrand.pyx":3215 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< @@ -29272,7 +29443,7 @@ */ } - /* "mtrand.pyx":3206 + /* "mtrand.pyx":3217 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale, # <<<<<<<<<<<<<< @@ -29281,7 +29452,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3207 + /* "mtrand.pyx":3218 * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale, * self.lock) # <<<<<<<<<<<<<< @@ -29291,21 +29462,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":3206 + /* "mtrand.pyx":3217 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_gumbel, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3206, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_gumbel, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":3078 + /* "mtrand.pyx":3089 * self.lock) * * def gumbel(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -29331,7 +29502,7 @@ return __pyx_r; } -/* "mtrand.pyx":3209 +/* "mtrand.pyx":3220 * self.lock) * * def logistic(self, loc=0.0, scale=1.0, size=None): # 
<<<<<<<<<<<<<< @@ -29372,24 +29543,24 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_loc); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_loc); if (value) { values[0] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_scale); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_scale); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "logistic") < 0)) __PYX_ERR(0, 3209, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "logistic") < 0)) __PYX_ERR(0, 3220, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -29409,7 +29580,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("logistic", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3209, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("logistic", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3220, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.logistic", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -29439,87 +29610,87 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("logistic", 0); - /* "mtrand.pyx":3286 + /* "mtrand.pyx":3297 * cdef double floc, fscale * * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 3286, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3297, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3286, __pyx_L1_error) + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3297, __pyx_L1_error) __pyx_v_oloc = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3287 + /* "mtrand.pyx":3298 * * oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oloc.shape == oscale.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3287, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3298, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3287, __pyx_L1_error) + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3298, __pyx_L1_error) __pyx_v_oscale = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3289 + /* "mtrand.pyx":3300 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oloc.shape == oscale.shape == (): # <<<<<<<<<<<<<< * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oloc), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3289, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oloc), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3300, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3289, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3300, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3289, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3300, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3289, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3300, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3289, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3300, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":3290 + /* "mtrand.pyx":3301 * * if oloc.shape == oscale.shape == (): * floc = PyFloat_AsDouble(loc) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_loc); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3290, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_loc); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3301, __pyx_L1_error) __pyx_v_floc = __pyx_t_5; - /* "mtrand.pyx":3291 + /* "mtrand.pyx":3302 * if oloc.shape == oscale.shape == (): * floc = 
PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if np.signbit(fscale): * raise ValueError("scale < 0") */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3291, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3302, __pyx_L1_error) __pyx_v_fscale = __pyx_t_5; - /* "mtrand.pyx":3292 + /* "mtrand.pyx":3303 * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_logistic, size, floc, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3292, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3292, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3292, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { @@ -29532,14 +29703,14 @@ } } if (!__pyx_t_6) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3292, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3303, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if 
CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3292, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3303, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -29548,43 +29719,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3292, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3303, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3292, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3292, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3292, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3303, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3293 + /* "mtrand.pyx":3304 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_logistic, size, floc, * fscale, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__111, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3293, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__113, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3304, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3293, __pyx_L1_error) + __PYX_ERR(0, 3304, __pyx_L1_error) - /* "mtrand.pyx":3292 + /* "mtrand.pyx":3303 * floc = PyFloat_AsDouble(loc) * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< @@ -29593,7 +29764,7 @@ */ } - /* "mtrand.pyx":3294 + /* "mtrand.pyx":3305 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_logistic, size, floc, # <<<<<<<<<<<<<< @@ -29602,7 +29773,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3295 + /* "mtrand.pyx":3306 * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_logistic, size, floc, * fscale, self.lock) # <<<<<<<<<<<<<< @@ -29612,21 +29783,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":3294 + /* "mtrand.pyx":3305 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont2_array_sc(self.internal_state, rk_logistic, size, floc, # <<<<<<<<<<<<<< * fscale, self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_logistic, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3294, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, 
rk_logistic, __pyx_v_size, __pyx_v_floc, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3305, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":3289 + /* "mtrand.pyx":3300 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oloc.shape == oscale.shape == (): # <<<<<<<<<<<<<< @@ -29635,21 +29806,21 @@ */ } - /* "mtrand.pyx":3297 + /* "mtrand.pyx":3308 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_logistic, size, oloc, */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -29663,13 +29834,13 @@ } } if (!__pyx_t_2) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -29677,19 +29848,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oscale)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oscale)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_oscale)); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -29706,14 
+29877,14 @@ } } if (!__pyx_t_6) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -29722,43 +29893,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3308, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3297, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3308, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3298 + /* "mtrand.pyx":3309 * * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_logistic, size, oloc, * oscale, self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__112, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3298, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__114, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3298, __pyx_L1_error) + __PYX_ERR(0, 3309, __pyx_L1_error) - /* "mtrand.pyx":3297 + /* "mtrand.pyx":3308 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< @@ -29767,7 +29938,7 @@ */ } - /* "mtrand.pyx":3299 + /* "mtrand.pyx":3310 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_logistic, size, oloc, # <<<<<<<<<<<<<< @@ -29776,7 +29947,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3300 + /* "mtrand.pyx":3311 * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_logistic, size, oloc, * oscale, self.lock) # <<<<<<<<<<<<<< @@ -29786,21 +29957,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":3299 + /* "mtrand.pyx":3310 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") * return cont2_array(self.internal_state, rk_logistic, size, oloc, # <<<<<<<<<<<<<< * oscale, self.lock) * 
*/ - __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_logistic, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3299, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_logistic, __pyx_v_size, __pyx_v_oloc, __pyx_v_oscale, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":3209 + /* "mtrand.pyx":3220 * self.lock) * * def logistic(self, loc=0.0, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -29826,7 +29997,7 @@ return __pyx_r; } -/* "mtrand.pyx":3302 +/* "mtrand.pyx":3313 * oscale, self.lock) * * def lognormal(self, mean=0.0, sigma=1.0, size=None): # <<<<<<<<<<<<<< @@ -29867,24 +30038,24 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mean); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mean); if (value) { values[0] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_sigma); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lognormal") < 0)) __PYX_ERR(0, 3302, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lognormal") < 0)) __PYX_ERR(0, 3313, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -29904,7 +30075,7 @@ } goto __pyx_L4_argument_unpacking_done; 
__pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("lognormal", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3302, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("lognormal", 0, 0, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3313, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.lognormal", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -29934,87 +30105,87 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("lognormal", 0); - /* "mtrand.pyx":3410 + /* "mtrand.pyx":3421 * cdef double fmean, fsigma * * omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3410, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3421, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3410, __pyx_L1_error) + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3421, __pyx_L1_error) __pyx_v_omean = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3411 + /* "mtrand.pyx":3422 * * omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if omean.shape == osigma.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3411, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, 
__pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3411, __pyx_L1_error) + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3422, __pyx_L1_error) __pyx_v_osigma = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3413 + /* "mtrand.pyx":3424 * osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if omean.shape == osigma.shape == (): # <<<<<<<<<<<<<< * fmean = PyFloat_AsDouble(mean) * fsigma = PyFloat_AsDouble(sigma) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_omean), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3413, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_omean), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3424, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_osigma), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3413, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_osigma), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3424, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3413, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3424, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3413, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3424, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if 
(unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3413, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3424, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":3414 + /* "mtrand.pyx":3425 * * if omean.shape == osigma.shape == (): * fmean = PyFloat_AsDouble(mean) # <<<<<<<<<<<<<< * fsigma = PyFloat_AsDouble(sigma) * if np.signbit(fsigma): */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_mean); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3414, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_mean); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3425, __pyx_L1_error) __pyx_v_fmean = __pyx_t_5; - /* "mtrand.pyx":3415 + /* "mtrand.pyx":3426 * if omean.shape == osigma.shape == (): * fmean = PyFloat_AsDouble(mean) * fsigma = PyFloat_AsDouble(sigma) # <<<<<<<<<<<<<< * if np.signbit(fsigma): * raise ValueError("sigma < 0") */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_sigma); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3415, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_sigma); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3426, __pyx_L1_error) __pyx_v_fsigma = __pyx_t_5; - /* "mtrand.pyx":3416 + /* "mtrand.pyx":3427 * fmean = PyFloat_AsDouble(mean) * fsigma = PyFloat_AsDouble(sigma) * if np.signbit(fsigma): # <<<<<<<<<<<<<< * raise ValueError("sigma < 0") * return cont2_array_sc(self.internal_state, rk_lognormal, size, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3416, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3416, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); 
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fsigma); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3416, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fsigma); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { @@ -30027,14 +30198,14 @@ } } if (!__pyx_t_6) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3416, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3427, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3416, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3427, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -30043,43 +30214,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3416, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3427, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3416, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) 
__PYX_ERR(0, 3427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3416, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3416, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3427, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3417 + /* "mtrand.pyx":3428 * fsigma = PyFloat_AsDouble(sigma) * if np.signbit(fsigma): * raise ValueError("sigma < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_lognormal, size, * fmean, fsigma, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__113, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3417, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__115, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3428, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3417, __pyx_L1_error) + __PYX_ERR(0, 3428, __pyx_L1_error) - /* "mtrand.pyx":3416 + /* "mtrand.pyx":3427 * fmean = PyFloat_AsDouble(mean) * fsigma = PyFloat_AsDouble(sigma) * if np.signbit(fsigma): # <<<<<<<<<<<<<< @@ -30088,7 +30259,7 @@ */ } - /* "mtrand.pyx":3418 + /* "mtrand.pyx":3429 * if np.signbit(fsigma): * raise ValueError("sigma < 0") * return cont2_array_sc(self.internal_state, rk_lognormal, size, # <<<<<<<<<<<<<< @@ -30097,7 
+30268,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3419 + /* "mtrand.pyx":3430 * raise ValueError("sigma < 0") * return cont2_array_sc(self.internal_state, rk_lognormal, size, * fmean, fsigma, self.lock) # <<<<<<<<<<<<<< @@ -30107,21 +30278,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":3418 + /* "mtrand.pyx":3429 * if np.signbit(fsigma): * raise ValueError("sigma < 0") * return cont2_array_sc(self.internal_state, rk_lognormal, size, # <<<<<<<<<<<<<< * fmean, fsigma, self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_lognormal, __pyx_v_size, __pyx_v_fmean, __pyx_v_fsigma, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3418, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_lognormal, __pyx_v_size, __pyx_v_fmean, __pyx_v_fsigma, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":3413 + /* "mtrand.pyx":3424 * osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if omean.shape == osigma.shape == (): # <<<<<<<<<<<<<< @@ -30130,21 +30301,21 @@ */ } - /* "mtrand.pyx":3421 + /* "mtrand.pyx":3432 * fmean, fsigma, self.lock) * * if np.any(np.signbit(osigma)): # <<<<<<<<<<<<<< * raise ValueError("sigma < 0.0") * return cont2_array(self.internal_state, rk_lognormal, size, omean, */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3432, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -30158,13 +30329,13 @@ } } if (!__pyx_t_2) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_osigma)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_osigma)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_osigma)}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -30172,19 +30343,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_osigma)}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; 
__Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_osigma)); __Pyx_GIVEREF(((PyObject *)__pyx_v_osigma)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_osigma)); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -30201,14 +30372,14 @@ } } if (!__pyx_t_6) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -30217,43 +30388,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, 
__pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3421, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3432, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3422 + /* "mtrand.pyx":3433 * * if np.any(np.signbit(osigma)): * raise ValueError("sigma < 0.0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_lognormal, size, omean, * osigma, self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__114, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3422, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__116, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3433, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3422, __pyx_L1_error) + __PYX_ERR(0, 3433, __pyx_L1_error) - /* "mtrand.pyx":3421 + /* "mtrand.pyx":3432 * 
fmean, fsigma, self.lock) * * if np.any(np.signbit(osigma)): # <<<<<<<<<<<<<< @@ -30262,7 +30433,7 @@ */ } - /* "mtrand.pyx":3423 + /* "mtrand.pyx":3434 * if np.any(np.signbit(osigma)): * raise ValueError("sigma < 0.0") * return cont2_array(self.internal_state, rk_lognormal, size, omean, # <<<<<<<<<<<<<< @@ -30271,7 +30442,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3424 + /* "mtrand.pyx":3435 * raise ValueError("sigma < 0.0") * return cont2_array(self.internal_state, rk_lognormal, size, omean, * osigma, self.lock) # <<<<<<<<<<<<<< @@ -30281,21 +30452,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":3423 + /* "mtrand.pyx":3434 * if np.any(np.signbit(osigma)): * raise ValueError("sigma < 0.0") * return cont2_array(self.internal_state, rk_lognormal, size, omean, # <<<<<<<<<<<<<< * osigma, self.lock) * */ - __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_lognormal, __pyx_v_size, __pyx_v_omean, __pyx_v_osigma, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3423, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_lognormal, __pyx_v_size, __pyx_v_omean, __pyx_v_osigma, __pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":3302 + /* "mtrand.pyx":3313 * oscale, self.lock) * * def lognormal(self, mean=0.0, sigma=1.0, size=None): # <<<<<<<<<<<<<< @@ -30321,7 +30492,7 @@ return __pyx_r; } -/* "mtrand.pyx":3426 +/* "mtrand.pyx":3437 * osigma, self.lock) * * def rayleigh(self, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -30358,18 +30529,18 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_scale); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_scale); if (value) { values[0] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - 
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "rayleigh") < 0)) __PYX_ERR(0, 3426, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "rayleigh") < 0)) __PYX_ERR(0, 3437, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -30386,7 +30557,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("rayleigh", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3426, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("rayleigh", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3437, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.rayleigh", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -30414,14 +30585,14 @@ PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("rayleigh", 0); - /* "mtrand.pyx":3491 + /* "mtrand.pyx":3502 * cdef double fscale * * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oscale.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3491, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3502, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -30429,44 +30600,44 @@ __pyx_v_oscale = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":3493 + /* "mtrand.pyx":3504 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oscale.shape == (): # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): */ - __pyx_t_2 = 
__Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3493, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3493, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3504, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3493, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3504, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":3494 + /* "mtrand.pyx":3505 * * if oscale.shape == (): * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * if np.signbit(fscale): * raise ValueError("scale < 0") */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3494, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3505, __pyx_L1_error) __pyx_v_fscale = __pyx_t_4; - /* "mtrand.pyx":3495 + /* "mtrand.pyx":3506 * if oscale.shape == (): * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< * raise ValueError("scale < 0") * return cont1_array_sc(self.internal_state, rk_rayleigh, size, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3495, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3495, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3495, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fscale); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { @@ -30479,14 +30650,14 @@ } } if (!__pyx_t_6) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3495, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3506, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3495, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3506, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -30495,43 +30666,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3495, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3506, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3495, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3495, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3495, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3506, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":3496 + /* "mtrand.pyx":3507 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_rayleigh, size, * fscale, self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__115, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3496, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__117, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3507, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3496, __pyx_L1_error) + __PYX_ERR(0, 3507, __pyx_L1_error) - /* "mtrand.pyx":3495 + /* "mtrand.pyx":3506 * if oscale.shape == (): * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): # <<<<<<<<<<<<<< @@ -30540,7 +30711,7 @@ */ } - /* 
"mtrand.pyx":3497 + /* "mtrand.pyx":3508 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont1_array_sc(self.internal_state, rk_rayleigh, size, # <<<<<<<<<<<<<< @@ -30549,7 +30720,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3498 + /* "mtrand.pyx":3509 * raise ValueError("scale < 0") * return cont1_array_sc(self.internal_state, rk_rayleigh, size, * fscale, self.lock) # <<<<<<<<<<<<<< @@ -30559,21 +30730,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":3497 + /* "mtrand.pyx":3508 * if np.signbit(fscale): * raise ValueError("scale < 0") * return cont1_array_sc(self.internal_state, rk_rayleigh, size, # <<<<<<<<<<<<<< * fscale, self.lock) * */ - __pyx_t_5 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_rayleigh, __pyx_v_size, __pyx_v_fscale, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3497, __pyx_L1_error) + __pyx_t_5 = __pyx_f_6mtrand_cont1_array_sc(__pyx_v_self->internal_state, rk_rayleigh, __pyx_v_size, __pyx_v_fscale, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; - /* "mtrand.pyx":3493 + /* "mtrand.pyx":3504 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oscale.shape == (): # <<<<<<<<<<<<<< @@ -30582,21 +30753,21 @@ */ } - /* "mtrand.pyx":3500 + /* "mtrand.pyx":3511 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< * raise ValueError("scale < 0.0") * return cont1_array(self.internal_state, rk_rayleigh, size, oscale, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3500, __pyx_L1_error) + 
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_signbit); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -30610,13 +30781,13 @@ } } if (!__pyx_t_2) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_6, ((PyObject *)__pyx_v_oscale)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -30624,19 +30795,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_oscale)}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 
1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_INCREF(((PyObject *)__pyx_v_oscale)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oscale)); PyTuple_SET_ITEM(__pyx_t_8, 0+1, ((PyObject *)__pyx_v_oscale)); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -30653,14 +30824,14 @@ } } if (!__pyx_t_6) { - __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_5); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; - __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -30669,43 +30840,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_1}; - __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3500, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3511, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":3501 + /* "mtrand.pyx":3512 * * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0.0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_rayleigh, size, oscale, * self.lock) */ - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__116, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3501, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__118, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(0, 3501, 
__pyx_L1_error) + __PYX_ERR(0, 3512, __pyx_L1_error) - /* "mtrand.pyx":3500 + /* "mtrand.pyx":3511 * fscale, self.lock) * * if np.any(np.signbit(oscale)): # <<<<<<<<<<<<<< @@ -30714,7 +30885,7 @@ */ } - /* "mtrand.pyx":3502 + /* "mtrand.pyx":3513 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0.0") * return cont1_array(self.internal_state, rk_rayleigh, size, oscale, # <<<<<<<<<<<<<< @@ -30723,7 +30894,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3503 + /* "mtrand.pyx":3514 * raise ValueError("scale < 0.0") * return cont1_array(self.internal_state, rk_rayleigh, size, oscale, * self.lock) # <<<<<<<<<<<<<< @@ -30733,21 +30904,21 @@ __pyx_t_5 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_5); - /* "mtrand.pyx":3502 + /* "mtrand.pyx":3513 * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0.0") * return cont1_array(self.internal_state, rk_rayleigh, size, oscale, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_7 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_rayleigh, __pyx_v_size, __pyx_v_oscale, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3502, __pyx_L1_error) + __pyx_t_7 = __pyx_f_6mtrand_cont1_array(__pyx_v_self->internal_state, rk_rayleigh, __pyx_v_size, __pyx_v_oscale, __pyx_t_5); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3513, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; - /* "mtrand.pyx":3426 + /* "mtrand.pyx":3437 * osigma, self.lock) * * def rayleigh(self, scale=1.0, size=None): # <<<<<<<<<<<<<< @@ -30772,7 +30943,7 @@ return __pyx_r; } -/* "mtrand.pyx":3505 +/* "mtrand.pyx":3516 * self.lock) * * def wald(self, mean, scale, size=None): # <<<<<<<<<<<<<< @@ -30810,23 +30981,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mean)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mean)) != 0)) kw_args--; else goto 
__pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_scale)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_scale)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("wald", 0, 2, 3, 1); __PYX_ERR(0, 3505, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("wald", 0, 2, 3, 1); __PYX_ERR(0, 3516, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wald") < 0)) __PYX_ERR(0, 3505, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "wald") < 0)) __PYX_ERR(0, 3516, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -30844,7 +31015,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("wald", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3505, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("wald", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3516, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.wald", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -30875,75 +31046,75 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("wald", 0); - /* "mtrand.pyx":3571 + /* "mtrand.pyx":3582 * cdef double fmean, fscale * * omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3571, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_mean, NPY_DOUBLE, 
NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3582, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3571, __pyx_L1_error) + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3582, __pyx_L1_error) __pyx_v_omean = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3572 + /* "mtrand.pyx":3583 * * omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if omean.shape == oscale.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3572, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3572, __pyx_L1_error) + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_6mtrand_ndarray))))) __PYX_ERR(0, 3583, __pyx_L1_error) __pyx_v_oscale = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3574 + /* "mtrand.pyx":3585 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if omean.shape == oscale.shape == (): # <<<<<<<<<<<<<< * fmean = PyFloat_AsDouble(mean) * fscale = PyFloat_AsDouble(scale) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_omean), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3574, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_omean), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3585, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); 
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3574, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oscale), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3585, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3574, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3585, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3574, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3585, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3574, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3585, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":3575 + /* "mtrand.pyx":3586 * * if omean.shape == oscale.shape == (): * fmean = PyFloat_AsDouble(mean) # <<<<<<<<<<<<<< * fscale = PyFloat_AsDouble(scale) * */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_mean); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3575, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_mean); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3586, __pyx_L1_error) __pyx_v_fmean = __pyx_t_5; - /* "mtrand.pyx":3576 + /* "mtrand.pyx":3587 * if omean.shape == oscale.shape == (): * fmean = PyFloat_AsDouble(mean) * fscale = PyFloat_AsDouble(scale) # <<<<<<<<<<<<<< * * if fmean <= 0: */ - __pyx_t_5 = 
PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3576, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_scale); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3587, __pyx_L1_error) __pyx_v_fscale = __pyx_t_5; - /* "mtrand.pyx":3578 + /* "mtrand.pyx":3589 * fscale = PyFloat_AsDouble(scale) * * if fmean <= 0: # <<<<<<<<<<<<<< @@ -30951,22 +31122,22 @@ * if fscale <= 0: */ __pyx_t_4 = ((__pyx_v_fmean <= 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3579 + /* "mtrand.pyx":3590 * * if fmean <= 0: * raise ValueError("mean <= 0") # <<<<<<<<<<<<<< * if fscale <= 0: * raise ValueError("scale <= 0") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__117, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3579, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__119, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3590, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3579, __pyx_L1_error) + __PYX_ERR(0, 3590, __pyx_L1_error) - /* "mtrand.pyx":3578 + /* "mtrand.pyx":3589 * fscale = PyFloat_AsDouble(scale) * * if fmean <= 0: # <<<<<<<<<<<<<< @@ -30975,7 +31146,7 @@ */ } - /* "mtrand.pyx":3580 + /* "mtrand.pyx":3591 * if fmean <= 0: * raise ValueError("mean <= 0") * if fscale <= 0: # <<<<<<<<<<<<<< @@ -30983,22 +31154,22 @@ * return cont2_array_sc(self.internal_state, rk_wald, size, fmean, */ __pyx_t_4 = ((__pyx_v_fscale <= 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3581 + /* "mtrand.pyx":3592 * raise ValueError("mean <= 0") * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_wald, size, fmean, * fscale, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__118, NULL); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 3581, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__120, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3592, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3581, __pyx_L1_error) + __PYX_ERR(0, 3592, __pyx_L1_error) - /* "mtrand.pyx":3580 + /* "mtrand.pyx":3591 * if fmean <= 0: * raise ValueError("mean <= 0") * if fscale <= 0: # <<<<<<<<<<<<<< @@ -31007,7 +31178,7 @@ */ } - /* "mtrand.pyx":3582 + /* "mtrand.pyx":3593 * if fscale <= 0: * raise ValueError("scale <= 0") * return cont2_array_sc(self.internal_state, rk_wald, size, fmean, # <<<<<<<<<<<<<< @@ -31016,7 +31187,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3583 + /* "mtrand.pyx":3594 * raise ValueError("scale <= 0") * return cont2_array_sc(self.internal_state, rk_wald, size, fmean, * fscale, self.lock) # <<<<<<<<<<<<<< @@ -31026,21 +31197,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":3582 + /* "mtrand.pyx":3593 * if fscale <= 0: * raise ValueError("scale <= 0") * return cont2_array_sc(self.internal_state, rk_wald, size, fmean, # <<<<<<<<<<<<<< * fscale, self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_wald, __pyx_v_size, __pyx_v_fmean, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3582, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_cont2_array_sc(__pyx_v_self->internal_state, rk_wald, __pyx_v_size, __pyx_v_fmean, __pyx_v_fscale, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":3574 + /* "mtrand.pyx":3585 * oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if omean.shape == oscale.shape == (): # <<<<<<<<<<<<<< @@ -31049,21 +31220,21 @@ */ } - /* "mtrand.pyx":3585 + /* "mtrand.pyx":3596 * fscale, 
self.lock) * * if np.any(np.less_equal(omean,0.0)): # <<<<<<<<<<<<<< * raise ValueError("mean <= 0.0") * elif np.any(np.less_equal(oscale,0.0)): */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -31081,7 +31252,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_omean), __pyx_float_0_0}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -31089,13 +31260,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = 
{__pyx_t_6, ((PyObject *)__pyx_v_omean), __pyx_float_0_0}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -31106,7 +31277,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -31122,14 +31293,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3596, __pyx_L1_error) 
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -31138,43 +31309,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3585, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3596, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3586 + /* "mtrand.pyx":3597 * * if np.any(np.less_equal(omean,0.0)): * raise ValueError("mean <= 0.0") # <<<<<<<<<<<<<< * elif np.any(np.less_equal(oscale,0.0)): * raise ValueError("scale <= 0.0") */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__119, NULL); 
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3586, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__121, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3597, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 3586, __pyx_L1_error) + __PYX_ERR(0, 3597, __pyx_L1_error) - /* "mtrand.pyx":3585 + /* "mtrand.pyx":3596 * fscale, self.lock) * * if np.any(np.less_equal(omean,0.0)): # <<<<<<<<<<<<<< @@ -31183,21 +31354,21 @@ */ } - /* "mtrand.pyx":3587 + /* "mtrand.pyx":3598 * if np.any(np.less_equal(omean,0.0)): * raise ValueError("mean <= 0.0") * elif np.any(np.less_equal(oscale,0.0)): # <<<<<<<<<<<<<< * raise ValueError("scale <= 0.0") * return cont2_array(self.internal_state, rk_wald, size, omean, oscale, */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; 
__pyx_t_3 = NULL; @@ -31215,7 +31386,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_oscale), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -31223,13 +31394,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_oscale), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL; @@ -31240,7 +31411,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } @@ -31256,14 +31427,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = 
__Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -31272,43 +31443,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3587, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3598, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3588 + /* "mtrand.pyx":3599 * raise ValueError("mean <= 0.0") * elif np.any(np.less_equal(oscale,0.0)): * raise ValueError("scale <= 0.0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_wald, size, omean, oscale, * self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__120, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3588, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__122, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 3588, __pyx_L1_error) + __PYX_ERR(0, 3599, __pyx_L1_error) - /* "mtrand.pyx":3587 + /* "mtrand.pyx":3598 * if np.any(np.less_equal(omean,0.0)): * raise ValueError("mean <= 0.0") * elif np.any(np.less_equal(oscale,0.0)): # <<<<<<<<<<<<<< @@ -31317,7 +31488,7 @@ */ } - /* "mtrand.pyx":3589 + /* "mtrand.pyx":3600 * elif np.any(np.less_equal(oscale,0.0)): * raise ValueError("scale <= 0.0") * return cont2_array(self.internal_state, rk_wald, size, omean, oscale, # <<<<<<<<<<<<<< @@ -31326,7 +31497,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3590 + /* "mtrand.pyx":3601 * raise ValueError("scale <= 0.0") * return cont2_array(self.internal_state, rk_wald, size, omean, oscale, * self.lock) # <<<<<<<<<<<<<< @@ -31336,21 +31507,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":3589 + /* "mtrand.pyx":3600 * elif np.any(np.less_equal(oscale,0.0)): * raise ValueError("scale <= 0.0") * return 
cont2_array(self.internal_state, rk_wald, size, omean, oscale, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_9 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_wald, __pyx_v_size, __pyx_v_omean, __pyx_v_oscale, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3589, __pyx_L1_error) + __pyx_t_9 = __pyx_f_6mtrand_cont2_array(__pyx_v_self->internal_state, rk_wald, __pyx_v_size, __pyx_v_omean, __pyx_v_oscale, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; - /* "mtrand.pyx":3505 + /* "mtrand.pyx":3516 * self.lock) * * def wald(self, mean, scale, size=None): # <<<<<<<<<<<<<< @@ -31376,7 +31547,7 @@ return __pyx_r; } -/* "mtrand.pyx":3592 +/* "mtrand.pyx":3603 * self.lock) * * def triangular(self, left, mode, right, size=None): # <<<<<<<<<<<<<< @@ -31417,29 +31588,29 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_left)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_left)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, 1); __PYX_ERR(0, 3592, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, 1); __PYX_ERR(0, 3603, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_right)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_right)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, 2); __PYX_ERR(0, 3592, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, 2); __PYX_ERR(0, 
3603, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "triangular") < 0)) __PYX_ERR(0, 3592, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "triangular") < 0)) __PYX_ERR(0, 3603, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -31459,7 +31630,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3592, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("triangular", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3603, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.triangular", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -31492,14 +31663,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("triangular", 0); - /* "mtrand.pyx":3658 + /* "mtrand.pyx":3669 * cdef double fleft, fmode, fright * * oleft = PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * omode = PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oright = PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_left, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3658, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_left, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3669, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -31507,14 +31678,14 @@ __pyx_v_oleft = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":3659 + /* "mtrand.pyx":3670 * * oleft = 
PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * omode = PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * oright = PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3659, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3670, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -31522,14 +31693,14 @@ __pyx_v_omode = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3660 + /* "mtrand.pyx":3671 * oleft = PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * omode = PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * oright = PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oleft.shape == omode.shape == oright.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_right, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3660, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_right, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3671, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -31537,66 +31708,66 @@ __pyx_v_oright = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":3662 + /* "mtrand.pyx":3673 * oright = PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oleft.shape == omode.shape == oright.shape == (): # <<<<<<<<<<<<<< * fleft = PyFloat_AsDouble(left) * fright = PyFloat_AsDouble(right) */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oleft), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3662, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oleft), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = 
__Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_omode), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3662, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_omode), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3662, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3673, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oright), __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3662, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oright), __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3662, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3673, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3662, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3673, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 3662, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 
0)) __PYX_ERR(0, 3673, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_5) { - /* "mtrand.pyx":3663 + /* "mtrand.pyx":3674 * * if oleft.shape == omode.shape == oright.shape == (): * fleft = PyFloat_AsDouble(left) # <<<<<<<<<<<<<< * fright = PyFloat_AsDouble(right) * fmode = PyFloat_AsDouble(mode) */ - __pyx_t_6 = PyFloat_AsDouble(__pyx_v_left); if (unlikely(__pyx_t_6 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3663, __pyx_L1_error) + __pyx_t_6 = PyFloat_AsDouble(__pyx_v_left); if (unlikely(__pyx_t_6 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3674, __pyx_L1_error) __pyx_v_fleft = __pyx_t_6; - /* "mtrand.pyx":3664 + /* "mtrand.pyx":3675 * if oleft.shape == omode.shape == oright.shape == (): * fleft = PyFloat_AsDouble(left) * fright = PyFloat_AsDouble(right) # <<<<<<<<<<<<<< * fmode = PyFloat_AsDouble(mode) * */ - __pyx_t_6 = PyFloat_AsDouble(__pyx_v_right); if (unlikely(__pyx_t_6 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3664, __pyx_L1_error) + __pyx_t_6 = PyFloat_AsDouble(__pyx_v_right); if (unlikely(__pyx_t_6 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3675, __pyx_L1_error) __pyx_v_fright = __pyx_t_6; - /* "mtrand.pyx":3665 + /* "mtrand.pyx":3676 * fleft = PyFloat_AsDouble(left) * fright = PyFloat_AsDouble(right) * fmode = PyFloat_AsDouble(mode) # <<<<<<<<<<<<<< * * if fleft > fmode: */ - __pyx_t_6 = PyFloat_AsDouble(__pyx_v_mode); if (unlikely(__pyx_t_6 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3665, __pyx_L1_error) + __pyx_t_6 = PyFloat_AsDouble(__pyx_v_mode); if (unlikely(__pyx_t_6 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3676, __pyx_L1_error) __pyx_v_fmode = __pyx_t_6; - /* "mtrand.pyx":3667 + /* "mtrand.pyx":3678 * fmode = PyFloat_AsDouble(mode) * * if fleft > fmode: # <<<<<<<<<<<<<< @@ -31604,22 +31775,22 @@ * if fmode > fright: */ __pyx_t_5 = ((__pyx_v_fleft > __pyx_v_fmode) != 0); - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":3668 + /* "mtrand.pyx":3679 * * if fleft > fmode: 
* raise ValueError("left > mode") # <<<<<<<<<<<<<< * if fmode > fright: * raise ValueError("mode > right") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__121, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3668, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__123, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3668, __pyx_L1_error) + __PYX_ERR(0, 3679, __pyx_L1_error) - /* "mtrand.pyx":3667 + /* "mtrand.pyx":3678 * fmode = PyFloat_AsDouble(mode) * * if fleft > fmode: # <<<<<<<<<<<<<< @@ -31628,7 +31799,7 @@ */ } - /* "mtrand.pyx":3669 + /* "mtrand.pyx":3680 * if fleft > fmode: * raise ValueError("left > mode") * if fmode > fright: # <<<<<<<<<<<<<< @@ -31636,22 +31807,22 @@ * if fleft == fright: */ __pyx_t_5 = ((__pyx_v_fmode > __pyx_v_fright) != 0); - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":3670 + /* "mtrand.pyx":3681 * raise ValueError("left > mode") * if fmode > fright: * raise ValueError("mode > right") # <<<<<<<<<<<<<< * if fleft == fright: * raise ValueError("left == right") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__122, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3670, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__124, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3681, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3670, __pyx_L1_error) + __PYX_ERR(0, 3681, __pyx_L1_error) - /* "mtrand.pyx":3669 + /* "mtrand.pyx":3680 * if fleft > fmode: * raise ValueError("left > mode") * if fmode > fright: # <<<<<<<<<<<<<< @@ -31660,7 +31831,7 @@ */ } - /* "mtrand.pyx":3671 + /* "mtrand.pyx":3682 * if fmode > fright: * raise ValueError("mode > right") * if fleft == fright: # <<<<<<<<<<<<<< @@ 
-31668,22 +31839,22 @@ * return cont3_array_sc(self.internal_state, rk_triangular, size, */ __pyx_t_5 = ((__pyx_v_fleft == __pyx_v_fright) != 0); - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":3672 + /* "mtrand.pyx":3683 * raise ValueError("mode > right") * if fleft == fright: * raise ValueError("left == right") # <<<<<<<<<<<<<< * return cont3_array_sc(self.internal_state, rk_triangular, size, * fleft, fmode, fright, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__123, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3672, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__125, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3683, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3672, __pyx_L1_error) + __PYX_ERR(0, 3683, __pyx_L1_error) - /* "mtrand.pyx":3671 + /* "mtrand.pyx":3682 * if fmode > fright: * raise ValueError("mode > right") * if fleft == fright: # <<<<<<<<<<<<<< @@ -31692,7 +31863,7 @@ */ } - /* "mtrand.pyx":3673 + /* "mtrand.pyx":3684 * if fleft == fright: * raise ValueError("left == right") * return cont3_array_sc(self.internal_state, rk_triangular, size, # <<<<<<<<<<<<<< @@ -31701,7 +31872,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3674 + /* "mtrand.pyx":3685 * raise ValueError("left == right") * return cont3_array_sc(self.internal_state, rk_triangular, size, * fleft, fmode, fright, self.lock) # <<<<<<<<<<<<<< @@ -31711,21 +31882,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":3673 + /* "mtrand.pyx":3684 * if fleft == fright: * raise ValueError("left == right") * return cont3_array_sc(self.internal_state, rk_triangular, size, # <<<<<<<<<<<<<< * fleft, fmode, fright, self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_cont3_array_sc(__pyx_v_self->internal_state, rk_triangular, __pyx_v_size, __pyx_v_fleft, __pyx_v_fmode, __pyx_v_fright, __pyx_t_3); 
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3673, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_cont3_array_sc(__pyx_v_self->internal_state, rk_triangular, __pyx_v_size, __pyx_v_fleft, __pyx_v_fmode, __pyx_v_fright, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":3662 + /* "mtrand.pyx":3673 * oright = PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oleft.shape == omode.shape == oright.shape == (): # <<<<<<<<<<<<<< @@ -31734,21 +31905,21 @@ */ } - /* "mtrand.pyx":3676 + /* "mtrand.pyx":3687 * fleft, fmode, fright, self.lock) * * if np.any(np.greater(oleft, omode)): # <<<<<<<<<<<<<< * raise ValueError("left > mode") * if np.any(np.greater(omode, oright)): */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
__pyx_t_4 = NULL; @@ -31766,7 +31937,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)__pyx_v_oleft), ((PyObject *)__pyx_v_omode)}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -31774,13 +31945,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)__pyx_v_oleft), ((PyObject *)__pyx_v_omode)}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_4); __pyx_t_4 = NULL; @@ -31791,7 +31962,7 @@ __Pyx_INCREF(((PyObject *)__pyx_v_omode)); __Pyx_GIVEREF(((PyObject *)__pyx_v_omode)); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, ((PyObject *)__pyx_v_omode)); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -31807,14 +31978,14 
@@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -31823,43 +31994,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3687, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 3676, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 3687, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":3677 + /* "mtrand.pyx":3688 * * if np.any(np.greater(oleft, omode)): * raise ValueError("left > mode") # <<<<<<<<<<<<<< * if np.any(np.greater(omode, oright)): * raise ValueError("mode > right") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__124, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3677, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__126, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3688, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3677, __pyx_L1_error) + __PYX_ERR(0, 3688, __pyx_L1_error) - /* "mtrand.pyx":3676 + /* "mtrand.pyx":3687 * fleft, fmode, fright, self.lock) * * if np.any(np.greater(oleft, omode)): # <<<<<<<<<<<<<< @@ -31868,21 +32039,21 @@ */ } - /* "mtrand.pyx":3678 + /* "mtrand.pyx":3689 * if np.any(np.greater(oleft, omode)): * raise ValueError("left > mode") * if np.any(np.greater(omode, oright)): # <<<<<<<<<<<<<< * raise ValueError("mode > right") * if np.any(np.equal(oleft, oright)): */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_any); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -31900,7 +32071,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_omode), ((PyObject *)__pyx_v_oright)}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -31908,13 +32079,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_omode), ((PyObject *)__pyx_v_oright)}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(2+__pyx_t_8); 
if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = NULL; @@ -31925,7 +32096,7 @@ __Pyx_INCREF(((PyObject *)__pyx_v_oright)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oright)); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_8, ((PyObject *)__pyx_v_oright)); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } @@ -31941,14 +32112,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -31957,43 +32128,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; 
__Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 3678, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 3689, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":3679 + /* "mtrand.pyx":3690 * raise ValueError("left > mode") * if np.any(np.greater(omode, oright)): * raise ValueError("mode > right") # <<<<<<<<<<<<<< * if np.any(np.equal(oleft, oright)): * raise ValueError("left == right") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__125, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3679, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__127, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3690, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3679, __pyx_L1_error) + __PYX_ERR(0, 3690, __pyx_L1_error) - /* "mtrand.pyx":3678 + /* "mtrand.pyx":3689 * if np.any(np.greater(oleft, omode)): * raise ValueError("left > mode") * if np.any(np.greater(omode, 
oright)): # <<<<<<<<<<<<<< @@ -32002,21 +32173,21 @@ */ } - /* "mtrand.pyx":3680 + /* "mtrand.pyx":3691 * if np.any(np.greater(omode, oright)): * raise ValueError("mode > right") * if np.any(np.equal(oleft, oright)): # <<<<<<<<<<<<<< * raise ValueError("left == right") * return cont3_array(self.internal_state, rk_triangular, size, oleft, */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_any); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_any); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -32034,7 +32205,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_2, ((PyObject *)__pyx_v_oleft), ((PyObject *)__pyx_v_oright)}; - __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3691, 
__pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_9); } else @@ -32042,13 +32213,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_2, ((PyObject *)__pyx_v_oleft), ((PyObject *)__pyx_v_oright)}; - __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_9); } else #endif { - __pyx_t_3 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = NULL; @@ -32059,7 +32230,7 @@ __Pyx_INCREF(((PyObject *)__pyx_v_oright)); __Pyx_GIVEREF(((PyObject *)__pyx_v_oright)); PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_8, ((PyObject *)__pyx_v_oright)); - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } @@ -32075,14 +32246,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_9}; - __pyx_t_1 = 
__Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -32091,43 +32262,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_9}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else #endif { - __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_9); __pyx_t_9 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 3680, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 3691, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":3681 + /* "mtrand.pyx":3692 * 
raise ValueError("mode > right") * if np.any(np.equal(oleft, oright)): * raise ValueError("left == right") # <<<<<<<<<<<<<< * return cont3_array(self.internal_state, rk_triangular, size, oleft, * omode, oright, self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__126, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3681, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__128, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3692, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3681, __pyx_L1_error) + __PYX_ERR(0, 3692, __pyx_L1_error) - /* "mtrand.pyx":3680 + /* "mtrand.pyx":3691 * if np.any(np.greater(omode, oright)): * raise ValueError("mode > right") * if np.any(np.equal(oleft, oright)): # <<<<<<<<<<<<<< @@ -32136,7 +32307,7 @@ */ } - /* "mtrand.pyx":3682 + /* "mtrand.pyx":3693 * if np.any(np.equal(oleft, oright)): * raise ValueError("left == right") * return cont3_array(self.internal_state, rk_triangular, size, oleft, # <<<<<<<<<<<<<< @@ -32145,7 +32316,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3683 + /* "mtrand.pyx":3694 * raise ValueError("left == right") * return cont3_array(self.internal_state, rk_triangular, size, oleft, * omode, oright, self.lock) # <<<<<<<<<<<<<< @@ -32155,21 +32326,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":3682 + /* "mtrand.pyx":3693 * if np.any(np.equal(oleft, oright)): * raise ValueError("left == right") * return cont3_array(self.internal_state, rk_triangular, size, oleft, # <<<<<<<<<<<<<< * omode, oright, self.lock) * */ - __pyx_t_4 = __pyx_f_6mtrand_cont3_array(__pyx_v_self->internal_state, rk_triangular, __pyx_v_size, __pyx_v_oleft, __pyx_v_omode, __pyx_v_oright, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3682, __pyx_L1_error) + __pyx_t_4 = __pyx_f_6mtrand_cont3_array(__pyx_v_self->internal_state, rk_triangular, __pyx_v_size, 
__pyx_v_oleft, __pyx_v_omode, __pyx_v_oright, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 3693, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; - /* "mtrand.pyx":3592 + /* "mtrand.pyx":3603 * self.lock) * * def triangular(self, left, mode, right, size=None): # <<<<<<<<<<<<<< @@ -32196,7 +32367,7 @@ return __pyx_r; } -/* "mtrand.pyx":3686 +/* "mtrand.pyx":3697 * * # Complicated, discrete distributions: * def binomial(self, n, p, size=None): # <<<<<<<<<<<<<< @@ -32234,23 +32405,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_p)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_p)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("binomial", 0, 2, 3, 1); __PYX_ERR(0, 3686, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("binomial", 0, 2, 3, 1); __PYX_ERR(0, 3697, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "binomial") < 0)) __PYX_ERR(0, 3686, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "binomial") < 0)) __PYX_ERR(0, 3697, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -32268,7 +32439,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("binomial", 0, 2, 3, 
PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3686, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("binomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3697, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.binomial", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -32300,14 +32471,14 @@ PyObject *__pyx_t_10 = NULL; __Pyx_RefNannySetupContext("binomial", 0); - /* "mtrand.pyx":3775 + /* "mtrand.pyx":3786 * cdef double fp * * on = PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_n, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3775, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_n, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3786, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -32315,14 +32486,14 @@ __pyx_v_on = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":3776 + /* "mtrand.pyx":3787 * * on = PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED) * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if on.shape == op.shape == (): */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3776, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3787, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -32330,49 +32501,49 @@ __pyx_v_op = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3778 + /* "mtrand.pyx":3789 * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if on.shape == op.shape == (): # <<<<<<<<<<<<<< * fp = PyFloat_AsDouble(p) * ln = PyInt_AsLong(n) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_on), __pyx_n_s_shape); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 3778, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_on), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_op), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3778, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_op), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3778, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3789, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3778, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3789, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3778, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3789, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":3779 + /* "mtrand.pyx":3790 * * if on.shape == op.shape == (): * fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<< * ln = PyInt_AsLong(n) * */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_p); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3779, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_p); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) 
__PYX_ERR(0, 3790, __pyx_L1_error) __pyx_v_fp = __pyx_t_5; - /* "mtrand.pyx":3780 + /* "mtrand.pyx":3791 * if on.shape == op.shape == (): * fp = PyFloat_AsDouble(p) * ln = PyInt_AsLong(n) # <<<<<<<<<<<<<< * * if ln < 0: */ - __pyx_t_6 = PyInt_AsLong(__pyx_v_n); if (unlikely(__pyx_t_6 == -1L && PyErr_Occurred())) __PYX_ERR(0, 3780, __pyx_L1_error) + __pyx_t_6 = PyInt_AsLong(__pyx_v_n); if (unlikely(__pyx_t_6 == ((long)-1L) && PyErr_Occurred())) __PYX_ERR(0, 3791, __pyx_L1_error) __pyx_v_ln = __pyx_t_6; - /* "mtrand.pyx":3782 + /* "mtrand.pyx":3793 * ln = PyInt_AsLong(n) * * if ln < 0: # <<<<<<<<<<<<<< @@ -32380,22 +32551,22 @@ * if fp < 0: */ __pyx_t_4 = ((__pyx_v_ln < 0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3783 + /* "mtrand.pyx":3794 * * if ln < 0: * raise ValueError("n < 0") # <<<<<<<<<<<<<< * if fp < 0: * raise ValueError("p < 0") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__127, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3783, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__129, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3783, __pyx_L1_error) + __PYX_ERR(0, 3794, __pyx_L1_error) - /* "mtrand.pyx":3782 + /* "mtrand.pyx":3793 * ln = PyInt_AsLong(n) * * if ln < 0: # <<<<<<<<<<<<<< @@ -32404,7 +32575,7 @@ */ } - /* "mtrand.pyx":3784 + /* "mtrand.pyx":3795 * if ln < 0: * raise ValueError("n < 0") * if fp < 0: # <<<<<<<<<<<<<< @@ -32412,22 +32583,22 @@ * elif fp > 1: */ __pyx_t_4 = ((__pyx_v_fp < 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3785 + /* "mtrand.pyx":3796 * raise ValueError("n < 0") * if fp < 0: * raise ValueError("p < 0") # <<<<<<<<<<<<<< * elif fp > 1: * raise ValueError("p > 1") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__128, NULL); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 3785, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__130, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3785, __pyx_L1_error) + __PYX_ERR(0, 3796, __pyx_L1_error) - /* "mtrand.pyx":3784 + /* "mtrand.pyx":3795 * if ln < 0: * raise ValueError("n < 0") * if fp < 0: # <<<<<<<<<<<<<< @@ -32436,7 +32607,7 @@ */ } - /* "mtrand.pyx":3786 + /* "mtrand.pyx":3797 * if fp < 0: * raise ValueError("p < 0") * elif fp > 1: # <<<<<<<<<<<<<< @@ -32444,22 +32615,22 @@ * elif np.isnan(fp): */ __pyx_t_4 = ((__pyx_v_fp > 1.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3787 + /* "mtrand.pyx":3798 * raise ValueError("p < 0") * elif fp > 1: * raise ValueError("p > 1") # <<<<<<<<<<<<<< * elif np.isnan(fp): * raise ValueError("p is nan") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__129, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3787, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__131, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3787, __pyx_L1_error) + __PYX_ERR(0, 3798, __pyx_L1_error) - /* "mtrand.pyx":3786 + /* "mtrand.pyx":3797 * if fp < 0: * raise ValueError("p < 0") * elif fp > 1: # <<<<<<<<<<<<<< @@ -32468,19 +32639,19 @@ */ } - /* "mtrand.pyx":3788 + /* "mtrand.pyx":3799 * elif fp > 1: * raise ValueError("p > 1") * elif np.isnan(fp): # <<<<<<<<<<<<<< * raise ValueError("p is nan") * return discnp_array_sc(self.internal_state, rk_binomial, size, ln, */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3788, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 3799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_isnan); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3788, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_isnan); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3788, __pyx_L1_error) + __pyx_t_2 = PyFloat_FromDouble(__pyx_v_fp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { @@ -32493,14 +32664,14 @@ } } if (!__pyx_t_7) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3788, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3799, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3788, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3799, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -32509,43 +32680,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_2}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3788, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3799, 
__pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3788, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3788, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3788, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3799, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3789 + /* "mtrand.pyx":3800 * raise ValueError("p > 1") * elif np.isnan(fp): * raise ValueError("p is nan") # <<<<<<<<<<<<<< * return discnp_array_sc(self.internal_state, rk_binomial, size, ln, * fp, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__130, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3789, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__132, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3800, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3789, __pyx_L1_error) + __PYX_ERR(0, 3800, __pyx_L1_error) - /* "mtrand.pyx":3788 + /* "mtrand.pyx":3799 * elif fp > 1: * raise ValueError("p > 1") * elif np.isnan(fp): # 
<<<<<<<<<<<<<< @@ -32554,7 +32725,7 @@ */ } - /* "mtrand.pyx":3790 + /* "mtrand.pyx":3801 * elif np.isnan(fp): * raise ValueError("p is nan") * return discnp_array_sc(self.internal_state, rk_binomial, size, ln, # <<<<<<<<<<<<<< @@ -32563,7 +32734,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3791 + /* "mtrand.pyx":3802 * raise ValueError("p is nan") * return discnp_array_sc(self.internal_state, rk_binomial, size, ln, * fp, self.lock) # <<<<<<<<<<<<<< @@ -32573,21 +32744,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":3790 + /* "mtrand.pyx":3801 * elif np.isnan(fp): * raise ValueError("p is nan") * return discnp_array_sc(self.internal_state, rk_binomial, size, ln, # <<<<<<<<<<<<<< * fp, self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_discnp_array_sc(__pyx_v_self->internal_state, rk_binomial, __pyx_v_size, __pyx_v_ln, __pyx_v_fp, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3790, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_discnp_array_sc(__pyx_v_self->internal_state, rk_binomial, __pyx_v_size, __pyx_v_ln, __pyx_v_fp, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3801, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":3778 + /* "mtrand.pyx":3789 * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if on.shape == op.shape == (): # <<<<<<<<<<<<<< @@ -32596,21 +32767,21 @@ */ } - /* "mtrand.pyx":3793 + /* "mtrand.pyx":3804 * fp, self.lock) * * if np.any(np.less(n, 0)): # <<<<<<<<<<<<<< * raise ValueError("n < 0") * if np.any(np.less(p, 0)): */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3793, 
__pyx_L1_error) + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -32628,7 +32799,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_n, __pyx_int_0}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -32636,13 +32807,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_n, __pyx_int_0}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_10 = PyTuple_New(2+__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_10 = 
PyTuple_New(2+__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_2); __pyx_t_2 = NULL; @@ -32653,7 +32824,7 @@ __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_10, 1+__pyx_t_9, __pyx_int_0); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } @@ -32669,14 +32840,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_8)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -32685,43 +32856,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_8)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; 
__Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } } __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3793, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3804, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3794 + /* "mtrand.pyx":3805 * * if np.any(np.less(n, 0)): * raise ValueError("n < 0") # <<<<<<<<<<<<<< * if np.any(np.less(p, 0)): * raise ValueError("p < 0") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__131, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3794, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__133, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3805, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3794, __pyx_L1_error) + __PYX_ERR(0, 3805, __pyx_L1_error) - /* "mtrand.pyx":3793 + /* "mtrand.pyx":3804 * fp, self.lock) * * if np.any(np.less(n, 0)): # <<<<<<<<<<<<<< @@ -32730,21 +32901,21 @@ */ } - /* "mtrand.pyx":3795 + /* "mtrand.pyx":3806 * if np.any(np.less(n, 
0)): * raise ValueError("n < 0") * if np.any(np.less(p, 0)): # <<<<<<<<<<<<<< * raise ValueError("p < 0") * if np.any(np.greater(p, 1)): */ - __pyx_t_8 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_8 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); - __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_any); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_any); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -32762,7 +32933,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_p, __pyx_int_0}; - __pyx_t_8 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_8 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_8); } else @@ -32770,13 +32941,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_p, __pyx_int_0}; - 
__pyx_t_8 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_8 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_8); } else #endif { - __pyx_t_2 = PyTuple_New(2+__pyx_t_9); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_2 = PyTuple_New(2+__pyx_t_9); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = NULL; @@ -32787,7 +32958,7 @@ __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_9, __pyx_int_0); - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_2, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_2, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } @@ -32803,14 +32974,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_10, __pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_10, __pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_8}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_10, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_10, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); 
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; @@ -32819,43 +32990,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_8}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_10, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_10, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } else #endif { - __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_8); __pyx_t_8 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3795, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3806, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3796 + /* "mtrand.pyx":3807 * raise ValueError("n < 0") * if np.any(np.less(p, 0)): * raise ValueError("p < 0") # <<<<<<<<<<<<<< * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__132, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3796, __pyx_L1_error) + 
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__134, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3807, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3796, __pyx_L1_error) + __PYX_ERR(0, 3807, __pyx_L1_error) - /* "mtrand.pyx":3795 + /* "mtrand.pyx":3806 * if np.any(np.less(n, 0)): * raise ValueError("n < 0") * if np.any(np.less(p, 0)): # <<<<<<<<<<<<<< @@ -32864,21 +33035,21 @@ */ } - /* "mtrand.pyx":3797 + /* "mtrand.pyx":3808 * if np.any(np.less(p, 0)): * raise ValueError("p < 0") * if np.any(np.greater(p, 1)): # <<<<<<<<<<<<<< * raise ValueError("p > 1") * return discnp_array(self.internal_state, rk_binomial, size, on, op, */ - __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_any); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_any); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_8 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_8 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = NULL; @@ -32896,7 +33067,7 @@ #if CYTHON_FAST_PYCALL if 
(PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_8, __pyx_v_p, __pyx_int_1}; - __pyx_t_10 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_10 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_10); } else @@ -32904,13 +33075,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_8, __pyx_v_p, __pyx_int_1}; - __pyx_t_10 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_10 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_10); } else #endif { - __pyx_t_3 = PyTuple_New(2+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(2+__pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (__pyx_t_8) { __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_8); __pyx_t_8 = NULL; @@ -32921,7 +33092,7 @@ __Pyx_INCREF(__pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_9, __pyx_int_1); - __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_10 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } @@ -32937,14 +33108,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_1 = 
__Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_10}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; @@ -32953,43 +33124,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_10}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else #endif { - __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_10); __pyx_t_10 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = 
__Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3797, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3808, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3798 + /* "mtrand.pyx":3809 * raise ValueError("p < 0") * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") # <<<<<<<<<<<<<< * return discnp_array(self.internal_state, rk_binomial, size, on, op, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__133, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3798, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__135, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3809, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3798, __pyx_L1_error) + __PYX_ERR(0, 3809, __pyx_L1_error) - /* "mtrand.pyx":3797 + /* "mtrand.pyx":3808 * if np.any(np.less(p, 0)): * raise ValueError("p < 0") * if np.any(np.greater(p, 1)): # <<<<<<<<<<<<<< @@ -32998,7 +33169,7 @@ */ } - /* "mtrand.pyx":3799 + /* "mtrand.pyx":3810 * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") * return discnp_array(self.internal_state, rk_binomial, size, on, op, # <<<<<<<<<<<<<< @@ -33007,7 +33178,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3800 + /* "mtrand.pyx":3811 * raise ValueError("p > 1") * return discnp_array(self.internal_state, rk_binomial, size, on, op, * self.lock) # <<<<<<<<<<<<<< @@ -33017,21 +33188,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":3799 + /* "mtrand.pyx":3810 * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") * return discnp_array(self.internal_state, rk_binomial, size, on, op, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_discnp_array(__pyx_v_self->internal_state, rk_binomial, __pyx_v_size, __pyx_v_on, 
__pyx_v_op, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3799, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_discnp_array(__pyx_v_self->internal_state, rk_binomial, __pyx_v_size, __pyx_v_on, __pyx_v_op, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3810, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":3686 + /* "mtrand.pyx":3697 * * # Complicated, discrete distributions: * def binomial(self, n, p, size=None): # <<<<<<<<<<<<<< @@ -33057,7 +33228,7 @@ return __pyx_r; } -/* "mtrand.pyx":3802 +/* "mtrand.pyx":3813 * self.lock) * * def negative_binomial(self, n, p, size=None): # <<<<<<<<<<<<<< @@ -33095,23 +33266,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_p)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_p)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("negative_binomial", 0, 2, 3, 1); __PYX_ERR(0, 3802, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("negative_binomial", 0, 2, 3, 1); __PYX_ERR(0, 3813, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "negative_binomial") < 0)) __PYX_ERR(0, 3802, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "negative_binomial") < 0)) __PYX_ERR(0, 3813, __pyx_L3_error) } 
} else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -33129,7 +33300,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("negative_binomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3802, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("negative_binomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3813, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.negative_binomial", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -33160,14 +33331,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("negative_binomial", 0); - /* "mtrand.pyx":3878 + /* "mtrand.pyx":3889 * cdef double fp * * on = PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_n, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3878, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_n, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3889, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -33175,14 +33346,14 @@ __pyx_v_on = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":3879 + /* "mtrand.pyx":3890 * * on = PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if on.shape == op.shape == (): */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3879, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3890, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -33190,49 +33361,49 @@ __pyx_v_op = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; - /* "mtrand.pyx":3881 + /* "mtrand.pyx":3892 * op = PyArray_FROM_OTF(p, 
NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if on.shape == op.shape == (): # <<<<<<<<<<<<<< * fp = PyFloat_AsDouble(p) * fn = PyFloat_AsDouble(n) */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_on), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3881, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_on), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3892, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_op), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3881, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_op), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3892, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3881, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3892, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3881, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3892, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3881, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3892, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_4) { - /* "mtrand.pyx":3882 + /* "mtrand.pyx":3893 * * if on.shape == op.shape == (): * fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<< * fn = PyFloat_AsDouble(n) * */ - __pyx_t_5 = 
PyFloat_AsDouble(__pyx_v_p); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3882, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_p); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3893, __pyx_L1_error) __pyx_v_fp = __pyx_t_5; - /* "mtrand.pyx":3883 + /* "mtrand.pyx":3894 * if on.shape == op.shape == (): * fp = PyFloat_AsDouble(p) * fn = PyFloat_AsDouble(n) # <<<<<<<<<<<<<< * * if fn <= 0: */ - __pyx_t_5 = PyFloat_AsDouble(__pyx_v_n); if (unlikely(__pyx_t_5 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3883, __pyx_L1_error) + __pyx_t_5 = PyFloat_AsDouble(__pyx_v_n); if (unlikely(__pyx_t_5 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3894, __pyx_L1_error) __pyx_v_fn = __pyx_t_5; - /* "mtrand.pyx":3885 + /* "mtrand.pyx":3896 * fn = PyFloat_AsDouble(n) * * if fn <= 0: # <<<<<<<<<<<<<< @@ -33240,22 +33411,22 @@ * if fp < 0: */ __pyx_t_4 = ((__pyx_v_fn <= 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3886 + /* "mtrand.pyx":3897 * * if fn <= 0: * raise ValueError("n <= 0") # <<<<<<<<<<<<<< * if fp < 0: * raise ValueError("p < 0") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__134, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3886, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__136, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3897, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3886, __pyx_L1_error) + __PYX_ERR(0, 3897, __pyx_L1_error) - /* "mtrand.pyx":3885 + /* "mtrand.pyx":3896 * fn = PyFloat_AsDouble(n) * * if fn <= 0: # <<<<<<<<<<<<<< @@ -33264,7 +33435,7 @@ */ } - /* "mtrand.pyx":3887 + /* "mtrand.pyx":3898 * if fn <= 0: * raise ValueError("n <= 0") * if fp < 0: # <<<<<<<<<<<<<< @@ -33272,22 +33443,22 @@ * elif fp > 1: */ __pyx_t_4 = ((__pyx_v_fp < 0.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* 
"mtrand.pyx":3888 + /* "mtrand.pyx":3899 * raise ValueError("n <= 0") * if fp < 0: * raise ValueError("p < 0") # <<<<<<<<<<<<<< * elif fp > 1: * raise ValueError("p > 1") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__135, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3888, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__137, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3899, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3888, __pyx_L1_error) + __PYX_ERR(0, 3899, __pyx_L1_error) - /* "mtrand.pyx":3887 + /* "mtrand.pyx":3898 * if fn <= 0: * raise ValueError("n <= 0") * if fp < 0: # <<<<<<<<<<<<<< @@ -33296,7 +33467,7 @@ */ } - /* "mtrand.pyx":3889 + /* "mtrand.pyx":3900 * if fp < 0: * raise ValueError("p < 0") * elif fp > 1: # <<<<<<<<<<<<<< @@ -33304,22 +33475,22 @@ * return discdd_array_sc(self.internal_state, rk_negative_binomial, */ __pyx_t_4 = ((__pyx_v_fp > 1.0) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3890 + /* "mtrand.pyx":3901 * raise ValueError("p < 0") * elif fp > 1: * raise ValueError("p > 1") # <<<<<<<<<<<<<< * return discdd_array_sc(self.internal_state, rk_negative_binomial, * size, fn, fp, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__136, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3890, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__138, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 3890, __pyx_L1_error) + __PYX_ERR(0, 3901, __pyx_L1_error) - /* "mtrand.pyx":3889 + /* "mtrand.pyx":3900 * if fp < 0: * raise ValueError("p < 0") * elif fp > 1: # <<<<<<<<<<<<<< @@ -33328,7 +33499,7 @@ */ } - /* "mtrand.pyx":3891 + /* "mtrand.pyx":3902 * elif fp > 1: * 
raise ValueError("p > 1") * return discdd_array_sc(self.internal_state, rk_negative_binomial, # <<<<<<<<<<<<<< @@ -33337,7 +33508,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3892 + /* "mtrand.pyx":3903 * raise ValueError("p > 1") * return discdd_array_sc(self.internal_state, rk_negative_binomial, * size, fn, fp, self.lock) # <<<<<<<<<<<<<< @@ -33347,21 +33518,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":3891 + /* "mtrand.pyx":3902 * elif fp > 1: * raise ValueError("p > 1") * return discdd_array_sc(self.internal_state, rk_negative_binomial, # <<<<<<<<<<<<<< * size, fn, fp, self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_discdd_array_sc(__pyx_v_self->internal_state, rk_negative_binomial, __pyx_v_size, __pyx_v_fn, __pyx_v_fp, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3891, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_discdd_array_sc(__pyx_v_self->internal_state, rk_negative_binomial, __pyx_v_size, __pyx_v_fn, __pyx_v_fp, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3902, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":3881 + /* "mtrand.pyx":3892 * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if on.shape == op.shape == (): # <<<<<<<<<<<<<< @@ -33370,21 +33541,21 @@ */ } - /* "mtrand.pyx":3894 + /* "mtrand.pyx":3905 * size, fn, fp, self.lock) * * if np.any(np.less_equal(n, 0)): # <<<<<<<<<<<<<< * raise ValueError("n <= 0") * if np.any(np.less(p, 0)): */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -33402,7 +33573,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_n, __pyx_int_0}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -33410,13 +33581,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_n, __pyx_int_0}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3905, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -33427,7 +33598,7 @@ __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_int_0); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -33443,14 +33614,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -33459,43 +33630,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); 
if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3894, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3905, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3895 + /* "mtrand.pyx":3906 * * if np.any(np.less_equal(n, 0)): * raise ValueError("n <= 0") # <<<<<<<<<<<<<< * if np.any(np.less(p, 0)): * raise ValueError("p < 0") */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__137, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3895, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__139, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 3895, __pyx_L1_error) + __PYX_ERR(0, 3906, __pyx_L1_error) - /* "mtrand.pyx":3894 + /* "mtrand.pyx":3905 * size, fn, fp, self.lock) * * if np.any(np.less_equal(n, 0)): # <<<<<<<<<<<<<< @@ -33504,21 +33675,21 @@ */ } - /* "mtrand.pyx":3896 + /* "mtrand.pyx":3907 * if np.any(np.less_equal(n, 0)): * raise ValueError("n <= 0") * if np.any(np.less(p, 0)): # <<<<<<<<<<<<<< * raise 
ValueError("p < 0") * if np.any(np.greater(p, 1)): */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -33536,7 +33707,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_p, __pyx_int_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -33544,13 +33715,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_p, __pyx_int_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __pyx_t_3 = NULL; @@ -33561,7 +33732,7 @@ __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, __pyx_int_0); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } @@ -33577,14 +33748,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -33593,43 +33764,43 @@ #if CYTHON_FAST_PYCCALL if 
(__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3896, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3907, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3897 + /* "mtrand.pyx":3908 * raise ValueError("n <= 0") * if np.any(np.less(p, 0)): * raise ValueError("p < 0") # <<<<<<<<<<<<<< * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__138, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3897, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__140, NULL); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 3908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 3897, __pyx_L1_error) + __PYX_ERR(0, 3908, __pyx_L1_error) - /* "mtrand.pyx":3896 + /* "mtrand.pyx":3907 * if np.any(np.less_equal(n, 0)): * raise ValueError("n <= 0") * if np.any(np.less(p, 0)): # <<<<<<<<<<<<<< @@ -33638,21 +33809,21 @@ */ } - /* "mtrand.pyx":3898 + /* "mtrand.pyx":3909 * if np.any(np.less(p, 0)): * raise ValueError("p < 0") * if np.any(np.greater(p, 1)): # <<<<<<<<<<<<<< * raise ValueError("p > 1") * return discdd_array(self.internal_state, rk_negative_binomial, size, */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_any); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_any); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; @@ -33670,7 +33841,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_1, __pyx_v_p, __pyx_int_1}; - 
__pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_9); } else @@ -33678,13 +33849,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_1, __pyx_v_p, __pyx_int_1}; - __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_9); } else #endif { - __pyx_t_3 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = NULL; @@ -33695,7 +33866,7 @@ __Pyx_INCREF(__pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_8, __pyx_int_1); - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } @@ -33711,14 +33882,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_9); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_9); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3909, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_9}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -33727,43 +33898,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_9}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else #endif { - __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_9); __pyx_t_9 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3898, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_2); if 
(unlikely(__pyx_t_4 < 0)) __PYX_ERR(0, 3909, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":3899 + /* "mtrand.pyx":3910 * raise ValueError("p < 0") * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") # <<<<<<<<<<<<<< * return discdd_array(self.internal_state, rk_negative_binomial, size, * on, op, self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__139, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3899, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__141, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3910, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 3899, __pyx_L1_error) + __PYX_ERR(0, 3910, __pyx_L1_error) - /* "mtrand.pyx":3898 + /* "mtrand.pyx":3909 * if np.any(np.less(p, 0)): * raise ValueError("p < 0") * if np.any(np.greater(p, 1)): # <<<<<<<<<<<<<< @@ -33772,7 +33943,7 @@ */ } - /* "mtrand.pyx":3900 + /* "mtrand.pyx":3911 * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") * return discdd_array(self.internal_state, rk_negative_binomial, size, # <<<<<<<<<<<<<< @@ -33781,7 +33952,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3901 + /* "mtrand.pyx":3912 * raise ValueError("p > 1") * return discdd_array(self.internal_state, rk_negative_binomial, size, * on, op, self.lock) # <<<<<<<<<<<<<< @@ -33791,21 +33962,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":3900 + /* "mtrand.pyx":3911 * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") * return discdd_array(self.internal_state, rk_negative_binomial, size, # <<<<<<<<<<<<<< * on, op, self.lock) * */ - __pyx_t_6 = __pyx_f_6mtrand_discdd_array(__pyx_v_self->internal_state, rk_negative_binomial, __pyx_v_size, __pyx_v_on, __pyx_v_op, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3900, __pyx_L1_error) + __pyx_t_6 = 
__pyx_f_6mtrand_discdd_array(__pyx_v_self->internal_state, rk_negative_binomial, __pyx_v_size, __pyx_v_on, __pyx_v_op, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3911, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; - /* "mtrand.pyx":3802 + /* "mtrand.pyx":3813 * self.lock) * * def negative_binomial(self, n, p, size=None): # <<<<<<<<<<<<<< @@ -33831,7 +34002,7 @@ return __pyx_r; } -/* "mtrand.pyx":3903 +/* "mtrand.pyx":3914 * on, op, self.lock) * * def poisson(self, lam=1.0, size=None): # <<<<<<<<<<<<<< @@ -33868,18 +34039,18 @@ switch (pos_args) { case 0: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_lam); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_lam); if (value) { values[0] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "poisson") < 0)) __PYX_ERR(0, 3903, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "poisson") < 0)) __PYX_ERR(0, 3914, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -33896,7 +34067,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("poisson", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3903, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("poisson", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3914, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.poisson", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -33926,14 +34097,14 @@ PyObject *__pyx_t_10 = NULL; 
__Pyx_RefNannySetupContext("poisson", 0); - /* "mtrand.pyx":3972 + /* "mtrand.pyx":3983 * cdef double flam * * olam = PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if olam.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3972, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -33941,57 +34112,57 @@ __pyx_v_olam = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":3974 + /* "mtrand.pyx":3985 * olam = PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if olam.shape == (): # <<<<<<<<<<<<<< * flam = PyFloat_AsDouble(lam) * */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_olam), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3974, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_olam), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3985, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3974, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3985, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3974, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3985, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":3975 + /* "mtrand.pyx":3986 * * if olam.shape == (): * flam = PyFloat_AsDouble(lam) # <<<<<<<<<<<<<< * * if lam < 0: */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_lam); if 
(unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 3975, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_lam); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 3986, __pyx_L1_error) __pyx_v_flam = __pyx_t_4; - /* "mtrand.pyx":3977 + /* "mtrand.pyx":3988 * flam = PyFloat_AsDouble(lam) * * if lam < 0: # <<<<<<<<<<<<<< * raise ValueError("lam < 0") * if lam > self.poisson_lam_max: */ - __pyx_t_1 = PyObject_RichCompare(__pyx_v_lam, __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3977, __pyx_L1_error) - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3977, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_v_lam, __pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3988, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3988, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":3978 + /* "mtrand.pyx":3989 * * if lam < 0: * raise ValueError("lam < 0") # <<<<<<<<<<<<<< * if lam > self.poisson_lam_max: * raise ValueError("lam value too large") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__140, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3978, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__142, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3978, __pyx_L1_error) + __PYX_ERR(0, 3989, __pyx_L1_error) - /* "mtrand.pyx":3977 + /* "mtrand.pyx":3988 * flam = PyFloat_AsDouble(lam) * * if lam < 0: # <<<<<<<<<<<<<< @@ -34000,35 +34171,35 @@ */ } - /* "mtrand.pyx":3979 + /* "mtrand.pyx":3990 * if lam < 0: * raise ValueError("lam < 0") * if lam > self.poisson_lam_max: # <<<<<<<<<<<<<< * raise 
ValueError("lam value too large") * return discd_array_sc(self.internal_state, rk_poisson, size, flam, */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_poisson_lam_max); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3979, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_poisson_lam_max); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3990, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_RichCompare(__pyx_v_lam, __pyx_t_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3979, __pyx_L1_error) + __pyx_t_2 = PyObject_RichCompare(__pyx_v_lam, __pyx_t_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3990, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3979, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3990, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":3980 + /* "mtrand.pyx":3991 * raise ValueError("lam < 0") * if lam > self.poisson_lam_max: * raise ValueError("lam value too large") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_poisson, size, flam, * self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__141, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3980, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__143, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3991, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 3980, __pyx_L1_error) + __PYX_ERR(0, 3991, __pyx_L1_error) - /* "mtrand.pyx":3979 + /* "mtrand.pyx":3990 * if lam < 0: * raise ValueError("lam < 0") * if lam > self.poisson_lam_max: # <<<<<<<<<<<<<< @@ -34037,7 +34208,7 @@ */ } - /* 
"mtrand.pyx":3981 + /* "mtrand.pyx":3992 * if lam > self.poisson_lam_max: * raise ValueError("lam value too large") * return discd_array_sc(self.internal_state, rk_poisson, size, flam, # <<<<<<<<<<<<<< @@ -34046,7 +34217,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3982 + /* "mtrand.pyx":3993 * raise ValueError("lam value too large") * return discd_array_sc(self.internal_state, rk_poisson, size, flam, * self.lock) # <<<<<<<<<<<<<< @@ -34056,21 +34227,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":3981 + /* "mtrand.pyx":3992 * if lam > self.poisson_lam_max: * raise ValueError("lam value too large") * return discd_array_sc(self.internal_state, rk_poisson, size, flam, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_poisson, __pyx_v_size, __pyx_v_flam, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3981, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_poisson, __pyx_v_size, __pyx_v_flam, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":3974 + /* "mtrand.pyx":3985 * olam = PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if olam.shape == (): # <<<<<<<<<<<<<< @@ -34079,21 +34250,21 @@ */ } - /* "mtrand.pyx":3984 + /* "mtrand.pyx":3995 * self.lock) * * if np.any(np.less(olam, 0)): # <<<<<<<<<<<<<< * raise ValueError("lam < 0") * if np.any(np.greater(olam, self.poisson_lam_max)): */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3984, 
__pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -34111,7 +34282,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_olam), __pyx_int_0}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -34119,13 +34290,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_olam), __pyx_int_0}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3984, 
__pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -34136,7 +34307,7 @@ __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_int_0); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -34152,14 +34323,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -34168,43 +34339,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 
= 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3984, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":3985 + /* "mtrand.pyx":3996 * * if np.any(np.less(olam, 0)): * raise ValueError("lam < 0") # <<<<<<<<<<<<<< * if np.any(np.greater(olam, self.poisson_lam_max)): * raise ValueError("lam value too large.") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__142, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3985, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__144, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3996, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3985, __pyx_L1_error) + __PYX_ERR(0, 3996, __pyx_L1_error) - /* "mtrand.pyx":3984 + /* "mtrand.pyx":3995 * self.lock) * * if np.any(np.less(olam, 0)): # <<<<<<<<<<<<<< @@ -34213,24 +34384,24 @@ */ } - /* "mtrand.pyx":3986 + /* 
"mtrand.pyx":3997 * if np.any(np.less(olam, 0)): * raise ValueError("lam < 0") * if np.any(np.greater(olam, self.poisson_lam_max)): # <<<<<<<<<<<<<< * raise ValueError("lam value too large.") * return discd_array(self.internal_state, rk_poisson, size, olam, */ - __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_poisson_lam_max); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_poisson_lam_max); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; __pyx_t_8 = 0; @@ -34247,7 +34418,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_olam), __pyx_t_2}; - __pyx_t_5 = 
__Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -34256,14 +34427,14 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_olam), __pyx_t_2}; - __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_10 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_10 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -34274,7 +34445,7 @@ __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_10, 1+__pyx_t_8, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_10, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_10, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } @@ -34290,14 +34461,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_5); 
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_5}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; @@ -34306,43 +34477,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_5}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else #endif { - __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0+1, __pyx_t_5); __pyx_t_5 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3986, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 
3986, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 3997, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":3987 + /* "mtrand.pyx":3998 * raise ValueError("lam < 0") * if np.any(np.greater(olam, self.poisson_lam_max)): * raise ValueError("lam value too large.") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_poisson, size, olam, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__143, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3987, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__145, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3998, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 3987, __pyx_L1_error) + __PYX_ERR(0, 3998, __pyx_L1_error) - /* "mtrand.pyx":3986 + /* "mtrand.pyx":3997 * if np.any(np.less(olam, 0)): * raise ValueError("lam < 0") * if np.any(np.greater(olam, self.poisson_lam_max)): # <<<<<<<<<<<<<< @@ -34351,7 +34522,7 @@ */ } - /* "mtrand.pyx":3988 + /* "mtrand.pyx":3999 * if np.any(np.greater(olam, self.poisson_lam_max)): * raise ValueError("lam value too large.") * return discd_array(self.internal_state, rk_poisson, size, olam, # <<<<<<<<<<<<<< @@ -34360,7 +34531,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":3989 + /* "mtrand.pyx":4000 * raise ValueError("lam value too large.") * return discd_array(self.internal_state, rk_poisson, size, olam, * self.lock) # <<<<<<<<<<<<<< @@ -34370,21 +34541,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":3988 + /* "mtrand.pyx":3999 * if np.any(np.greater(olam, self.poisson_lam_max)): * raise ValueError("lam value too large.") * return discd_array(self.internal_state, rk_poisson, size, olam, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_9 = 
__pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_poisson, __pyx_v_size, __pyx_v_olam, __pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3988, __pyx_L1_error) + __pyx_t_9 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_poisson, __pyx_v_size, __pyx_v_olam, __pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 3999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; - /* "mtrand.pyx":3903 + /* "mtrand.pyx":3914 * on, op, self.lock) * * def poisson(self, lam=1.0, size=None): # <<<<<<<<<<<<<< @@ -34410,7 +34581,7 @@ return __pyx_r; } -/* "mtrand.pyx":3991 +/* "mtrand.pyx":4002 * self.lock) * * def zipf(self, a, size=None): # <<<<<<<<<<<<<< @@ -34445,17 +34616,17 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_a)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "zipf") < 0)) __PYX_ERR(0, 3991, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "zipf") < 0)) __PYX_ERR(0, 4002, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -34471,7 +34642,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("zipf", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 3991, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("zipf", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4002, __pyx_L3_error) __pyx_L3_error:; 
__Pyx_AddTraceback("mtrand.RandomState.zipf", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -34498,16 +34669,17 @@ PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; + int __pyx_t_10; __Pyx_RefNannySetupContext("zipf", 0); - /* "mtrand.pyx":4068 + /* "mtrand.pyx":4079 * cdef double fa * * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if oa.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4068, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_a, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4079, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -34515,97 +34687,97 @@ __pyx_v_oa = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":4070 + /* "mtrand.pyx":4081 * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oa.shape == (): # <<<<<<<<<<<<<< * fa = PyFloat_AsDouble(a) * */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oa), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4070, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_oa), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4081, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4070, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4081, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4070, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4081, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) 
{ - /* "mtrand.pyx":4071 + /* "mtrand.pyx":4082 * * if oa.shape == (): * fa = PyFloat_AsDouble(a) # <<<<<<<<<<<<<< * - * if fa <= 1.0: + * # use logic that ensures NaN is rejected. */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_a); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 4071, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_a); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 4082, __pyx_L1_error) __pyx_v_fa = __pyx_t_4; - /* "mtrand.pyx":4073 - * fa = PyFloat_AsDouble(a) + /* "mtrand.pyx":4085 * - * if fa <= 1.0: # <<<<<<<<<<<<<< - * raise ValueError("a <= 1.0") + * # use logic that ensures NaN is rejected. + * if not fa > 1.0: # <<<<<<<<<<<<<< + * raise ValueError("'a' must be a valid float > 1.0") * return discd_array_sc(self.internal_state, rk_zipf, size, fa, */ - __pyx_t_3 = ((__pyx_v_fa <= 1.0) != 0); - if (__pyx_t_3) { + __pyx_t_3 = ((!((__pyx_v_fa > 1.0) != 0)) != 0); + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":4074 - * - * if fa <= 1.0: - * raise ValueError("a <= 1.0") # <<<<<<<<<<<<<< + /* "mtrand.pyx":4086 + * # use logic that ensures NaN is rejected. + * if not fa > 1.0: + * raise ValueError("'a' must be a valid float > 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_zipf, size, fa, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__144, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4074, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__146, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4086, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 4074, __pyx_L1_error) + __PYX_ERR(0, 4086, __pyx_L1_error) - /* "mtrand.pyx":4073 - * fa = PyFloat_AsDouble(a) + /* "mtrand.pyx":4085 * - * if fa <= 1.0: # <<<<<<<<<<<<<< - * raise ValueError("a <= 1.0") + * # use logic that ensures NaN is rejected. 
+ * if not fa > 1.0: # <<<<<<<<<<<<<< + * raise ValueError("'a' must be a valid float > 1.0") * return discd_array_sc(self.internal_state, rk_zipf, size, fa, */ } - /* "mtrand.pyx":4075 - * if fa <= 1.0: - * raise ValueError("a <= 1.0") + /* "mtrand.pyx":4087 + * if not fa > 1.0: + * raise ValueError("'a' must be a valid float > 1.0") * return discd_array_sc(self.internal_state, rk_zipf, size, fa, # <<<<<<<<<<<<<< * self.lock) * */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":4076 - * raise ValueError("a <= 1.0") + /* "mtrand.pyx":4088 + * raise ValueError("'a' must be a valid float > 1.0") * return discd_array_sc(self.internal_state, rk_zipf, size, fa, * self.lock) # <<<<<<<<<<<<<< * - * if np.any(np.less_equal(oa, 1.0)): + * # use logic that ensures NaN is rejected. */ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":4075 - * if fa <= 1.0: - * raise ValueError("a <= 1.0") + /* "mtrand.pyx":4087 + * if not fa > 1.0: + * raise ValueError("'a' must be a valid float > 1.0") * return discd_array_sc(self.internal_state, rk_zipf, size, fa, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_zipf, __pyx_v_size, __pyx_v_fa, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4075, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_zipf, __pyx_v_size, __pyx_v_fa, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4087, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":4070 + /* "mtrand.pyx":4081 * oa = PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if oa.shape == (): # <<<<<<<<<<<<<< @@ -34614,21 +34786,21 @@ */ } - /* "mtrand.pyx":4078 - * self.lock) + /* "mtrand.pyx":4091 * - * if np.any(np.less_equal(oa, 1.0)): # <<<<<<<<<<<<<< - * raise ValueError("a <= 1.0") + * # use logic that ensures NaN is rejected. 
+ * if not np.all(np.greater(oa, 1.0)): # <<<<<<<<<<<<<< + * raise ValueError("'a' must contain valid floats > 1.0") * return discd_array(self.internal_state, rk_zipf, size, oa, self.lock) */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_all); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -34646,7 +34818,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_oa), __pyx_float_1_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -34654,13 +34826,13 @@ #if CYTHON_FAST_PYCCALL if 
(__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_oa), __pyx_float_1_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -34671,7 +34843,7 @@ __Pyx_INCREF(__pyx_float_1_0); __Pyx_GIVEREF(__pyx_float_1_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_1_0); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -34687,14 +34859,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -34703,54 +34875,55 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4078, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4091, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_3) { + __pyx_t_10 = ((!__pyx_t_3) != 0); + if (unlikely(__pyx_t_10)) { - /* "mtrand.pyx":4079 - * - * if np.any(np.less_equal(oa, 1.0)): - * raise ValueError("a <= 1.0") # <<<<<<<<<<<<<< + /* "mtrand.pyx":4092 + * # use logic that ensures NaN is rejected. 
+ * if not np.all(np.greater(oa, 1.0)): + * raise ValueError("'a' must contain valid floats > 1.0") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_zipf, size, oa, self.lock) * */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__145, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4079, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__147, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4092, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 4079, __pyx_L1_error) + __PYX_ERR(0, 4092, __pyx_L1_error) - /* "mtrand.pyx":4078 - * self.lock) + /* "mtrand.pyx":4091 * - * if np.any(np.less_equal(oa, 1.0)): # <<<<<<<<<<<<<< - * raise ValueError("a <= 1.0") + * # use logic that ensures NaN is rejected. + * if not np.all(np.greater(oa, 1.0)): # <<<<<<<<<<<<<< + * raise ValueError("'a' must contain valid floats > 1.0") * return discd_array(self.internal_state, rk_zipf, size, oa, self.lock) */ } - /* "mtrand.pyx":4080 - * if np.any(np.less_equal(oa, 1.0)): - * raise ValueError("a <= 1.0") + /* "mtrand.pyx":4093 + * if not np.all(np.greater(oa, 1.0)): + * raise ValueError("'a' must contain valid floats > 1.0") * return discd_array(self.internal_state, rk_zipf, size, oa, self.lock) # <<<<<<<<<<<<<< * * def geometric(self, p, size=None): @@ -34758,14 +34931,14 @@ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - __pyx_t_5 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_zipf, __pyx_v_size, __pyx_v_oa, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4080, __pyx_L1_error) + __pyx_t_5 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_zipf, __pyx_v_size, __pyx_v_oa, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4093, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; - /* 
"mtrand.pyx":3991 + /* "mtrand.pyx":4002 * self.lock) * * def zipf(self, a, size=None): # <<<<<<<<<<<<<< @@ -34790,7 +34963,7 @@ return __pyx_r; } -/* "mtrand.pyx":4082 +/* "mtrand.pyx":4095 * return discd_array(self.internal_state, rk_zipf, size, oa, self.lock) * * def geometric(self, p, size=None): # <<<<<<<<<<<<<< @@ -34825,17 +34998,17 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_p)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_p)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "geometric") < 0)) __PYX_ERR(0, 4082, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "geometric") < 0)) __PYX_ERR(0, 4095, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -34851,7 +35024,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("geometric", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4082, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("geometric", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4095, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.geometric", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -34880,14 +35053,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("geometric", 0); - /* "mtrand.pyx":4131 + /* "mtrand.pyx":4144 * cdef double fp * * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if op.shape == (): */ - __pyx_t_1 = 
PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4131, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4144, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -34895,32 +35068,32 @@ __pyx_v_op = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":4133 + /* "mtrand.pyx":4146 * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if op.shape == (): # <<<<<<<<<<<<<< * fp = PyFloat_AsDouble(p) * */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_op), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4133, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_op), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4133, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4146, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4133, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4146, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":4134 + /* "mtrand.pyx":4147 * * if op.shape == (): * fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<< * * if fp < 0.0: */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_p); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 4134, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_p); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 4147, __pyx_L1_error) __pyx_v_fp = __pyx_t_4; - /* 
"mtrand.pyx":4136 + /* "mtrand.pyx":4149 * fp = PyFloat_AsDouble(p) * * if fp < 0.0: # <<<<<<<<<<<<<< @@ -34928,22 +35101,22 @@ * if fp > 1.0: */ __pyx_t_3 = ((__pyx_v_fp < 0.0) != 0); - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":4137 + /* "mtrand.pyx":4150 * * if fp < 0.0: * raise ValueError("p < 0.0") # <<<<<<<<<<<<<< * if fp > 1.0: * raise ValueError("p > 1.0") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__146, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4137, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__148, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 4137, __pyx_L1_error) + __PYX_ERR(0, 4150, __pyx_L1_error) - /* "mtrand.pyx":4136 + /* "mtrand.pyx":4149 * fp = PyFloat_AsDouble(p) * * if fp < 0.0: # <<<<<<<<<<<<<< @@ -34952,7 +35125,7 @@ */ } - /* "mtrand.pyx":4138 + /* "mtrand.pyx":4151 * if fp < 0.0: * raise ValueError("p < 0.0") * if fp > 1.0: # <<<<<<<<<<<<<< @@ -34960,22 +35133,22 @@ * return discd_array_sc(self.internal_state, rk_geometric, size, fp, */ __pyx_t_3 = ((__pyx_v_fp > 1.0) != 0); - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":4139 + /* "mtrand.pyx":4152 * raise ValueError("p < 0.0") * if fp > 1.0: * raise ValueError("p > 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_geometric, size, fp, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__147, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4139, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__149, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4152, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 4139, __pyx_L1_error) + __PYX_ERR(0, 4152, __pyx_L1_error) - /* 
"mtrand.pyx":4138 + /* "mtrand.pyx":4151 * if fp < 0.0: * raise ValueError("p < 0.0") * if fp > 1.0: # <<<<<<<<<<<<<< @@ -34984,7 +35157,7 @@ */ } - /* "mtrand.pyx":4140 + /* "mtrand.pyx":4153 * if fp > 1.0: * raise ValueError("p > 1.0") * return discd_array_sc(self.internal_state, rk_geometric, size, fp, # <<<<<<<<<<<<<< @@ -34993,7 +35166,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":4141 + /* "mtrand.pyx":4154 * raise ValueError("p > 1.0") * return discd_array_sc(self.internal_state, rk_geometric, size, fp, * self.lock) # <<<<<<<<<<<<<< @@ -35003,21 +35176,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":4140 + /* "mtrand.pyx":4153 * if fp > 1.0: * raise ValueError("p > 1.0") * return discd_array_sc(self.internal_state, rk_geometric, size, fp, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_geometric, __pyx_v_size, __pyx_v_fp, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4140, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_geometric, __pyx_v_size, __pyx_v_fp, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":4133 + /* "mtrand.pyx":4146 * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if op.shape == (): # <<<<<<<<<<<<<< @@ -35026,21 +35199,21 @@ */ } - /* "mtrand.pyx":4143 + /* "mtrand.pyx":4156 * self.lock) * * if np.any(np.less(op, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("p < 0.0") * if np.any(np.greater(op, 1.0)): */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -35058,7 +35231,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_op), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -35066,13 +35239,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_op), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -35083,7 +35256,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -35099,14 +35272,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -35115,43 +35288,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 
4156, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4143, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4156, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":4144 + /* "mtrand.pyx":4157 * * if np.any(np.less(op, 0.0)): * raise ValueError("p < 0.0") # <<<<<<<<<<<<<< * if np.any(np.greater(op, 1.0)): * raise ValueError("p > 1.0") */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__148, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4144, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__150, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4157, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 4144, __pyx_L1_error) + __PYX_ERR(0, 4157, __pyx_L1_error) - /* "mtrand.pyx":4143 + /* "mtrand.pyx":4156 * self.lock) * * if np.any(np.less(op, 0.0)): # <<<<<<<<<<<<<< @@ -35160,21 +35333,21 @@ */ } - /* 
"mtrand.pyx":4145 + /* "mtrand.pyx":4158 * if np.any(np.less(op, 0.0)): * raise ValueError("p < 0.0") * if np.any(np.greater(op, 1.0)): # <<<<<<<<<<<<<< * raise ValueError("p > 1.0") * return discd_array(self.internal_state, rk_geometric, size, op, */ - __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_greater); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; @@ -35192,7 +35365,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_1, ((PyObject *)__pyx_v_op), __pyx_float_1_0}; - __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_5); } else @@ -35200,13 +35373,13 @@ 
#if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_1, ((PyObject *)__pyx_v_op), __pyx_float_1_0}; - __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_5); } else #endif { - __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); __pyx_t_1 = NULL; @@ -35217,7 +35390,7 @@ __Pyx_INCREF(__pyx_float_1_0); __Pyx_GIVEREF(__pyx_float_1_0); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, __pyx_float_1_0); - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } @@ -35233,14 +35406,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_5}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, 
__pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; @@ -35249,43 +35422,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_5}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_5); __pyx_t_5 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4145, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4158, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":4146 + /* "mtrand.pyx":4159 * raise ValueError("p < 0.0") * if np.any(np.greater(op, 1.0)): * raise ValueError("p > 1.0") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_geometric, size, 
op, * self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__149, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4146, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__151, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 4146, __pyx_L1_error) + __PYX_ERR(0, 4159, __pyx_L1_error) - /* "mtrand.pyx":4145 + /* "mtrand.pyx":4158 * if np.any(np.less(op, 0.0)): * raise ValueError("p < 0.0") * if np.any(np.greater(op, 1.0)): # <<<<<<<<<<<<<< @@ -35294,7 +35467,7 @@ */ } - /* "mtrand.pyx":4147 + /* "mtrand.pyx":4160 * if np.any(np.greater(op, 1.0)): * raise ValueError("p > 1.0") * return discd_array(self.internal_state, rk_geometric, size, op, # <<<<<<<<<<<<<< @@ -35303,7 +35476,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":4148 + /* "mtrand.pyx":4161 * raise ValueError("p > 1.0") * return discd_array(self.internal_state, rk_geometric, size, op, * self.lock) # <<<<<<<<<<<<<< @@ -35313,21 +35486,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":4147 + /* "mtrand.pyx":4160 * if np.any(np.greater(op, 1.0)): * raise ValueError("p > 1.0") * return discd_array(self.internal_state, rk_geometric, size, op, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_9 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_geometric, __pyx_v_size, __pyx_v_op, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4147, __pyx_L1_error) + __pyx_t_9 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_geometric, __pyx_v_size, __pyx_v_op, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; - /* "mtrand.pyx":4082 + /* "mtrand.pyx":4095 * return discd_array(self.internal_state, rk_zipf, size, oa, self.lock) * * def 
geometric(self, p, size=None): # <<<<<<<<<<<<<< @@ -35352,7 +35525,7 @@ return __pyx_r; } -/* "mtrand.pyx":4150 +/* "mtrand.pyx":4163 * self.lock) * * def hypergeometric(self, ngood, nbad, nsample, size=None): # <<<<<<<<<<<<<< @@ -35393,29 +35566,29 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_ngood)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_ngood)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nbad)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nbad)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, 1); __PYX_ERR(0, 4150, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, 1); __PYX_ERR(0, 4163, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: - if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nsample)) != 0)) kw_args--; + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_nsample)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, 2); __PYX_ERR(0, 4150, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, 2); __PYX_ERR(0, 4163, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "hypergeometric") < 0)) __PYX_ERR(0, 4150, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "hypergeometric") < 0)) __PYX_ERR(0, 4163, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -35435,7 
+35608,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4150, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("hypergeometric", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4163, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.hypergeometric", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -35470,14 +35643,14 @@ PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("hypergeometric", 0); - /* "mtrand.pyx":4241 + /* "mtrand.pyx":4254 * cdef long lngood, lnbad, lnsample * * ongood = PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * onbad = PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED) * onsample = PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED) */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_ngood, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4241, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_ngood, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -35485,14 +35658,14 @@ __pyx_v_ongood = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":4242 + /* "mtrand.pyx":4255 * * ongood = PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED) * onbad = PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * onsample = PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED) * */ - __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_nbad, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4242, __pyx_L1_error) + __pyx_t_2 = PyArray_FROM_OTF(__pyx_v_nbad, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); @@ -35500,14 +35673,14 @@ __pyx_v_onbad = ((PyArrayObject *)__pyx_t_1); 
__pyx_t_1 = 0; - /* "mtrand.pyx":4243 + /* "mtrand.pyx":4256 * ongood = PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED) * onbad = PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED) * onsample = PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if ongood.shape == onbad.shape == onsample.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_nsample, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4243, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_nsample, NPY_LONG, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -35515,66 +35688,66 @@ __pyx_v_onsample = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":4245 + /* "mtrand.pyx":4258 * onsample = PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED) * * if ongood.shape == onbad.shape == onsample.shape == (): # <<<<<<<<<<<<<< * lngood = PyInt_AsLong(ngood) * lnbad = PyInt_AsLong(nbad) */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ongood), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4245, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_ongood), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_onbad), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4245, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_onbad), __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4245, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4258, __pyx_L1_error) if 
(__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_onsample), __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4245, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_onsample), __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4245, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4258, __pyx_L1_error) if (__Pyx_PyObject_IsTrue(__pyx_t_3)) { __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4245, __pyx_L1_error) + __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4258, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4245, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4258, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_5) { - /* "mtrand.pyx":4246 + /* "mtrand.pyx":4259 * * if ongood.shape == onbad.shape == onsample.shape == (): * lngood = PyInt_AsLong(ngood) # <<<<<<<<<<<<<< * lnbad = PyInt_AsLong(nbad) * lnsample = PyInt_AsLong(nsample) */ - __pyx_t_6 = PyInt_AsLong(__pyx_v_ngood); if (unlikely(__pyx_t_6 == -1L && PyErr_Occurred())) __PYX_ERR(0, 4246, __pyx_L1_error) + __pyx_t_6 = PyInt_AsLong(__pyx_v_ngood); if (unlikely(__pyx_t_6 == ((long)-1L) && PyErr_Occurred())) __PYX_ERR(0, 4259, __pyx_L1_error) __pyx_v_lngood = __pyx_t_6; 
- /* "mtrand.pyx":4247 + /* "mtrand.pyx":4260 * if ongood.shape == onbad.shape == onsample.shape == (): * lngood = PyInt_AsLong(ngood) * lnbad = PyInt_AsLong(nbad) # <<<<<<<<<<<<<< * lnsample = PyInt_AsLong(nsample) * */ - __pyx_t_6 = PyInt_AsLong(__pyx_v_nbad); if (unlikely(__pyx_t_6 == -1L && PyErr_Occurred())) __PYX_ERR(0, 4247, __pyx_L1_error) + __pyx_t_6 = PyInt_AsLong(__pyx_v_nbad); if (unlikely(__pyx_t_6 == ((long)-1L) && PyErr_Occurred())) __PYX_ERR(0, 4260, __pyx_L1_error) __pyx_v_lnbad = __pyx_t_6; - /* "mtrand.pyx":4248 + /* "mtrand.pyx":4261 * lngood = PyInt_AsLong(ngood) * lnbad = PyInt_AsLong(nbad) * lnsample = PyInt_AsLong(nsample) # <<<<<<<<<<<<<< * * if lngood < 0: */ - __pyx_t_6 = PyInt_AsLong(__pyx_v_nsample); if (unlikely(__pyx_t_6 == -1L && PyErr_Occurred())) __PYX_ERR(0, 4248, __pyx_L1_error) + __pyx_t_6 = PyInt_AsLong(__pyx_v_nsample); if (unlikely(__pyx_t_6 == ((long)-1L) && PyErr_Occurred())) __PYX_ERR(0, 4261, __pyx_L1_error) __pyx_v_lnsample = __pyx_t_6; - /* "mtrand.pyx":4250 + /* "mtrand.pyx":4263 * lnsample = PyInt_AsLong(nsample) * * if lngood < 0: # <<<<<<<<<<<<<< @@ -35582,22 +35755,22 @@ * if lnbad < 0: */ __pyx_t_5 = ((__pyx_v_lngood < 0) != 0); - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":4251 + /* "mtrand.pyx":4264 * * if lngood < 0: * raise ValueError("ngood < 0") # <<<<<<<<<<<<<< * if lnbad < 0: * raise ValueError("nbad < 0") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__150, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4251, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__152, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 4251, __pyx_L1_error) + __PYX_ERR(0, 4264, __pyx_L1_error) - /* "mtrand.pyx":4250 + /* "mtrand.pyx":4263 * lnsample = PyInt_AsLong(nsample) * * if lngood < 0: # <<<<<<<<<<<<<< @@ 
-35606,7 +35779,7 @@ */ } - /* "mtrand.pyx":4252 + /* "mtrand.pyx":4265 * if lngood < 0: * raise ValueError("ngood < 0") * if lnbad < 0: # <<<<<<<<<<<<<< @@ -35614,22 +35787,22 @@ * if lnsample < 1: */ __pyx_t_5 = ((__pyx_v_lnbad < 0) != 0); - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":4253 + /* "mtrand.pyx":4266 * raise ValueError("ngood < 0") * if lnbad < 0: * raise ValueError("nbad < 0") # <<<<<<<<<<<<<< * if lnsample < 1: * raise ValueError("nsample < 1") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__151, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4253, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__153, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4266, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 4253, __pyx_L1_error) + __PYX_ERR(0, 4266, __pyx_L1_error) - /* "mtrand.pyx":4252 + /* "mtrand.pyx":4265 * if lngood < 0: * raise ValueError("ngood < 0") * if lnbad < 0: # <<<<<<<<<<<<<< @@ -35638,7 +35811,7 @@ */ } - /* "mtrand.pyx":4254 + /* "mtrand.pyx":4267 * if lnbad < 0: * raise ValueError("nbad < 0") * if lnsample < 1: # <<<<<<<<<<<<<< @@ -35646,22 +35819,22 @@ * if lngood + lnbad < lnsample: */ __pyx_t_5 = ((__pyx_v_lnsample < 1) != 0); - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":4255 + /* "mtrand.pyx":4268 * raise ValueError("nbad < 0") * if lnsample < 1: * raise ValueError("nsample < 1") # <<<<<<<<<<<<<< * if lngood + lnbad < lnsample: * raise ValueError("ngood + nbad < nsample") */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__152, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4255, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__154, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4268, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); 
__pyx_t_3 = 0; - __PYX_ERR(0, 4255, __pyx_L1_error) + __PYX_ERR(0, 4268, __pyx_L1_error) - /* "mtrand.pyx":4254 + /* "mtrand.pyx":4267 * if lnbad < 0: * raise ValueError("nbad < 0") * if lnsample < 1: # <<<<<<<<<<<<<< @@ -35670,7 +35843,7 @@ */ } - /* "mtrand.pyx":4256 + /* "mtrand.pyx":4269 * if lnsample < 1: * raise ValueError("nsample < 1") * if lngood + lnbad < lnsample: # <<<<<<<<<<<<<< @@ -35678,22 +35851,22 @@ * return discnmN_array_sc(self.internal_state, rk_hypergeometric, */ __pyx_t_5 = (((__pyx_v_lngood + __pyx_v_lnbad) < __pyx_v_lnsample) != 0); - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":4257 + /* "mtrand.pyx":4270 * raise ValueError("nsample < 1") * if lngood + lnbad < lnsample: * raise ValueError("ngood + nbad < nsample") # <<<<<<<<<<<<<< * return discnmN_array_sc(self.internal_state, rk_hypergeometric, * size, lngood, lnbad, lnsample, self.lock) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__153, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4257, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__155, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4270, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 4257, __pyx_L1_error) + __PYX_ERR(0, 4270, __pyx_L1_error) - /* "mtrand.pyx":4256 + /* "mtrand.pyx":4269 * if lnsample < 1: * raise ValueError("nsample < 1") * if lngood + lnbad < lnsample: # <<<<<<<<<<<<<< @@ -35702,7 +35875,7 @@ */ } - /* "mtrand.pyx":4258 + /* "mtrand.pyx":4271 * if lngood + lnbad < lnsample: * raise ValueError("ngood + nbad < nsample") * return discnmN_array_sc(self.internal_state, rk_hypergeometric, # <<<<<<<<<<<<<< @@ -35711,7 +35884,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":4259 + /* "mtrand.pyx":4272 * raise ValueError("ngood + nbad < nsample") * return discnmN_array_sc(self.internal_state, rk_hypergeometric, * size, lngood, lnbad, lnsample, self.lock) # 
<<<<<<<<<<<<<< @@ -35721,21 +35894,21 @@ __pyx_t_3 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_3); - /* "mtrand.pyx":4258 + /* "mtrand.pyx":4271 * if lngood + lnbad < lnsample: * raise ValueError("ngood + nbad < nsample") * return discnmN_array_sc(self.internal_state, rk_hypergeometric, # <<<<<<<<<<<<<< * size, lngood, lnbad, lnsample, self.lock) * */ - __pyx_t_1 = __pyx_f_6mtrand_discnmN_array_sc(__pyx_v_self->internal_state, rk_hypergeometric, __pyx_v_size, __pyx_v_lngood, __pyx_v_lnbad, __pyx_v_lnsample, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4258, __pyx_L1_error) + __pyx_t_1 = __pyx_f_6mtrand_discnmN_array_sc(__pyx_v_self->internal_state, rk_hypergeometric, __pyx_v_size, __pyx_v_lngood, __pyx_v_lnbad, __pyx_v_lnsample, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; - /* "mtrand.pyx":4245 + /* "mtrand.pyx":4258 * onsample = PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED) * * if ongood.shape == onbad.shape == onsample.shape == (): # <<<<<<<<<<<<<< @@ -35744,21 +35917,21 @@ */ } - /* "mtrand.pyx":4261 + /* "mtrand.pyx":4274 * size, lngood, lnbad, lnsample, self.lock) * * if np.any(np.less(ongood, 0)): # <<<<<<<<<<<<<< * raise ValueError("ngood < 0") * if np.any(np.less(onbad, 0)): */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_any); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; @@ -35776,7 +35949,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)__pyx_v_ongood), __pyx_int_0}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -35784,13 +35957,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_4, ((PyObject *)__pyx_v_ongood), __pyx_int_0}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_4); __pyx_t_4 = NULL; @@ -35801,7 +35974,7 @@ 
__Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_int_0); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -35817,14 +35990,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; @@ -35833,43 +36006,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_3}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4274, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4261, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4274, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":4262 + /* "mtrand.pyx":4275 * * if np.any(np.less(ongood, 0)): * raise ValueError("ngood < 0") # <<<<<<<<<<<<<< * if np.any(np.less(onbad, 0)): * raise ValueError("nbad < 0") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__154, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4262, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__156, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4275, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 4262, __pyx_L1_error) + __PYX_ERR(0, 4275, __pyx_L1_error) - /* "mtrand.pyx":4261 + /* "mtrand.pyx":4274 * size, lngood, lnbad, lnsample, self.lock) * * if np.any(np.less(ongood, 0)): # <<<<<<<<<<<<<< @@ -35878,21 +36051,21 @@ */ } - /* "mtrand.pyx":4263 + /* "mtrand.pyx":4276 * if np.any(np.less(ongood, 0)): * raise ValueError("ngood < 0") * if np.any(np.less(onbad, 0)): # <<<<<<<<<<<<<< * raise ValueError("nbad < 0") * if np.any(np.less(onsample, 1)): */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); 
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -35910,7 +36083,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_onbad), __pyx_int_0}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -35918,13 +36091,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_3, ((PyObject *)__pyx_v_onbad), __pyx_int_0}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_2 = 
__Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (__pyx_t_3) { __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = NULL; @@ -35935,7 +36108,7 @@ __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_4, 1+__pyx_t_8, __pyx_int_0); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } @@ -35951,14 +36124,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -35967,43 +36140,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_2}; - 
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4263, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4276, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":4264 + /* "mtrand.pyx":4277 * raise ValueError("ngood < 0") * if np.any(np.less(onbad, 0)): * raise ValueError("nbad < 0") # <<<<<<<<<<<<<< * if np.any(np.less(onsample, 1)): * raise ValueError("nsample < 1") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__155, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4264, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__157, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4277, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); 
__Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 4264, __pyx_L1_error) + __PYX_ERR(0, 4277, __pyx_L1_error) - /* "mtrand.pyx":4263 + /* "mtrand.pyx":4276 * if np.any(np.less(ongood, 0)): * raise ValueError("ngood < 0") * if np.any(np.less(onbad, 0)): # <<<<<<<<<<<<<< @@ -36012,21 +36185,21 @@ */ } - /* "mtrand.pyx":4265 + /* "mtrand.pyx":4278 * if np.any(np.less(onbad, 0)): * raise ValueError("nbad < 0") * if np.any(np.less(onsample, 1)): # <<<<<<<<<<<<<< * raise ValueError("nsample < 1") * if np.any(np.less(np.add(ongood, onbad),onsample)): */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_any); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_any); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -36044,7 +36217,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_2, ((PyObject *)__pyx_v_onsample), __pyx_int_1}; - __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_7, 
__pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_9); } else @@ -36052,13 +36225,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_2, ((PyObject *)__pyx_v_onsample), __pyx_int_1}; - __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_9); } else #endif { - __pyx_t_3 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = NULL; @@ -36069,7 +36242,7 @@ __Pyx_INCREF(__pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_8, __pyx_int_1); - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } @@ -36085,14 +36258,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 
0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_9}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -36101,43 +36274,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_9}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else #endif { - __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_9); __pyx_t_9 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4265, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 
4278, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":4266 + /* "mtrand.pyx":4279 * raise ValueError("nbad < 0") * if np.any(np.less(onsample, 1)): * raise ValueError("nsample < 1") # <<<<<<<<<<<<<< * if np.any(np.less(np.add(ongood, onbad),onsample)): * raise ValueError("ngood + nbad < nsample") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__156, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4266, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__158, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4279, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 4266, __pyx_L1_error) + __PYX_ERR(0, 4279, __pyx_L1_error) - /* "mtrand.pyx":4265 + /* "mtrand.pyx":4278 * if np.any(np.less(onbad, 0)): * raise ValueError("nbad < 0") * if np.any(np.less(onsample, 1)): # <<<<<<<<<<<<<< @@ -36146,26 +36319,26 @@ */ } - /* "mtrand.pyx":4267 + /* "mtrand.pyx":4280 * if np.any(np.less(onsample, 1)): * raise ValueError("nsample < 1") * if np.any(np.less(np.add(ongood, onbad),onsample)): # <<<<<<<<<<<<<< * raise ValueError("ngood + nbad < nsample") * return discnmN_array(self.internal_state, rk_hypergeometric, size, */ - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_any); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_any); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) 
__PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_less); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_add); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_add); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -36183,7 +36356,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[3] = {__pyx_t_2, ((PyObject *)__pyx_v_ongood), ((PyObject *)__pyx_v_onbad)}; - __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_9); } else @@ -36191,13 +36364,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[3] = {__pyx_t_2, ((PyObject *)__pyx_v_ongood), ((PyObject *)__pyx_v_onbad)}; - __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4267, 
__pyx_L1_error) + __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_10, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_9); } else #endif { - __pyx_t_11 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_11 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_2); __pyx_t_2 = NULL; @@ -36208,7 +36381,7 @@ __Pyx_INCREF(((PyObject *)__pyx_v_onbad)); __Pyx_GIVEREF(((PyObject *)__pyx_v_onbad)); PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_8, ((PyObject *)__pyx_v_onbad)); - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_11, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_11, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } @@ -36228,7 +36401,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_10, __pyx_t_9, ((PyObject *)__pyx_v_onsample)}; - __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -36237,14 +36410,14 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_10, __pyx_t_9, ((PyObject *)__pyx_v_onsample)}; - __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4267, __pyx_L1_error) + 
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else #endif { - __pyx_t_11 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_11 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); if (__pyx_t_10) { __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_10); __pyx_t_10 = NULL; @@ -36255,7 +36428,7 @@ __Pyx_GIVEREF(((PyObject *)__pyx_v_onsample)); PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_8, ((PyObject *)__pyx_v_onsample)); __pyx_t_9 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_11, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_11, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } @@ -36271,14 +36444,14 @@ } } if (!__pyx_t_7) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_4}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -36287,43 +36460,43 @@ #if CYTHON_FAST_PYCCALL if 
(__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_4}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { - __pyx_t_11 = PyTuple_New(1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_11 = PyTuple_New(1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 0+1, __pyx_t_4); __pyx_t_4 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_11, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_11, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4267, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4280, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { + if (unlikely(__pyx_t_5)) { - /* "mtrand.pyx":4268 + /* "mtrand.pyx":4281 * raise ValueError("nsample < 1") * if np.any(np.less(np.add(ongood, onbad),onsample)): * raise ValueError("ngood + nbad < nsample") # <<<<<<<<<<<<<< * return discnmN_array(self.internal_state, rk_hypergeometric, size, * ongood, onbad, onsample, self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__157, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4268, 
__pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__159, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 4268, __pyx_L1_error) + __PYX_ERR(0, 4281, __pyx_L1_error) - /* "mtrand.pyx":4267 + /* "mtrand.pyx":4280 * if np.any(np.less(onsample, 1)): * raise ValueError("nsample < 1") * if np.any(np.less(np.add(ongood, onbad),onsample)): # <<<<<<<<<<<<<< @@ -36332,7 +36505,7 @@ */ } - /* "mtrand.pyx":4269 + /* "mtrand.pyx":4282 * if np.any(np.less(np.add(ongood, onbad),onsample)): * raise ValueError("ngood + nbad < nsample") * return discnmN_array(self.internal_state, rk_hypergeometric, size, # <<<<<<<<<<<<<< @@ -36341,7 +36514,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":4270 + /* "mtrand.pyx":4283 * raise ValueError("ngood + nbad < nsample") * return discnmN_array(self.internal_state, rk_hypergeometric, size, * ongood, onbad, onsample, self.lock) # <<<<<<<<<<<<<< @@ -36351,21 +36524,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":4269 + /* "mtrand.pyx":4282 * if np.any(np.less(np.add(ongood, onbad),onsample)): * raise ValueError("ngood + nbad < nsample") * return discnmN_array(self.internal_state, rk_hypergeometric, size, # <<<<<<<<<<<<<< * ongood, onbad, onsample, self.lock) * */ - __pyx_t_3 = __pyx_f_6mtrand_discnmN_array(__pyx_v_self->internal_state, rk_hypergeometric, __pyx_v_size, __pyx_v_ongood, __pyx_v_onbad, __pyx_v_onsample, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4269, __pyx_L1_error) + __pyx_t_3 = __pyx_f_6mtrand_discnmN_array(__pyx_v_self->internal_state, rk_hypergeometric, __pyx_v_size, __pyx_v_ongood, __pyx_v_onbad, __pyx_v_onsample, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; - /* "mtrand.pyx":4150 
+ /* "mtrand.pyx":4163 * self.lock) * * def hypergeometric(self, ngood, nbad, nsample, size=None): # <<<<<<<<<<<<<< @@ -36394,7 +36567,7 @@ return __pyx_r; } -/* "mtrand.pyx":4272 +/* "mtrand.pyx":4285 * ongood, onbad, onsample, self.lock) * * def logseries(self, p, size=None): # <<<<<<<<<<<<<< @@ -36429,17 +36602,17 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_p)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_p)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "logseries") < 0)) __PYX_ERR(0, 4272, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "logseries") < 0)) __PYX_ERR(0, 4285, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -36455,7 +36628,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("logseries", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4272, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("logseries", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4285, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.logseries", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -36484,14 +36657,14 @@ PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("logseries", 0); - /* "mtrand.pyx":4349 + /* "mtrand.pyx":4362 * cdef double fp * * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) # <<<<<<<<<<<<<< * * if op.shape == (): */ - __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, 
NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4349, __pyx_L1_error) + __pyx_t_1 = PyArray_FROM_OTF(__pyx_v_p, NPY_DOUBLE, NPY_ARRAY_ALIGNED); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4362, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); @@ -36499,32 +36672,32 @@ __pyx_v_op = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":4351 + /* "mtrand.pyx":4364 * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if op.shape == (): # <<<<<<<<<<<<<< * fp = PyFloat_AsDouble(p) * */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_op), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4351, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_op), __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4364, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4351, __pyx_L1_error) + __pyx_t_1 = PyObject_RichCompare(__pyx_t_2, __pyx_empty_tuple, Py_EQ); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4364, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4351, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4364, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_3) { - /* "mtrand.pyx":4352 + /* "mtrand.pyx":4365 * * if op.shape == (): * fp = PyFloat_AsDouble(p) # <<<<<<<<<<<<<< * * if fp <= 0.0: */ - __pyx_t_4 = PyFloat_AsDouble(__pyx_v_p); if (unlikely(__pyx_t_4 == (-1.0) && PyErr_Occurred())) __PYX_ERR(0, 4352, __pyx_L1_error) + __pyx_t_4 = PyFloat_AsDouble(__pyx_v_p); if (unlikely(__pyx_t_4 == ((double)(-1.0)) && PyErr_Occurred())) __PYX_ERR(0, 4365, __pyx_L1_error) __pyx_v_fp = __pyx_t_4; - /* "mtrand.pyx":4354 + /* "mtrand.pyx":4367 * fp = 
PyFloat_AsDouble(p) * * if fp <= 0.0: # <<<<<<<<<<<<<< @@ -36532,22 +36705,22 @@ * if fp >= 1.0: */ __pyx_t_3 = ((__pyx_v_fp <= 0.0) != 0); - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":4355 + /* "mtrand.pyx":4368 * * if fp <= 0.0: * raise ValueError("p <= 0.0") # <<<<<<<<<<<<<< * if fp >= 1.0: * raise ValueError("p >= 1.0") */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__158, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4355, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__160, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4368, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 4355, __pyx_L1_error) + __PYX_ERR(0, 4368, __pyx_L1_error) - /* "mtrand.pyx":4354 + /* "mtrand.pyx":4367 * fp = PyFloat_AsDouble(p) * * if fp <= 0.0: # <<<<<<<<<<<<<< @@ -36556,7 +36729,7 @@ */ } - /* "mtrand.pyx":4356 + /* "mtrand.pyx":4369 * if fp <= 0.0: * raise ValueError("p <= 0.0") * if fp >= 1.0: # <<<<<<<<<<<<<< @@ -36564,22 +36737,22 @@ * return discd_array_sc(self.internal_state, rk_logseries, size, fp, */ __pyx_t_3 = ((__pyx_v_fp >= 1.0) != 0); - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":4357 + /* "mtrand.pyx":4370 * raise ValueError("p <= 0.0") * if fp >= 1.0: * raise ValueError("p >= 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_logseries, size, fp, * self.lock) */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__159, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4357, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__161, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4370, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 4357, __pyx_L1_error) + __PYX_ERR(0, 4370, __pyx_L1_error) - /* "mtrand.pyx":4356 + /* "mtrand.pyx":4369 
* if fp <= 0.0: * raise ValueError("p <= 0.0") * if fp >= 1.0: # <<<<<<<<<<<<<< @@ -36588,7 +36761,7 @@ */ } - /* "mtrand.pyx":4358 + /* "mtrand.pyx":4371 * if fp >= 1.0: * raise ValueError("p >= 1.0") * return discd_array_sc(self.internal_state, rk_logseries, size, fp, # <<<<<<<<<<<<<< @@ -36597,7 +36770,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":4359 + /* "mtrand.pyx":4372 * raise ValueError("p >= 1.0") * return discd_array_sc(self.internal_state, rk_logseries, size, fp, * self.lock) # <<<<<<<<<<<<<< @@ -36607,21 +36780,21 @@ __pyx_t_1 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_1); - /* "mtrand.pyx":4358 + /* "mtrand.pyx":4371 * if fp >= 1.0: * raise ValueError("p >= 1.0") * return discd_array_sc(self.internal_state, rk_logseries, size, fp, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_logseries, __pyx_v_size, __pyx_v_fp, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4358, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_discd_array_sc(__pyx_v_self->internal_state, rk_logseries, __pyx_v_size, __pyx_v_fp, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4371, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; - /* "mtrand.pyx":4351 + /* "mtrand.pyx":4364 * op = PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED) * * if op.shape == (): # <<<<<<<<<<<<<< @@ -36630,21 +36803,21 @@ */ } - /* "mtrand.pyx":4361 + /* "mtrand.pyx":4374 * self.lock) * * if np.any(np.less_equal(op, 0.0)): # <<<<<<<<<<<<<< * raise ValueError("p <= 0.0") * if np.any(np.greater_equal(op, 1.0)): */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_any); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -36662,7 +36835,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_op), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else @@ -36670,13 +36843,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_6, ((PyObject *)__pyx_v_op), __pyx_float_0_0}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else #endif { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); 
if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -36687,7 +36860,7 @@ __Pyx_INCREF(__pyx_float_0_0); __Pyx_GIVEREF(__pyx_float_0_0); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_float_0_0); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } @@ -36703,14 +36876,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -36719,43 +36892,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 
4374, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { - __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4361, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4374, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":4362 + /* "mtrand.pyx":4375 * * if np.any(np.less_equal(op, 0.0)): * raise ValueError("p <= 0.0") # <<<<<<<<<<<<<< * if np.any(np.greater_equal(op, 1.0)): * raise ValueError("p >= 1.0") */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__160, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4362, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__162, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4375, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 4362, __pyx_L1_error) + __PYX_ERR(0, 4375, __pyx_L1_error) - /* "mtrand.pyx":4361 + /* "mtrand.pyx":4374 * self.lock) * * if np.any(np.less_equal(op, 0.0)): # <<<<<<<<<<<<<< @@ -36764,21 
+36937,21 @@ */ } - /* "mtrand.pyx":4363 + /* "mtrand.pyx":4376 * if np.any(np.less_equal(op, 0.0)): * raise ValueError("p <= 0.0") * if np.any(np.greater_equal(op, 1.0)): # <<<<<<<<<<<<<< * raise ValueError("p >= 1.0") * return discd_array(self.internal_state, rk_logseries, size, op, */ - __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_any); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_greater_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_greater_equal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; @@ -36796,7 +36969,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_1, ((PyObject *)__pyx_v_op), __pyx_float_1_0}; - __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; 
__Pyx_GOTREF(__pyx_t_5); } else @@ -36804,13 +36977,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[3] = {__pyx_t_1, ((PyObject *)__pyx_v_op), __pyx_float_1_0}; - __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_5); } else #endif { - __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_1) { __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); __pyx_t_1 = NULL; @@ -36821,7 +36994,7 @@ __Pyx_INCREF(__pyx_float_1_0); __Pyx_GIVEREF(__pyx_float_1_0); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_8, __pyx_float_1_0); - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } @@ -36837,14 +37010,14 @@ } } if (!__pyx_t_7) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_5}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4363, __pyx_L1_error) + 
__pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; @@ -36853,43 +37026,43 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_5}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_5); __pyx_t_5 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4363, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 4376, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_3) { + if (unlikely(__pyx_t_3)) { - /* "mtrand.pyx":4364 + /* "mtrand.pyx":4377 * raise ValueError("p <= 0.0") * if np.any(np.greater_equal(op, 1.0)): * raise ValueError("p >= 1.0") # <<<<<<<<<<<<<< * 
return discd_array(self.internal_state, rk_logseries, size, op, * self.lock) */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__161, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4364, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__163, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4377, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 4364, __pyx_L1_error) + __PYX_ERR(0, 4377, __pyx_L1_error) - /* "mtrand.pyx":4363 + /* "mtrand.pyx":4376 * if np.any(np.less_equal(op, 0.0)): * raise ValueError("p <= 0.0") * if np.any(np.greater_equal(op, 1.0)): # <<<<<<<<<<<<<< @@ -36898,7 +37071,7 @@ */ } - /* "mtrand.pyx":4365 + /* "mtrand.pyx":4378 * if np.any(np.greater_equal(op, 1.0)): * raise ValueError("p >= 1.0") * return discd_array(self.internal_state, rk_logseries, size, op, # <<<<<<<<<<<<<< @@ -36907,7 +37080,7 @@ */ __Pyx_XDECREF(__pyx_r); - /* "mtrand.pyx":4366 + /* "mtrand.pyx":4379 * raise ValueError("p >= 1.0") * return discd_array(self.internal_state, rk_logseries, size, op, * self.lock) # <<<<<<<<<<<<<< @@ -36917,21 +37090,21 @@ __pyx_t_2 = __pyx_v_self->lock; __Pyx_INCREF(__pyx_t_2); - /* "mtrand.pyx":4365 + /* "mtrand.pyx":4378 * if np.any(np.greater_equal(op, 1.0)): * raise ValueError("p >= 1.0") * return discd_array(self.internal_state, rk_logseries, size, op, # <<<<<<<<<<<<<< * self.lock) * */ - __pyx_t_9 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_logseries, __pyx_v_size, __pyx_v_op, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4365, __pyx_L1_error) + __pyx_t_9 = __pyx_f_6mtrand_discd_array(__pyx_v_self->internal_state, rk_logseries, __pyx_v_size, __pyx_v_op, __pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4378, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; - /* "mtrand.pyx":4272 + /* 
"mtrand.pyx":4285 * ongood, onbad, onsample, self.lock) * * def logseries(self, p, size=None): # <<<<<<<<<<<<<< @@ -36956,7 +37129,7 @@ return __pyx_r; } -/* "mtrand.pyx":4369 +/* "mtrand.pyx":4382 * * # Multivariate distributions: * def multivariate_normal(self, mean, cov, size=None, check_valid='warn', # <<<<<<<<<<<<<< @@ -37002,35 +37175,35 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mean)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mean)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_cov)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cov)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("multivariate_normal", 0, 2, 5, 1); __PYX_ERR(0, 4369, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("multivariate_normal", 0, 2, 5, 1); __PYX_ERR(0, 4382, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_check_valid); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_check_valid); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_tol); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_tol); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multivariate_normal") < 0)) __PYX_ERR(0, 4369, __pyx_L3_error) + if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multivariate_normal") < 0)) __PYX_ERR(0, 4382, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -37054,7 +37227,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("multivariate_normal", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4369, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("multivariate_normal", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4382, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.multivariate_normal", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -37095,38 +37268,38 @@ __Pyx_INCREF(__pyx_v_mean); __Pyx_INCREF(__pyx_v_cov); - /* "mtrand.pyx":4470 + /* "mtrand.pyx":4483 * * """ * from numpy.dual import svd # <<<<<<<<<<<<<< * * # Check preconditions on arguments */ - __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4470, __pyx_L1_error) + __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_svd); __Pyx_GIVEREF(__pyx_n_s_svd); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_svd); - __pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy_dual, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4470, __pyx_L1_error) + __pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy_dual, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_svd); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4470, __pyx_L1_error) + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_svd); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_t_1); __pyx_v_svd = __pyx_t_1; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":4473 + /* "mtrand.pyx":4486 * * # 
Check preconditions on arguments * mean = np.array(mean) # <<<<<<<<<<<<<< * cov = np.array(cov) * if size is None: */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4473, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4473, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; @@ -37140,13 +37313,13 @@ } } if (!__pyx_t_1) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_mean); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4473, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_mean); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_v_mean}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4473, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4486, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -37154,19 +37327,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_v_mean}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4473, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4486, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else 
#endif { - __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4473, __pyx_L1_error) + __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_v_mean); __Pyx_GIVEREF(__pyx_v_mean); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_v_mean); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4473, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } @@ -37175,16 +37348,16 @@ __Pyx_DECREF_SET(__pyx_v_mean, __pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":4474 + /* "mtrand.pyx":4487 * # Check preconditions on arguments * mean = np.array(mean) * cov = np.array(cov) # <<<<<<<<<<<<<< * if size is None: * shape = [] */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4474, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4474, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = NULL; @@ -37198,13 +37371,13 @@ } } if (!__pyx_t_3) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_cov); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4474, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_cov); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject 
*__pyx_temp[2] = {__pyx_t_3, __pyx_v_cov}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4474, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4487, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -37212,19 +37385,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_cov}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4474, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4487, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4474, __pyx_L1_error) + __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_v_cov); __Pyx_GIVEREF(__pyx_v_cov); PyTuple_SET_ITEM(__pyx_t_1, 0+1, __pyx_v_cov); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4474, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } @@ -37233,7 +37406,7 @@ __Pyx_DECREF_SET(__pyx_v_cov, __pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":4475 + /* "mtrand.pyx":4488 * mean = np.array(mean) * cov = np.array(cov) * if size is None: # <<<<<<<<<<<<<< @@ -37244,19 +37417,19 @@ __pyx_t_6 = (__pyx_t_5 != 0); if (__pyx_t_6) { - /* "mtrand.pyx":4476 + /* "mtrand.pyx":4489 * cov = np.array(cov) * if size is None: * shape = [] # <<<<<<<<<<<<<< * 
elif isinstance(size, (int, long, np.integer)): * shape = [size] */ - __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4476, __pyx_L1_error) + __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4489, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_shape = __pyx_t_2; __pyx_t_2 = 0; - /* "mtrand.pyx":4475 + /* "mtrand.pyx":4488 * mean = np.array(mean) * cov = np.array(cov) * if size is None: # <<<<<<<<<<<<<< @@ -37266,16 +37439,16 @@ goto __pyx_L3; } - /* "mtrand.pyx":4477 + /* "mtrand.pyx":4490 * if size is None: * shape = [] * elif isinstance(size, (int, long, np.integer)): # <<<<<<<<<<<<<< * shape = [size] * else: */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4477, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4490, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_integer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4477, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_integer); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4490, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = PyInt_Check(__pyx_v_size); @@ -37300,14 +37473,14 @@ __pyx_t_7 = (__pyx_t_6 != 0); if (__pyx_t_7) { - /* "mtrand.pyx":4478 + /* "mtrand.pyx":4491 * shape = [] * elif isinstance(size, (int, long, np.integer)): * shape = [size] # <<<<<<<<<<<<<< * else: * shape = size */ - __pyx_t_4 = PyList_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4478, __pyx_L1_error) + __pyx_t_4 = PyList_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_INCREF(__pyx_v_size); __Pyx_GIVEREF(__pyx_v_size); @@ -37315,7 +37488,7 @@ __pyx_v_shape = __pyx_t_4; __pyx_t_4 = 0; - /* "mtrand.pyx":4477 + /* "mtrand.pyx":4490 * if size is None: * shape = [] * elif isinstance(size, (int, long, np.integer)): # <<<<<<<<<<<<<< 
@@ -37325,7 +37498,7 @@ goto __pyx_L3; } - /* "mtrand.pyx":4480 + /* "mtrand.pyx":4493 * shape = [size] * else: * shape = size # <<<<<<<<<<<<<< @@ -37338,34 +37511,34 @@ } __pyx_L3:; - /* "mtrand.pyx":4482 + /* "mtrand.pyx":4495 * shape = size * * if len(mean.shape) != 1: # <<<<<<<<<<<<<< * raise ValueError("mean must be 1 dimensional") * if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4482, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4495, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_8 = PyObject_Length(__pyx_t_4); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(0, 4482, __pyx_L1_error) + __pyx_t_8 = PyObject_Length(__pyx_t_4); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 4495, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_7 = ((__pyx_t_8 != 1) != 0); - if (__pyx_t_7) { + if (unlikely(__pyx_t_7)) { - /* "mtrand.pyx":4483 + /* "mtrand.pyx":4496 * * if len(mean.shape) != 1: * raise ValueError("mean must be 1 dimensional") # <<<<<<<<<<<<<< * if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): * raise ValueError("cov must be 2 dimensional and square") */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__162, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4483, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__164, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4496, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 4483, __pyx_L1_error) + __PYX_ERR(0, 4496, __pyx_L1_error) - /* "mtrand.pyx":4482 + /* "mtrand.pyx":4495 * shape = size * * if len(mean.shape) != 1: # <<<<<<<<<<<<<< @@ -37374,16 +37547,16 @@ */ } - /* "mtrand.pyx":4484 + /* "mtrand.pyx":4497 * if len(mean.shape) != 1: * raise 
ValueError("mean must be 1 dimensional") * if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): # <<<<<<<<<<<<<< * raise ValueError("cov must be 2 dimensional and square") * if mean.shape[0] != cov.shape[0]: */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4484, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_8 = PyObject_Length(__pyx_t_4); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(0, 4484, __pyx_L1_error) + __pyx_t_8 = PyObject_Length(__pyx_t_4); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 4497, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = ((__pyx_t_8 != 2) != 0); if (!__pyx_t_6) { @@ -37391,39 +37564,39 @@ __pyx_t_7 = __pyx_t_6; goto __pyx_L9_bool_binop_done; } - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4484, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_4, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4484, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_4, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4484, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4484, 
__pyx_L1_error) + __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4484, __pyx_L1_error) + __pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_1, Py_NE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4497, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4484, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4497, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_7 = __pyx_t_6; __pyx_L9_bool_binop_done:; - if (__pyx_t_7) { + if (unlikely(__pyx_t_7)) { - /* "mtrand.pyx":4485 + /* "mtrand.pyx":4498 * raise ValueError("mean must be 1 dimensional") * if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): * raise ValueError("cov must be 2 dimensional and square") # <<<<<<<<<<<<<< * if mean.shape[0] != cov.shape[0]: * raise ValueError("mean and cov must have same length") */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__163, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4485, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__165, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4498, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 4485, __pyx_L1_error) + __PYX_ERR(0, 4498, __pyx_L1_error) - /* "mtrand.pyx":4484 + /* "mtrand.pyx":4497 * if len(mean.shape) != 1: * raise ValueError("mean must be 1 dimensional") * if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): # <<<<<<<<<<<<<< @@ -37432,44 +37605,44 @@ */ } 
- /* "mtrand.pyx":4486 + /* "mtrand.pyx":4499 * if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): * raise ValueError("cov must be 2 dimensional and square") * if mean.shape[0] != cov.shape[0]: # <<<<<<<<<<<<<< * raise ValueError("mean and cov must have same length") * */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4486, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4486, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4486, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_cov, __pyx_n_s_shape); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_4, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4486, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_4, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_NE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4486, __pyx_L1_error) + __pyx_t_4 = PyObject_RichCompare(__pyx_t_1, __pyx_t_2, Py_NE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4499, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_7 = 
__Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 4486, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 4499, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__pyx_t_7) { + if (unlikely(__pyx_t_7)) { - /* "mtrand.pyx":4487 + /* "mtrand.pyx":4500 * raise ValueError("cov must be 2 dimensional and square") * if mean.shape[0] != cov.shape[0]: * raise ValueError("mean and cov must have same length") # <<<<<<<<<<<<<< * * # Compute shape of output and create a matrix of independent */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__164, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4487, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__166, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4500, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 4487, __pyx_L1_error) + __PYX_ERR(0, 4500, __pyx_L1_error) - /* "mtrand.pyx":4486 + /* "mtrand.pyx":4499 * if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): * raise ValueError("cov must be 2 dimensional and square") * if mean.shape[0] != cov.shape[0]: # <<<<<<<<<<<<<< @@ -37478,44 +37651,44 @@ */ } - /* "mtrand.pyx":4493 + /* "mtrand.pyx":4506 * # with the same length as mean and as many rows are necessary to * # form a matrix of shape final_shape. 
* final_shape = list(shape[:]) # <<<<<<<<<<<<<< * final_shape.append(mean.shape[0]) * x = self.standard_normal(final_shape).reshape(-1, mean.shape[0]) */ - __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_shape, 0, 0, NULL, NULL, &__pyx_slice__165, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4493, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_shape, 0, 0, NULL, NULL, &__pyx_slice__167, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PySequence_List(__pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4493, __pyx_L1_error) + __pyx_t_2 = PySequence_List(__pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_final_shape = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":4494 + /* "mtrand.pyx":4507 * # form a matrix of shape final_shape. * final_shape = list(shape[:]) * final_shape.append(mean.shape[0]) # <<<<<<<<<<<<<< * x = self.standard_normal(final_shape).reshape(-1, mean.shape[0]) * */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4494, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4507, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4494, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4507, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_final_shape, __pyx_t_4); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(0, 4494, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_final_shape, __pyx_t_4); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(0, 4507, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - /* "mtrand.pyx":4495 + /* "mtrand.pyx":4508 * final_shape = list(shape[:]) * final_shape.append(mean.shape[0]) * x = self.standard_normal(final_shape).reshape(-1, mean.shape[0]) # <<<<<<<<<<<<<< * * # Transform matrix of standard normals into matrix where each row */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_standard_normal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_standard_normal); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { @@ -37528,13 +37701,13 @@ } } if (!__pyx_t_3) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_final_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_final_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_final_shape}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -37542,30 +37715,30 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_final_shape}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); 
__pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_v_final_shape); __Pyx_GIVEREF(__pyx_v_final_shape); PyTuple_SET_ITEM(__pyx_t_10, 0+1, __pyx_v_final_shape); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_10, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_10, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_reshape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_reshape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_mean, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_10 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_10 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -37583,7 +37756,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_int_neg_1, 
__pyx_t_10}; - __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; @@ -37592,14 +37765,14 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_int_neg_1, __pyx_t_10}; - __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else #endif { - __pyx_t_3 = PyTuple_New(2+__pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_3 = PyTuple_New(2+__pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __pyx_t_2 = NULL; @@ -37610,7 +37783,7 @@ __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_11, __pyx_t_10); __pyx_t_10 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4495, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } @@ -37618,7 +37791,7 @@ __pyx_v_x = __pyx_t_4; __pyx_t_4 = 0; - /* "mtrand.pyx":4511 + /* "mtrand.pyx":4524 * # been checked. 
* * (u, s, v) = svd(cov) # <<<<<<<<<<<<<< @@ -37637,13 +37810,13 @@ } } if (!__pyx_t_3) { - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_cov); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4511, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_cov); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4524, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_cov}; - __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4511, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4524, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_4); } else @@ -37651,19 +37824,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_cov}; - __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4511, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4524, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_4); } else #endif { - __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4511, __pyx_L1_error) + __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4524, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_v_cov); __Pyx_GIVEREF(__pyx_v_cov); PyTuple_SET_ITEM(__pyx_t_10, 0+1, __pyx_v_cov); - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4511, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4524, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } @@ -37671,15 +37844,11 @@ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_4))) || (PyList_CheckExact(__pyx_t_4))) { PyObject* sequence = __pyx_t_4; - #if !CYTHON_COMPILING_IN_PYPY - Py_ssize_t size = Py_SIZE(sequence); - #else - Py_ssize_t size = PySequence_Size(sequence); - #endif + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 3)) { if (size > 3) __Pyx_RaiseTooManyValuesError(3); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 4511, __pyx_L1_error) + __PYX_ERR(0, 4524, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { @@ -37695,17 +37864,17 @@ __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(__pyx_t_3); #else - __pyx_t_1 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4511, __pyx_L1_error) + __pyx_t_1 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4524, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_10 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4511, __pyx_L1_error) + __pyx_t_10 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4524, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); - __pyx_t_3 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4511, __pyx_L1_error) + __pyx_t_3 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4524, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else { Py_ssize_t index = -1; - __pyx_t_2 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4511, __pyx_L1_error) + __pyx_t_2 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4524, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_12 = Py_TYPE(__pyx_t_2)->tp_iternext; @@ -37715,7 +37884,7 @@ __Pyx_GOTREF(__pyx_t_10); index = 
2; __pyx_t_3 = __pyx_t_12(__pyx_t_2); if (unlikely(!__pyx_t_3)) goto __pyx_L12_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_12(__pyx_t_2), 3) < 0) __PYX_ERR(0, 4511, __pyx_L1_error) + if (__Pyx_IternextUnpackEndCheck(__pyx_t_12(__pyx_t_2), 3) < 0) __PYX_ERR(0, 4524, __pyx_L1_error) __pyx_t_12 = NULL; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L13_unpacking_done; @@ -37723,7 +37892,7 @@ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_12 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 4511, __pyx_L1_error) + __PYX_ERR(0, 4524, __pyx_L1_error) __pyx_L13_unpacking_done:; } __pyx_v_u = __pyx_t_1; @@ -37733,48 +37902,48 @@ __pyx_v_v = __pyx_t_3; __pyx_t_3 = 0; - /* "mtrand.pyx":4513 + /* "mtrand.pyx":4526 * (u, s, v) = svd(cov) * * if check_valid != 'ignore': # <<<<<<<<<<<<<< * if check_valid != 'warn' and check_valid != 'raise': * raise ValueError("check_valid must equal 'warn', 'raise', or 'ignore'") */ - __pyx_t_7 = (__Pyx_PyString_Equals(__pyx_v_check_valid, __pyx_n_s_ignore, Py_NE)); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 4513, __pyx_L1_error) + __pyx_t_7 = (__Pyx_PyString_Equals(__pyx_v_check_valid, __pyx_n_s_ignore, Py_NE)); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 4526, __pyx_L1_error) if (__pyx_t_7) { - /* "mtrand.pyx":4514 + /* "mtrand.pyx":4527 * * if check_valid != 'ignore': * if check_valid != 'warn' and check_valid != 'raise': # <<<<<<<<<<<<<< * raise ValueError("check_valid must equal 'warn', 'raise', or 'ignore'") * */ - __pyx_t_6 = (__Pyx_PyString_Equals(__pyx_v_check_valid, __pyx_n_s_warn, Py_NE)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4514, __pyx_L1_error) + __pyx_t_6 = (__Pyx_PyString_Equals(__pyx_v_check_valid, __pyx_n_s_warn, Py_NE)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4527, __pyx_L1_error) if (__pyx_t_6) { } else { __pyx_t_7 = __pyx_t_6; goto __pyx_L16_bool_binop_done; } - __pyx_t_6 = (__Pyx_PyString_Equals(__pyx_v_check_valid, 
__pyx_n_s_raise, Py_NE)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4514, __pyx_L1_error) + __pyx_t_6 = (__Pyx_PyString_Equals(__pyx_v_check_valid, __pyx_n_s_raise, Py_NE)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4527, __pyx_L1_error) __pyx_t_7 = __pyx_t_6; __pyx_L16_bool_binop_done:; - if (__pyx_t_7) { + if (unlikely(__pyx_t_7)) { - /* "mtrand.pyx":4515 + /* "mtrand.pyx":4528 * if check_valid != 'ignore': * if check_valid != 'warn' and check_valid != 'raise': * raise ValueError("check_valid must equal 'warn', 'raise', or 'ignore'") # <<<<<<<<<<<<<< * * psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol) */ - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__166, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4515, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__168, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4528, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(0, 4515, __pyx_L1_error) + __PYX_ERR(0, 4528, __pyx_L1_error) - /* "mtrand.pyx":4514 + /* "mtrand.pyx":4527 * * if check_valid != 'ignore': * if check_valid != 'warn' and check_valid != 'raise': # <<<<<<<<<<<<<< @@ -37783,26 +37952,26 @@ */ } - /* "mtrand.pyx":4517 + /* "mtrand.pyx":4530 * raise ValueError("check_valid must equal 'warn', 'raise', or 'ignore'") * * psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol) # <<<<<<<<<<<<<< * if not psd: * if check_valid == 'warn': */ - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_allclose); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_allclose); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_dot); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_dot); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_v, __pyx_n_s_T); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_v, __pyx_n_s_T); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); - __pyx_t_2 = PyNumber_Multiply(__pyx_t_10, __pyx_v_s); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_2 = PyNumber_Multiply(__pyx_t_10, __pyx_v_s); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = NULL; @@ -37820,7 +37989,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = {__pyx_t_10, __pyx_t_2, __pyx_v_v}; - __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; @@ -37829,14 +37998,14 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[3] = 
{__pyx_t_10, __pyx_t_2, __pyx_v_v}; - __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { - __pyx_t_13 = PyTuple_New(2+__pyx_t_11); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_13 = PyTuple_New(2+__pyx_t_11); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); if (__pyx_t_10) { __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_13, 0, __pyx_t_10); __pyx_t_10 = NULL; @@ -37847,12 +38016,12 @@ __Pyx_GIVEREF(__pyx_v_v); PyTuple_SET_ITEM(__pyx_t_13, 1+__pyx_t_11, __pyx_v_v); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_13, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_13, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_4); @@ -37860,11 +38029,11 @@ __Pyx_GIVEREF(__pyx_v_cov); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_cov); __pyx_t_4 = 0; - __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4517, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_rtol, __pyx_v_tol) < 0) __PYX_ERR(0, 4517, __pyx_L1_error) 
- if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_atol, __pyx_v_tol) < 0) __PYX_ERR(0, 4517, __pyx_L1_error) - __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4517, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_rtol, __pyx_v_tol) < 0) __PYX_ERR(0, 4530, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_atol, __pyx_v_tol) < 0) __PYX_ERR(0, 4530, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; @@ -37872,95 +38041,53 @@ __pyx_v_psd = __pyx_t_13; __pyx_t_13 = 0; - /* "mtrand.pyx":4518 + /* "mtrand.pyx":4531 * * psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol) * if not psd: # <<<<<<<<<<<<<< * if check_valid == 'warn': * warnings.warn("covariance is not positive-semidefinite.", */ - __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_psd); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 4518, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_v_psd); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 4531, __pyx_L1_error) __pyx_t_6 = ((!__pyx_t_7) != 0); if (__pyx_t_6) { - /* "mtrand.pyx":4519 + /* "mtrand.pyx":4532 * psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol) * if not psd: * if check_valid == 'warn': # <<<<<<<<<<<<<< * warnings.warn("covariance is not positive-semidefinite.", * RuntimeWarning) */ - __pyx_t_6 = (__Pyx_PyString_Equals(__pyx_v_check_valid, __pyx_n_s_warn, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4519, __pyx_L1_error) - if (__pyx_t_6) { + __pyx_t_6 = (__Pyx_PyString_Equals(__pyx_v_check_valid, __pyx_n_s_warn, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4532, __pyx_L1_error) + if (likely(__pyx_t_6)) { - /* "mtrand.pyx":4520 + /* "mtrand.pyx":4533 * if not psd: * if check_valid == 'warn': * warnings.warn("covariance is not positive-semidefinite.", # 
<<<<<<<<<<<<<< * RuntimeWarning) * else: */ - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_warnings); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4520, __pyx_L1_error) + __pyx_t_13 = __Pyx_GetModuleGlobalName(__pyx_n_s_warnings); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4533, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_13); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_13, __pyx_n_s_warn); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4533, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_warn); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4520, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - /* "mtrand.pyx":4521 + /* "mtrand.pyx":4534 * if check_valid == 'warn': * warnings.warn("covariance is not positive-semidefinite.", * RuntimeWarning) # <<<<<<<<<<<<<< * else: * raise ValueError("covariance is not positive-semidefinite.") */ - __pyx_t_4 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_11 = 1; - } - } - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_1)) { - PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_kp_s_covariance_is_not_positive_semid, __pyx_builtin_RuntimeWarning}; - __pyx_t_13 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4520, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_GOTREF(__pyx_t_13); - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { - PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_kp_s_covariance_is_not_positive_semid, __pyx_builtin_RuntimeWarning}; - __pyx_t_13 = __Pyx_PyCFunction_FastCall(__pyx_t_1, 
__pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4520, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_GOTREF(__pyx_t_13); - } else - #endif - { - __pyx_t_3 = PyTuple_New(2+__pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4520, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (__pyx_t_4) { - __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = NULL; - } - __Pyx_INCREF(__pyx_kp_s_covariance_is_not_positive_semid); - __Pyx_GIVEREF(__pyx_kp_s_covariance_is_not_positive_semid); - PyTuple_SET_ITEM(__pyx_t_3, 0+__pyx_t_11, __pyx_kp_s_covariance_is_not_positive_semid); - __Pyx_INCREF(__pyx_builtin_RuntimeWarning); - __Pyx_GIVEREF(__pyx_builtin_RuntimeWarning); - PyTuple_SET_ITEM(__pyx_t_3, 1+__pyx_t_11, __pyx_builtin_RuntimeWarning); - __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_3, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4520, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_13); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_tuple__169, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4533, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_13); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - /* "mtrand.pyx":4519 + /* "mtrand.pyx":4532 * psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol) * if not psd: * if check_valid == 'warn': # <<<<<<<<<<<<<< @@ -37970,7 +38097,7 @@ goto __pyx_L19; } - /* "mtrand.pyx":4523 + /* "mtrand.pyx":4536 * RuntimeWarning) * else: * raise ValueError("covariance is not positive-semidefinite.") # <<<<<<<<<<<<<< @@ -37978,15 +38105,15 @@ * x = np.dot(x, np.sqrt(s)[:, None] * v) */ /*else*/ { - __pyx_t_13 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__167, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4523, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__170, NULL); if 
(unlikely(!__pyx_t_13)) __PYX_ERR(0, 4536, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_Raise(__pyx_t_13, 0, 0, 0); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __PYX_ERR(0, 4523, __pyx_L1_error) + __PYX_ERR(0, 4536, __pyx_L1_error) } __pyx_L19:; - /* "mtrand.pyx":4518 + /* "mtrand.pyx":4531 * * psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol) * if not psd: # <<<<<<<<<<<<<< @@ -37995,7 +38122,7 @@ */ } - /* "mtrand.pyx":4513 + /* "mtrand.pyx":4526 * (u, s, v) = svd(cov) * * if check_valid != 'ignore': # <<<<<<<<<<<<<< @@ -38004,104 +38131,104 @@ */ } - /* "mtrand.pyx":4525 + /* "mtrand.pyx":4538 * raise ValueError("covariance is not positive-semidefinite.") * * x = np.dot(x, np.sqrt(s)[:, None] * v) # <<<<<<<<<<<<<< * x += mean * x.shape = tuple(final_shape) */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4525, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4538, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_dot); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4538, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_dot); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4525, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4538, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4525, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4525, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_sqrt); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4538, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = NULL; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } - if (!__pyx_t_4) { - __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_s); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4525, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); + if (!__pyx_t_3) { + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_s); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4538, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { - PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_v_s}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4525, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_GOTREF(__pyx_t_1); + PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_s}; + __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4538, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_4); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { - PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_v_s}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4525, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_GOTREF(__pyx_t_1); + PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_v_s}; + __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4538, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; 
+ __Pyx_GOTREF(__pyx_t_4); } else #endif { - __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4525, __pyx_L1_error) + __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4538, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); - __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_4); __pyx_t_4 = NULL; + __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_v_s); __Pyx_GIVEREF(__pyx_v_s); PyTuple_SET_ITEM(__pyx_t_10, 0+1, __pyx_v_s); - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4525, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4538, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_tuple__169); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4525, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_tuple__172); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4538, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Multiply(__pyx_t_2, __pyx_v_v); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4525, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = PyNumber_Multiply(__pyx_t_2, __pyx_v_v); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4538, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); + __Pyx_DECREF_SET(__pyx_t_1, function); __pyx_t_11 = 1; } } #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_3)) { - PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_x, __pyx_t_1}; - __pyx_t_13 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4525, __pyx_L1_error) + if (PyFunction_Check(__pyx_t_1)) { + PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_x, __pyx_t_4}; + __pyx_t_13 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4538, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_13); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { - PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_x, __pyx_t_1}; - __pyx_t_13 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4525, __pyx_L1_error) + if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { + PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_x, __pyx_t_4}; + __pyx_t_13 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_11, 2+__pyx_t_11); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4538, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_13); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { - __pyx_t_10 = PyTuple_New(2+__pyx_t_11); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4525, __pyx_L1_error) + __pyx_t_10 = PyTuple_New(2+__pyx_t_11); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4538, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_2); __pyx_t_2 = NULL; @@ -38109,42 +38236,42 @@ __Pyx_INCREF(__pyx_v_x); 
__Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_10, 0+__pyx_t_11, __pyx_v_x); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_10, 1+__pyx_t_11, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_10, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4525, __pyx_L1_error) + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_10, 1+__pyx_t_11, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_10, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4538, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_x, __pyx_t_13); __pyx_t_13 = 0; - /* "mtrand.pyx":4526 + /* "mtrand.pyx":4539 * * x = np.dot(x, np.sqrt(s)[:, None] * v) * x += mean # <<<<<<<<<<<<<< * x.shape = tuple(final_shape) * return x */ - __pyx_t_13 = PyNumber_InPlaceAdd(__pyx_v_x, __pyx_v_mean); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4526, __pyx_L1_error) + __pyx_t_13 = PyNumber_InPlaceAdd(__pyx_v_x, __pyx_v_mean); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4539, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF_SET(__pyx_v_x, __pyx_t_13); __pyx_t_13 = 0; - /* "mtrand.pyx":4527 + /* "mtrand.pyx":4540 * x = np.dot(x, np.sqrt(s)[:, None] * v) * x += mean * x.shape = tuple(final_shape) # <<<<<<<<<<<<<< * return x * */ - __pyx_t_13 = PyList_AsTuple(__pyx_v_final_shape); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4527, __pyx_L1_error) + __pyx_t_13 = PyList_AsTuple(__pyx_v_final_shape); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4540, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); - if (__Pyx_PyObject_SetAttrStr(__pyx_v_x, __pyx_n_s_shape, __pyx_t_13) < 0) __PYX_ERR(0, 4527, __pyx_L1_error) + if (__Pyx_PyObject_SetAttrStr(__pyx_v_x, __pyx_n_s_shape, __pyx_t_13) < 0) __PYX_ERR(0, 4540, __pyx_L1_error) __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - /* "mtrand.pyx":4528 + /* "mtrand.pyx":4541 * x += mean * x.shape = 
tuple(final_shape) * return x # <<<<<<<<<<<<<< @@ -38156,7 +38283,7 @@ __pyx_r = __pyx_v_x; goto __pyx_L0; - /* "mtrand.pyx":4369 + /* "mtrand.pyx":4382 * * # Multivariate distributions: * def multivariate_normal(self, mean, cov, size=None, check_valid='warn', # <<<<<<<<<<<<<< @@ -38190,7 +38317,7 @@ return __pyx_r; } -/* "mtrand.pyx":4530 +/* "mtrand.pyx":4543 * return x * * def multinomial(self, npy_intp n, object pvals, size=None): # <<<<<<<<<<<<<< @@ -38228,23 +38355,23 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_n)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: - if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_pvals)) != 0)) kw_args--; + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pvals)) != 0)) kw_args--; else { - __Pyx_RaiseArgtupleInvalid("multinomial", 0, 2, 3, 1); __PYX_ERR(0, 4530, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("multinomial", 0, 2, 3, 1); __PYX_ERR(0, 4543, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multinomial") < 0)) __PYX_ERR(0, 4530, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multinomial") < 0)) __PYX_ERR(0, 4543, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -38256,13 +38383,13 @@ default: goto __pyx_L5_argtuple_error; } } - __pyx_v_n = __Pyx_PyInt_As_npy_intp(values[0]); if (unlikely((__pyx_v_n == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4530, __pyx_L3_error) 
+ __pyx_v_n = __Pyx_PyInt_As_npy_intp(values[0]); if (unlikely((__pyx_v_n == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4543, __pyx_L3_error) __pyx_v_pvals = values[1]; __pyx_v_size = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("multinomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4530, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("multinomial", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4543, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.multinomial", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -38305,24 +38432,24 @@ long __pyx_t_13; __Pyx_RefNannySetupContext("multinomial", 0); - /* "mtrand.pyx":4612 + /* "mtrand.pyx":4625 * cdef double Sum * * d = len(pvals) # <<<<<<<<<<<<<< * parr = PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1) * pix = PyArray_DATA(parr) */ - __pyx_t_1 = PyObject_Length(__pyx_v_pvals); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 4612, __pyx_L1_error) + __pyx_t_1 = PyObject_Length(__pyx_v_pvals); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 4625, __pyx_L1_error) __pyx_v_d = __pyx_t_1; - /* "mtrand.pyx":4613 + /* "mtrand.pyx":4626 * * d = len(pvals) * parr = PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1) # <<<<<<<<<<<<<< * pix = PyArray_DATA(parr) * */ - __pyx_t_2 = PyArray_ContiguousFromObject(__pyx_v_pvals, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4613, __pyx_L1_error) + __pyx_t_2 = PyArray_ContiguousFromObject(__pyx_v_pvals, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4626, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); @@ -38330,7 +38457,7 @@ arrayObject_parr = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":4614 + /* "mtrand.pyx":4627 * d = len(pvals) * parr = PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1) * pix = PyArray_DATA(parr) # <<<<<<<<<<<<<< @@ -38339,7 
+38466,7 @@ */ __pyx_v_pix = ((double *)PyArray_DATA(arrayObject_parr)); - /* "mtrand.pyx":4616 + /* "mtrand.pyx":4629 * pix = PyArray_DATA(parr) * * if kahan_sum(pix, d-1) > (1.0 + 1e-12): # <<<<<<<<<<<<<< @@ -38347,22 +38474,22 @@ * */ __pyx_t_4 = ((__pyx_f_6mtrand_kahan_sum(__pyx_v_pix, (__pyx_v_d - 1)) > (1.0 + 1e-12)) != 0); - if (__pyx_t_4) { + if (unlikely(__pyx_t_4)) { - /* "mtrand.pyx":4617 + /* "mtrand.pyx":4630 * * if kahan_sum(pix, d-1) > (1.0 + 1e-12): * raise ValueError("sum(pvals[:-1]) > 1.0") # <<<<<<<<<<<<<< * * shape = _shape_from_size(size, d) */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__170, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4617, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__173, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4630, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(0, 4617, __pyx_L1_error) + __PYX_ERR(0, 4630, __pyx_L1_error) - /* "mtrand.pyx":4616 + /* "mtrand.pyx":4629 * pix = PyArray_DATA(parr) * * if kahan_sum(pix, d-1) > (1.0 + 1e-12): # <<<<<<<<<<<<<< @@ -38371,16 +38498,16 @@ */ } - /* "mtrand.pyx":4619 + /* "mtrand.pyx":4632 * raise ValueError("sum(pvals[:-1]) > 1.0") * * shape = _shape_from_size(size, d) # <<<<<<<<<<<<<< * * multin = np.zeros(shape, int) */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_shape_from_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4619, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_shape_from_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4632, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyInt_From_npy_intp(__pyx_v_d); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4619, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyInt_From_npy_intp(__pyx_v_d); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4632, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; __pyx_t_7 = 0; @@ -38397,7 +38524,7 @@ #if CYTHON_FAST_PYCALL if 
(PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_size, __pyx_t_5}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4619, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4632, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; @@ -38406,14 +38533,14 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_v_size, __pyx_t_5}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4619, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4632, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else #endif { - __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4619, __pyx_L1_error) + __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4632, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; @@ -38424,7 +38551,7 @@ __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4619, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4632, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } @@ -38432,16 +38559,16 @@ __pyx_v_shape = __pyx_t_3; __pyx_t_3 = 0; - /* "mtrand.pyx":4621 + /* "mtrand.pyx":4634 * shape = _shape_from_size(size, d) 
* * multin = np.zeros(shape, int) # <<<<<<<<<<<<<< * mnarr = multin * mnix = PyArray_DATA(mnarr) */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4621, __pyx_L1_error) + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4634, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4621, __pyx_L1_error) + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4634, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; @@ -38459,7 +38586,7 @@ #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_8)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_shape, ((PyObject *)(&PyInt_Type))}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4621, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_8, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4634, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else @@ -38467,13 +38594,13 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_8)) { PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_shape, ((PyObject *)(&PyInt_Type))}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4621, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_8, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4634, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { - __pyx_t_5 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4621, __pyx_L1_error) + __pyx_t_5 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4634, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__pyx_t_2) { __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; @@ -38484,7 +38611,7 @@ __Pyx_INCREF(((PyObject *)(&PyInt_Type))); __Pyx_GIVEREF(((PyObject *)(&PyInt_Type))); PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_7, ((PyObject *)(&PyInt_Type))); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4621, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4634, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } @@ -38492,7 +38619,7 @@ __pyx_v_multin = __pyx_t_3; __pyx_t_3 = 0; - /* "mtrand.pyx":4622 + /* "mtrand.pyx":4635 * * multin = np.zeros(shape, int) * mnarr = multin # <<<<<<<<<<<<<< @@ -38504,7 +38631,7 @@ arrayObject_mnarr = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":4623 + /* "mtrand.pyx":4636 * multin = np.zeros(shape, int) * mnarr = multin * mnix = PyArray_DATA(mnarr) # <<<<<<<<<<<<<< @@ -38513,7 +38640,7 @@ */ __pyx_v_mnix = ((long *)PyArray_DATA(arrayObject_mnarr)); - /* "mtrand.pyx":4624 + /* "mtrand.pyx":4637 * mnarr = multin * mnix = PyArray_DATA(mnarr) * sz = PyArray_SIZE(mnarr) # <<<<<<<<<<<<<< @@ -38522,7 +38649,7 @@ */ __pyx_v_sz = PyArray_SIZE(arrayObject_mnarr); - /* "mtrand.pyx":4625 + /* "mtrand.pyx":4638 * mnix = PyArray_DATA(mnarr) * sz = PyArray_SIZE(mnarr) * with self.lock, nogil, cython.cdivision(True): # <<<<<<<<<<<<<< @@ -38530,9 +38657,9 @@ * while i < sz: */ /*with:*/ { - __pyx_t_9 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4625, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4638, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_8 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 
4625, __pyx_L4_error) + __pyx_t_8 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4638, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { @@ -38545,17 +38672,17 @@ } } if (__pyx_t_5) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4625, __pyx_L4_error) + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4638, __pyx_L4_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { - __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4625, __pyx_L4_error) + __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4638, __pyx_L4_error) } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /*try:*/ { { - if (__pyx_t_10||__pyx_t_11||__pyx_t_12); else {/*mark used*/} + (void)__pyx_t_10; (void)__pyx_t_11; (void)__pyx_t_12; /* mark used */ /*try:*/ { { #ifdef WITH_THREAD @@ -38565,7 +38692,7 @@ #endif /*try:*/ { - /* "mtrand.pyx":4626 + /* "mtrand.pyx":4639 * sz = PyArray_SIZE(mnarr) * with self.lock, nogil, cython.cdivision(True): * i = 0 # <<<<<<<<<<<<<< @@ -38574,7 +38701,7 @@ */ __pyx_v_i = 0; - /* "mtrand.pyx":4627 + /* "mtrand.pyx":4640 * with self.lock, nogil, cython.cdivision(True): * i = 0 * while i < sz: # <<<<<<<<<<<<<< @@ -38585,7 +38712,7 @@ __pyx_t_4 = ((__pyx_v_i < __pyx_v_sz) != 0); if (!__pyx_t_4) break; - /* "mtrand.pyx":4628 + /* "mtrand.pyx":4641 * i = 0 * while i < sz: * Sum = 1.0 # <<<<<<<<<<<<<< @@ -38594,7 +38721,7 @@ */ __pyx_v_Sum = 1.0; - /* "mtrand.pyx":4629 + /* "mtrand.pyx":4642 * while i < sz: * Sum = 1.0 * dn = n # <<<<<<<<<<<<<< @@ -38603,7 +38730,7 @@ */ __pyx_v_dn = __pyx_v_n; - /* "mtrand.pyx":4630 + /* "mtrand.pyx":4643 * Sum = 1.0 * dn = n * for j from 0 <= j < d-1: # <<<<<<<<<<<<<< @@ 
-38613,7 +38740,7 @@ __pyx_t_13 = (__pyx_v_d - 1); for (__pyx_v_j = 0; __pyx_v_j < __pyx_t_13; __pyx_v_j++) { - /* "mtrand.pyx":4631 + /* "mtrand.pyx":4644 * dn = n * for j from 0 <= j < d-1: * mnix[i+j] = rk_binomial(self.internal_state, dn, pix[j]/Sum) # <<<<<<<<<<<<<< @@ -38622,7 +38749,7 @@ */ (__pyx_v_mnix[(__pyx_v_i + __pyx_v_j)]) = rk_binomial(__pyx_v_self->internal_state, __pyx_v_dn, ((__pyx_v_pix[__pyx_v_j]) / __pyx_v_Sum)); - /* "mtrand.pyx":4632 + /* "mtrand.pyx":4645 * for j from 0 <= j < d-1: * mnix[i+j] = rk_binomial(self.internal_state, dn, pix[j]/Sum) * dn = dn - mnix[i+j] # <<<<<<<<<<<<<< @@ -38631,7 +38758,7 @@ */ __pyx_v_dn = (__pyx_v_dn - (__pyx_v_mnix[(__pyx_v_i + __pyx_v_j)])); - /* "mtrand.pyx":4633 + /* "mtrand.pyx":4646 * mnix[i+j] = rk_binomial(self.internal_state, dn, pix[j]/Sum) * dn = dn - mnix[i+j] * if dn <= 0: # <<<<<<<<<<<<<< @@ -38641,7 +38768,7 @@ __pyx_t_4 = ((__pyx_v_dn <= 0) != 0); if (__pyx_t_4) { - /* "mtrand.pyx":4634 + /* "mtrand.pyx":4647 * dn = dn - mnix[i+j] * if dn <= 0: * break # <<<<<<<<<<<<<< @@ -38650,7 +38777,7 @@ */ goto __pyx_L20_break; - /* "mtrand.pyx":4633 + /* "mtrand.pyx":4646 * mnix[i+j] = rk_binomial(self.internal_state, dn, pix[j]/Sum) * dn = dn - mnix[i+j] * if dn <= 0: # <<<<<<<<<<<<<< @@ -38659,7 +38786,7 @@ */ } - /* "mtrand.pyx":4635 + /* "mtrand.pyx":4648 * if dn <= 0: * break * Sum = Sum - pix[j] # <<<<<<<<<<<<<< @@ -38670,7 +38797,7 @@ } __pyx_L20_break:; - /* "mtrand.pyx":4636 + /* "mtrand.pyx":4649 * break * Sum = Sum - pix[j] * if dn > 0: # <<<<<<<<<<<<<< @@ -38680,7 +38807,7 @@ __pyx_t_4 = ((__pyx_v_dn > 0) != 0); if (__pyx_t_4) { - /* "mtrand.pyx":4637 + /* "mtrand.pyx":4650 * Sum = Sum - pix[j] * if dn > 0: * mnix[i+d-1] = dn # <<<<<<<<<<<<<< @@ -38689,7 +38816,7 @@ */ (__pyx_v_mnix[((__pyx_v_i + __pyx_v_d) - 1)]) = __pyx_v_dn; - /* "mtrand.pyx":4636 + /* "mtrand.pyx":4649 * break * Sum = Sum - pix[j] * if dn > 0: # <<<<<<<<<<<<<< @@ -38698,7 +38825,7 @@ */ } - /* "mtrand.pyx":4639 + /* 
"mtrand.pyx":4652 * mnix[i+d-1] = dn * * i = i + d # <<<<<<<<<<<<<< @@ -38709,7 +38836,7 @@ } } - /* "mtrand.pyx":4625 + /* "mtrand.pyx":4638 * mnix = PyArray_DATA(mnarr) * sz = PyArray_SIZE(mnarr) * with self.lock, nogil, cython.cdivision(True): # <<<<<<<<<<<<<< @@ -38733,9 +38860,9 @@ /*finally:*/ { /*normal exit:*/{ if (__pyx_t_9) { - __pyx_t_12 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__171, NULL); + __pyx_t_12 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__174, NULL); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4625, __pyx_L1_error) + if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4638, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } @@ -38750,7 +38877,7 @@ __pyx_L23:; } - /* "mtrand.pyx":4641 + /* "mtrand.pyx":4654 * i = i + d * * return multin # <<<<<<<<<<<<<< @@ -38762,7 +38889,7 @@ __pyx_r = __pyx_v_multin; goto __pyx_L0; - /* "mtrand.pyx":4530 + /* "mtrand.pyx":4543 * return x * * def multinomial(self, npy_intp n, object pvals, size=None): # <<<<<<<<<<<<<< @@ -38789,7 +38916,7 @@ return __pyx_r; } -/* "mtrand.pyx":4643 +/* "mtrand.pyx":4656 * return multin * * def dirichlet(self, object alpha, size=None): # <<<<<<<<<<<<<< @@ -38799,7 +38926,7 @@ /* Python wrapper */ static PyObject *__pyx_pw_6mtrand_11RandomState_101dirichlet(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static char __pyx_doc_6mtrand_11RandomState_100dirichlet[] = "\n dirichlet(alpha, size=None)\n\n Draw samples from the Dirichlet distribution.\n\n Draw `size` samples of dimension k from a Dirichlet distribution. A\n Dirichlet-distributed random variable can be seen as a multivariate\n generalization of a Beta distribution. Dirichlet pdf is the conjugate\n prior of a multinomial in Bayesian inference.\n\n Parameters\n ----------\n alpha : array\n Parameter of the distribution (k dimension for sample of\n dimension k).\n size : int or tuple of ints, optional\n Output shape. 
If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. Default is None, in which case a\n single value is returned.\n\n Returns\n -------\n samples : ndarray,\n The drawn samples, of shape (size, alpha.ndim).\n\n Notes\n -----\n .. math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}\n\n Uses the following property for computation: for each dimension,\n draw a random sample y_i from a standard gamma generator of shape\n `alpha_i`, then\n :math:`X = \\frac{1}{\\sum_{i=1}^k{y_i}} (y_1, \\ldots, y_n)` is\n Dirichlet distributed.\n\n References\n ----------\n .. [1] David McKay, \"Information Theory, Inference and Learning\n Algorithms,\" chapter 23,\n http://www.inference.phy.cam.ac.uk/mackay/\n .. [2] Wikipedia, \"Dirichlet distribution\",\n http://en.wikipedia.org/wiki/Dirichlet_distribution\n\n Examples\n --------\n Taking an example cited in Wikipedia, this distribution can be used if\n one wanted to cut strings (each of initial length 1.0) into K pieces\n with different lengths, where each piece had, on average, a designated\n average length, but allowing some variation in the relative sizes of\n th""e pieces.\n\n >>> s = np.random.dirichlet((10, 5, 3), 20).transpose()\n\n >>> plt.barh(range(20), s[0])\n >>> plt.barh(range(20), s[1], left=s[0], color='g')\n >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')\n >>> plt.title(\"Lengths of Strings\")\n\n "; +static char __pyx_doc_6mtrand_11RandomState_100dirichlet[] = "\n dirichlet(alpha, size=None)\n\n Draw samples from the Dirichlet distribution.\n\n Draw `size` samples of dimension k from a Dirichlet distribution. A\n Dirichlet-distributed random variable can be seen as a multivariate\n generalization of a Beta distribution. Dirichlet pdf is the conjugate\n prior of a multinomial in Bayesian inference.\n\n Parameters\n ----------\n alpha : array\n Parameter of the distribution (k dimension for sample of\n dimension k).\n size : int or tuple of ints, optional\n Output shape. 
If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. Default is None, in which case a\n single value is returned.\n\n Returns\n -------\n samples : ndarray,\n The drawn samples, of shape (size, alpha.ndim).\n\n Raises\n -------\n ValueError\n If any value in alpha is less than or equal to zero\n\n Notes\n -----\n .. math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}\n\n Uses the following property for computation: for each dimension,\n draw a random sample y_i from a standard gamma generator of shape\n `alpha_i`, then\n :math:`X = \\frac{1}{\\sum_{i=1}^k{y_i}} (y_1, \\ldots, y_n)` is\n Dirichlet distributed.\n\n References\n ----------\n .. [1] David McKay, \"Information Theory, Inference and Learning\n Algorithms,\" chapter 23,\n http://www.inference.phy.cam.ac.uk/mackay/\n .. [2] Wikipedia, \"Dirichlet distribution\",\n http://en.wikipedia.org/wiki/Dirichlet_distribution\n\n Examples\n --------\n Taking an example cited in Wikipedia, this distribution can be used if\n one wanted to cut strings (each of initial length 1.0) into K pieces\n with different lengths, where each piece"" had, on average, a designated\n average length, but allowing some variation in the relative sizes of\n the pieces.\n\n >>> s = np.random.dirichlet((10, 5, 3), 20).transpose()\n\n >>> plt.barh(range(20), s[0])\n >>> plt.barh(range(20), s[1], left=s[0], color='g')\n >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')\n >>> plt.title(\"Lengths of Strings\")\n\n "; static PyObject *__pyx_pw_6mtrand_11RandomState_101dirichlet(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_alpha = 0; PyObject *__pyx_v_size = 0; @@ -38824,17 +38951,17 @@ kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: - if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--; + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--; else goto 
__pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { - PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_size); + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dirichlet") < 0)) __PYX_ERR(0, 4643, __pyx_L3_error) + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "dirichlet") < 0)) __PYX_ERR(0, 4656, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { @@ -38850,7 +38977,7 @@ } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("dirichlet", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4643, __pyx_L3_error) + __Pyx_RaiseArgtupleInvalid("dirichlet", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 4656, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("mtrand.RandomState.dirichlet", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); @@ -38883,36 +39010,37 @@ PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_6 = NULL; + int __pyx_t_7; PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; + int __pyx_t_9; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; - int __pyx_t_12; - npy_intp __pyx_t_13; - PyObject *__pyx_t_14 = NULL; - int __pyx_t_15; + PyObject *__pyx_t_12 = NULL; + PyObject *__pyx_t_13 = NULL; + npy_intp __pyx_t_14; + PyObject *__pyx_t_15 = NULL; + int __pyx_t_16; __Pyx_RefNannySetupContext("dirichlet", 0); - /* "mtrand.pyx":4732 + /* "mtrand.pyx":4750 * cdef double acc, invacc * * k = len(alpha) # <<<<<<<<<<<<<< * alpha_arr = PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) - * alpha_data = PyArray_DATA(alpha_arr) + * if np.any(np.less_equal(alpha_arr, 0)): */ - __pyx_t_1 = 
PyObject_Length(__pyx_v_alpha); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 4732, __pyx_L1_error) + __pyx_t_1 = PyObject_Length(__pyx_v_alpha); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 4750, __pyx_L1_error) __pyx_v_k = __pyx_t_1; - /* "mtrand.pyx":4733 + /* "mtrand.pyx":4751 * * k = len(alpha) * alpha_arr = PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) # <<<<<<<<<<<<<< - * alpha_data = PyArray_DATA(alpha_arr) - * + * if np.any(np.less_equal(alpha_arr, 0)): + * raise ValueError('alpha <= 0') */ - __pyx_t_2 = PyArray_ContiguousFromObject(__pyx_v_alpha, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4733, __pyx_L1_error) + __pyx_t_2 = PyArray_ContiguousFromObject(__pyx_v_alpha, NPY_DOUBLE, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4751, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); @@ -38920,144 +39048,278 @@ __pyx_v_alpha_arr = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":4734 + /* "mtrand.pyx":4752 + * k = len(alpha) + * alpha_arr = PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) + * if np.any(np.less_equal(alpha_arr, 0)): # <<<<<<<<<<<<<< + * raise ValueError('alpha <= 0') + * alpha_data = PyArray_DATA(alpha_arr) + */ + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_any); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_less_equal); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = NULL; + __pyx_t_7 = 0; + if (CYTHON_UNPACK_METHODS && 
unlikely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + __pyx_t_7 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, ((PyObject *)__pyx_v_alpha_arr), __pyx_int_0}; + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, ((PyObject *)__pyx_v_alpha_arr), __pyx_int_0}; + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + } else + #endif + { + __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + if (__pyx_t_5) { + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; + } + __Pyx_INCREF(((PyObject *)__pyx_v_alpha_arr)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_alpha_arr)); + PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, ((PyObject *)__pyx_v_alpha_arr)); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_int_0); + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); + if 
(likely(__pyx_t_6)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + } + } + if (!__pyx_t_6) { + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_GOTREF(__pyx_t_3); + } else { + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_t_2}; + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } else + #endif + { + __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL; + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_2); + __pyx_t_2 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 4752, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(__pyx_t_9)) { + + /* "mtrand.pyx":4753 + * alpha_arr = PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) 
+ * if np.any(np.less_equal(alpha_arr, 0)): + * raise ValueError('alpha <= 0') # <<<<<<<<<<<<<< + * alpha_data = PyArray_DATA(alpha_arr) + * + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__175, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4753, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 4753, __pyx_L1_error) + + /* "mtrand.pyx":4752 * k = len(alpha) * alpha_arr = PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) + * if np.any(np.less_equal(alpha_arr, 0)): # <<<<<<<<<<<<<< + * raise ValueError('alpha <= 0') + * alpha_data = PyArray_DATA(alpha_arr) + */ + } + + /* "mtrand.pyx":4754 + * if np.any(np.less_equal(alpha_arr, 0)): + * raise ValueError('alpha <= 0') * alpha_data = PyArray_DATA(alpha_arr) # <<<<<<<<<<<<<< * * shape = _shape_from_size(size, k) */ __pyx_v_alpha_data = ((double *)PyArray_DATA(__pyx_v_alpha_arr)); - /* "mtrand.pyx":4736 + /* "mtrand.pyx":4756 * alpha_data = PyArray_DATA(alpha_arr) * * shape = _shape_from_size(size, k) # <<<<<<<<<<<<<< * * diric = np.zeros(shape, np.float64) */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_shape_from_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4736, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyInt_From_npy_intp(__pyx_v_k); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4736, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_shape_from_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4756, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_5); + __pyx_t_8 = __Pyx_PyInt_From_npy_intp(__pyx_v_k); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4756, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_2 = NULL; + __pyx_t_7 = 
0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_6 = 1; + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_2)) { - PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_v_size, __pyx_t_4}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4736, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + if (PyFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_size, __pyx_t_8}; + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4756, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } else #endif #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { - PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_v_size, __pyx_t_4}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4736, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_size, __pyx_t_8}; + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4756, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } else #endif { - __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4736, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_7); - if (__pyx_t_5) { - __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; + __pyx_t_6 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4756, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (__pyx_t_2) { + __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2); __pyx_t_2 = NULL; } __Pyx_INCREF(__pyx_v_size); __Pyx_GIVEREF(__pyx_v_size); - PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_v_size); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4736, __pyx_L1_error) + PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_7, __pyx_v_size); + __Pyx_GIVEREF(__pyx_t_8); + PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_7, __pyx_t_8); + __pyx_t_8 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4756, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_shape = __pyx_t_3; __pyx_t_3 = 0; - /* "mtrand.pyx":4738 + /* "mtrand.pyx":4758 * shape = _shape_from_size(size, k) * * diric = np.zeros(shape, np.float64) # <<<<<<<<<<<<<< * val_arr = diric * val_data= PyArray_DATA(val_arr) */ - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4738, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4738, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4738, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 4738, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4758, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_2); + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4758, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4758, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4758, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = NULL; + __pyx_t_7 = 0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - __pyx_t_6 = 1; + __Pyx_DECREF_SET(__pyx_t_6, function); + __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_7)) { - PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_shape, __pyx_t_4}; - __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4738, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + if (PyFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_shape, __pyx_t_8}; + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 4758, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } else #endif #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { - PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_v_shape, __pyx_t_4}; - __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4738, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_shape, __pyx_t_8}; + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4758, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } else #endif { - __pyx_t_5 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 4738, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - if (__pyx_t_2) { - __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; + __pyx_t_2 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4758, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (__pyx_t_4) { + __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_6, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_6, __pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4738, __pyx_L1_error) + PyTuple_SET_ITEM(__pyx_t_2, 0+__pyx_t_7, __pyx_v_shape); + __Pyx_GIVEREF(__pyx_t_8); + PyTuple_SET_ITEM(__pyx_t_2, 1+__pyx_t_7, __pyx_t_8); + __pyx_t_8 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_2, 
NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4758, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_diric = __pyx_t_3; __pyx_t_3 = 0; - /* "mtrand.pyx":4739 + /* "mtrand.pyx":4759 * * diric = np.zeros(shape, np.float64) * val_arr = diric # <<<<<<<<<<<<<< @@ -39069,7 +39331,7 @@ __pyx_v_val_arr = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0; - /* "mtrand.pyx":4740 + /* "mtrand.pyx":4760 * diric = np.zeros(shape, np.float64) * val_arr = diric * val_data= PyArray_DATA(val_arr) # <<<<<<<<<<<<<< @@ -39078,7 +39340,7 @@ */ __pyx_v_val_data = ((double *)PyArray_DATA(__pyx_v_val_arr)); - /* "mtrand.pyx":4742 + /* "mtrand.pyx":4762 * val_data= PyArray_DATA(val_arr) * * i = 0 # <<<<<<<<<<<<<< @@ -39087,7 +39349,7 @@ */ __pyx_v_i = 0; - /* "mtrand.pyx":4743 + /* "mtrand.pyx":4763 * * i = 0 * totsize = PyArray_SIZE(val_arr) # <<<<<<<<<<<<<< @@ -39096,7 +39358,7 @@ */ __pyx_v_totsize = PyArray_SIZE(__pyx_v_val_arr); - /* "mtrand.pyx":4744 + /* "mtrand.pyx":4764 * i = 0 * totsize = PyArray_SIZE(val_arr) * with self.lock, nogil: # <<<<<<<<<<<<<< @@ -39104,37 +39366,37 @@ * acc = 0.0 */ /*with:*/ { - __pyx_t_8 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4744, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_7 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4744, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_5 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_5); + __pyx_t_10 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4764, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_10); + __pyx_t_6 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4764, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_2 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); + __Pyx_DECREF_SET(__pyx_t_6, function); } } - if (__pyx_t_5) { - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4744, __pyx_L3_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (__pyx_t_2) { + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4764, __pyx_L4_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { - __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4744, __pyx_L3_error) + __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4764, __pyx_L4_error) } __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /*try:*/ { { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); - __Pyx_XGOTREF(__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_10); + __Pyx_ExceptionSave(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13); __Pyx_XGOTREF(__pyx_t_11); + __Pyx_XGOTREF(__pyx_t_12); + __Pyx_XGOTREF(__pyx_t_13); /*try:*/ { { #ifdef WITH_THREAD @@ -39144,7 +39406,7 @@ #endif /*try:*/ { - /* "mtrand.pyx":4745 + /* "mtrand.pyx":4765 * totsize = PyArray_SIZE(val_arr) * with self.lock, nogil: * while i < totsize: # <<<<<<<<<<<<<< @@ -39152,10 +39414,10 @@ * for j from 0 <= j < k: */ while (1) { - __pyx_t_12 = ((__pyx_v_i < __pyx_v_totsize) != 0); - if (!__pyx_t_12) break; + 
__pyx_t_9 = ((__pyx_v_i < __pyx_v_totsize) != 0); + if (!__pyx_t_9) break; - /* "mtrand.pyx":4746 + /* "mtrand.pyx":4766 * with self.lock, nogil: * while i < totsize: * acc = 0.0 # <<<<<<<<<<<<<< @@ -39164,17 +39426,17 @@ */ __pyx_v_acc = 0.0; - /* "mtrand.pyx":4747 + /* "mtrand.pyx":4767 * while i < totsize: * acc = 0.0 * for j from 0 <= j < k: # <<<<<<<<<<<<<< * val_data[i+j] = rk_standard_gamma(self.internal_state, * alpha_data[j]) */ - __pyx_t_13 = __pyx_v_k; - for (__pyx_v_j = 0; __pyx_v_j < __pyx_t_13; __pyx_v_j++) { + __pyx_t_14 = __pyx_v_k; + for (__pyx_v_j = 0; __pyx_v_j < __pyx_t_14; __pyx_v_j++) { - /* "mtrand.pyx":4748 + /* "mtrand.pyx":4768 * acc = 0.0 * for j from 0 <= j < k: * val_data[i+j] = rk_standard_gamma(self.internal_state, # <<<<<<<<<<<<<< @@ -39183,7 +39445,7 @@ */ (__pyx_v_val_data[(__pyx_v_i + __pyx_v_j)]) = rk_standard_gamma(__pyx_v_self->internal_state, (__pyx_v_alpha_data[__pyx_v_j])); - /* "mtrand.pyx":4750 + /* "mtrand.pyx":4770 * val_data[i+j] = rk_standard_gamma(self.internal_state, * alpha_data[j]) * acc = acc + val_data[i+j] # <<<<<<<<<<<<<< @@ -39193,7 +39455,7 @@ __pyx_v_acc = (__pyx_v_acc + (__pyx_v_val_data[(__pyx_v_i + __pyx_v_j)])); } - /* "mtrand.pyx":4751 + /* "mtrand.pyx":4771 * alpha_data[j]) * acc = acc + val_data[i+j] * invacc = 1/acc # <<<<<<<<<<<<<< @@ -39208,21 +39470,21 @@ #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif - __PYX_ERR(0, 4751, __pyx_L14_error) + __PYX_ERR(0, 4771, __pyx_L15_error) } __pyx_v_invacc = (1.0 / __pyx_v_acc); - /* "mtrand.pyx":4752 + /* "mtrand.pyx":4772 * acc = acc + val_data[i+j] * invacc = 1/acc * for j from 0 <= j < k: # <<<<<<<<<<<<<< * val_data[i+j] = val_data[i+j] * invacc * i = i + k */ - __pyx_t_13 = __pyx_v_k; - for (__pyx_v_j = 0; __pyx_v_j < __pyx_t_13; __pyx_v_j++) { + __pyx_t_14 = __pyx_v_k; + for (__pyx_v_j = 0; __pyx_v_j < __pyx_t_14; __pyx_v_j++) { - /* "mtrand.pyx":4753 + /* "mtrand.pyx":4773 * invacc = 1/acc * for j from 0 <= j < k: * 
val_data[i+j] = val_data[i+j] * invacc # <<<<<<<<<<<<<< @@ -39232,7 +39494,7 @@ (__pyx_v_val_data[(__pyx_v_i + __pyx_v_j)]) = ((__pyx_v_val_data[(__pyx_v_i + __pyx_v_j)]) * __pyx_v_invacc); } - /* "mtrand.pyx":4754 + /* "mtrand.pyx":4774 * for j from 0 <= j < k: * val_data[i+j] = val_data[i+j] * invacc * i = i + k # <<<<<<<<<<<<<< @@ -39243,7 +39505,7 @@ } } - /* "mtrand.pyx":4744 + /* "mtrand.pyx":4764 * i = 0 * totsize = PyArray_SIZE(val_arr) * with self.lock, nogil: # <<<<<<<<<<<<<< @@ -39256,97 +39518,95 @@ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif - goto __pyx_L15; + goto __pyx_L16; } - __pyx_L14_error: { + __pyx_L15_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif - goto __pyx_L7_error; + goto __pyx_L8_error; } - __pyx_L15:; + __pyx_L16:; } } } - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; - goto __pyx_L12_try_end; - __pyx_L7_error:; - __Pyx_PyThreadState_assign - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; + __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; + goto __pyx_L13_try_end; + __pyx_L8_error:; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; /*except:*/ { __Pyx_AddTraceback("mtrand.RandomState.dirichlet", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_7, &__pyx_t_5) < 0) __PYX_ERR(0, 4744, __pyx_L9_except_error) + if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 4764, __pyx_L10_except_error) __Pyx_GOTREF(__pyx_t_3); - __Pyx_GOTREF(__pyx_t_7); - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = PyTuple_Pack(3, __pyx_t_3, __pyx_t_7, __pyx_t_5); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 4744, __pyx_L9_except_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_4, NULL); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_8 = PyTuple_Pack(3, __pyx_t_3, __pyx_t_6, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 4764, __pyx_L10_except_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_15 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_8, NULL); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 4744, __pyx_L9_except_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_14); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - if (__pyx_t_12 < 0) __PYX_ERR(0, 4744, __pyx_L9_except_error) - __pyx_t_15 = ((!(__pyx_t_12 != 0)) != 0); - if (__pyx_t_15) { + if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 4764, __pyx_L10_except_error) + __Pyx_GOTREF(__pyx_t_15); + __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_15); + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + if (__pyx_t_9 < 0) __PYX_ERR(0, 4764, __pyx_L10_except_error) + __pyx_t_16 = ((!(__pyx_t_9 != 0)) != 0); + if (__pyx_t_16) { __Pyx_GIVEREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_7); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ErrRestoreWithState(__pyx_t_3, __pyx_t_7, __pyx_t_5); - __pyx_t_3 = 0; __pyx_t_7 = 0; __pyx_t_5 = 0; - __PYX_ERR(0, 4744, __pyx_L9_except_error) + __Pyx_GIVEREF(__pyx_t_6); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_ErrRestoreWithState(__pyx_t_3, __pyx_t_6, __pyx_t_2); + __pyx_t_3 = 0; __pyx_t_6 = 0; __pyx_t_2 = 0; + __PYX_ERR(0, 4764, __pyx_L10_except_error) } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - goto __pyx_L8_exception_handled; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + goto __pyx_L9_exception_handled; } - __pyx_L9_except_error:; - __Pyx_PyThreadState_assign - 
__Pyx_XGIVEREF(__pyx_t_9); - __Pyx_XGIVEREF(__pyx_t_10); + __pyx_L10_except_error:; __Pyx_XGIVEREF(__pyx_t_11); - __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); + __Pyx_XGIVEREF(__pyx_t_12); + __Pyx_XGIVEREF(__pyx_t_13); + __Pyx_ExceptionReset(__pyx_t_11, __pyx_t_12, __pyx_t_13); goto __pyx_L1_error; - __pyx_L8_exception_handled:; - __Pyx_PyThreadState_assign - __Pyx_XGIVEREF(__pyx_t_9); - __Pyx_XGIVEREF(__pyx_t_10); + __pyx_L9_exception_handled:; __Pyx_XGIVEREF(__pyx_t_11); - __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); - __pyx_L12_try_end:; + __Pyx_XGIVEREF(__pyx_t_12); + __Pyx_XGIVEREF(__pyx_t_13); + __Pyx_ExceptionReset(__pyx_t_11, __pyx_t_12, __pyx_t_13); + __pyx_L13_try_end:; } } /*finally:*/ { /*normal exit:*/{ - if (__pyx_t_8) { - __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_tuple__172, NULL); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4744, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + if (__pyx_t_10) { + __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_tuple__176, NULL); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4764, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_13); + __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } - goto __pyx_L6; + goto __pyx_L7; } - __pyx_L6:; + __pyx_L7:; } - goto __pyx_L25; - __pyx_L3_error:; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L26; + __pyx_L4_error:; + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; goto __pyx_L1_error; - __pyx_L25:; + __pyx_L26:; } - /* "mtrand.pyx":4756 + /* "mtrand.pyx":4776 * i = i + k * * return diric # <<<<<<<<<<<<<< @@ -39358,7 +39618,7 @@ __pyx_r = __pyx_v_diric; goto __pyx_L0; - /* "mtrand.pyx":4643 + /* "mtrand.pyx":4656 * return multin * * def dirichlet(self, object alpha, size=None): # <<<<<<<<<<<<<< @@ -39372,7 +39632,8 @@ __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_7); + 
__Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("mtrand.RandomState.dirichlet", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; @@ -39385,7 +39646,7 @@ return __pyx_r; } -/* "mtrand.pyx":4759 +/* "mtrand.pyx":4779 * * # Shuffling and permutations: * def shuffle(self, object x): # <<<<<<<<<<<<<< @@ -39436,26 +39697,26 @@ PyObject *__pyx_t_16 = NULL; __Pyx_RefNannySetupContext("shuffle", 0); - /* "mtrand.pyx":4796 + /* "mtrand.pyx":4816 * """ * cdef: * npy_intp i, j, n = len(x), stride, itemsize # <<<<<<<<<<<<<< * char* x_ptr * char* buf_ptr */ - __pyx_t_1 = PyObject_Length(__pyx_v_x); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(0, 4796, __pyx_L1_error) + __pyx_t_1 = PyObject_Length(__pyx_v_x); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 4816, __pyx_L1_error) __pyx_v_n = __pyx_t_1; - /* "mtrand.pyx":4800 + /* "mtrand.pyx":4820 * char* buf_ptr * * if type(x) is np.ndarray and x.ndim == 1 and x.size: # <<<<<<<<<<<<<< * # Fast, statically typed path: shuffle the underlying buffer. 
* # Only for non-empty, 1d objects of class ndarray (subclasses such */ - __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4800, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4800, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = (((PyObject *)Py_TYPE(__pyx_v_x)) == __pyx_t_4); @@ -39466,103 +39727,103 @@ __pyx_t_2 = __pyx_t_6; goto __pyx_L4_bool_binop_done; } - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_ndim); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4800, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_ndim); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_t_4, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4800, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_t_4, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4800, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4820, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { } else { __pyx_t_2 = __pyx_t_6; goto __pyx_L4_bool_binop_done; } - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4800, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4820, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4800, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 4820, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __pyx_t_6; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { - /* "mtrand.pyx":4804 + /* "mtrand.pyx":4824 * # Only for non-empty, 1d objects of class ndarray (subclasses such * # as MaskedArrays may not support this approach). * x_ptr = x.ctypes.data # <<<<<<<<<<<<<< * stride = x.strides[0] * itemsize = x.dtype.itemsize */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_ctypes); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4804, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_ctypes); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4804, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_7 = __Pyx_PyInt_As_size_t(__pyx_t_4); if (unlikely((__pyx_t_7 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4804, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyInt_As_size_t(__pyx_t_4); if (unlikely((__pyx_t_7 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4824, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_x_ptr = ((char *)((size_t)__pyx_t_7)); - /* "mtrand.pyx":4805 + /* "mtrand.pyx":4825 * # as MaskedArrays may not support this approach). 
* x_ptr = x.ctypes.data * stride = x.strides[0] # <<<<<<<<<<<<<< * itemsize = x.dtype.itemsize * # As the array x could contain python objects we use a buffer */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_strides); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4805, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_strides); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4825, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_4, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4805, __pyx_L1_error) + __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_4, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4825, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_8 = __Pyx_PyInt_As_npy_intp(__pyx_t_3); if (unlikely((__pyx_t_8 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4805, __pyx_L1_error) + __pyx_t_8 = __Pyx_PyInt_As_npy_intp(__pyx_t_3); if (unlikely((__pyx_t_8 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4825, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_stride = __pyx_t_8; - /* "mtrand.pyx":4806 + /* "mtrand.pyx":4826 * x_ptr = x.ctypes.data * stride = x.strides[0] * itemsize = x.dtype.itemsize # <<<<<<<<<<<<<< * # As the array x could contain python objects we use a buffer * # of bytes for the swaps to avoid leaving one of the objects */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4806, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4806, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4826, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_8 = __Pyx_PyInt_As_npy_intp(__pyx_t_4); if (unlikely((__pyx_t_8 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4806, __pyx_L1_error) + __pyx_t_8 = __Pyx_PyInt_As_npy_intp(__pyx_t_4); if (unlikely((__pyx_t_8 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 4826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_itemsize = __pyx_t_8; - /* "mtrand.pyx":4811 + /* "mtrand.pyx":4831 * # within the buffer and erroneously decrementing it's refcount * # when the function exits. * buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit # <<<<<<<<<<<<<< * buf_ptr = buf.ctypes.data * with self.lock: */ - __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4811, __pyx_L1_error) + __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4811, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyInt_From_npy_intp(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4811, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyInt_From_npy_intp(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4811, __pyx_L1_error) + __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyDict_New(); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4811, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 4831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); - __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4811, __pyx_L1_error) + __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_int8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4811, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_int8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_11) < 0) __PYX_ERR(0, 4811, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_11) < 0) __PYX_ERR(0, 4831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_9, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4811, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_9, __pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -39570,23 +39831,23 @@ __pyx_v_buf = __pyx_t_11; __pyx_t_11 = 0; - /* "mtrand.pyx":4812 + /* "mtrand.pyx":4832 * # when the function exits. 
* buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit * buf_ptr = buf.ctypes.data # <<<<<<<<<<<<<< * with self.lock: * # We trick gcc into providing a specialized implementation for */ - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_buf, __pyx_n_s_ctypes); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4812, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_buf, __pyx_n_s_ctypes); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4832, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4812, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4832, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_7 = __Pyx_PyInt_As_size_t(__pyx_t_4); if (unlikely((__pyx_t_7 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4812, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyInt_As_size_t(__pyx_t_4); if (unlikely((__pyx_t_7 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 4832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_buf_ptr = ((char *)((size_t)__pyx_t_7)); - /* "mtrand.pyx":4813 + /* "mtrand.pyx":4833 * buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit * buf_ptr = buf.ctypes.data * with self.lock: # <<<<<<<<<<<<<< @@ -39594,9 +39855,9 @@ * # the most common case, yielding a ~33% performance improvement. 
*/ /*with:*/ { - __pyx_t_12 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4813, __pyx_L1_error) + __pyx_t_12 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); - __pyx_t_11 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4813, __pyx_L7_error) + __pyx_t_11 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4833, __pyx_L7_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { @@ -39609,10 +39870,10 @@ } } if (__pyx_t_9) { - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4813, __pyx_L7_error) + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4833, __pyx_L7_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { - __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4813, __pyx_L7_error) + __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4833, __pyx_L7_error) } __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; @@ -39627,7 +39888,7 @@ __Pyx_XGOTREF(__pyx_t_15); /*try:*/ { - /* "mtrand.pyx":4817 + /* "mtrand.pyx":4837 * # the most common case, yielding a ~33% performance improvement. * # Note that apparently, only one branch can ever be specialized. * if itemsize == sizeof(npy_intp): # <<<<<<<<<<<<<< @@ -39637,18 +39898,18 @@ __pyx_t_2 = ((__pyx_v_itemsize == (sizeof(npy_intp))) != 0); if (__pyx_t_2) { - /* "mtrand.pyx":4818 + /* "mtrand.pyx":4838 * # Note that apparently, only one branch can ever be specialized. 
* if itemsize == sizeof(npy_intp): * self._shuffle_raw(n, sizeof(npy_intp), stride, x_ptr, buf_ptr) # <<<<<<<<<<<<<< * else: * self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr) */ - __pyx_t_4 = __pyx_f_6mtrand_11RandomState__shuffle_raw(__pyx_v_self, __pyx_v_n, (sizeof(npy_intp)), __pyx_v_stride, __pyx_v_x_ptr, __pyx_v_buf_ptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4818, __pyx_L11_error) + __pyx_t_4 = __pyx_f_6mtrand_11RandomState__shuffle_raw(__pyx_v_self, __pyx_v_n, (sizeof(npy_intp)), __pyx_v_stride, __pyx_v_x_ptr, __pyx_v_buf_ptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4838, __pyx_L11_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - /* "mtrand.pyx":4817 + /* "mtrand.pyx":4837 * # the most common case, yielding a ~33% performance improvement. * # Note that apparently, only one branch can ever be specialized. * if itemsize == sizeof(npy_intp): # <<<<<<<<<<<<<< @@ -39658,7 +39919,7 @@ goto __pyx_L17; } - /* "mtrand.pyx":4820 + /* "mtrand.pyx":4840 * self._shuffle_raw(n, sizeof(npy_intp), stride, x_ptr, buf_ptr) * else: * self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr) # <<<<<<<<<<<<<< @@ -39666,13 +39927,13 @@ * # Multidimensional ndarrays require a bounce buffer. 
*/ /*else*/ { - __pyx_t_4 = __pyx_f_6mtrand_11RandomState__shuffle_raw(__pyx_v_self, __pyx_v_n, __pyx_v_itemsize, __pyx_v_stride, __pyx_v_x_ptr, __pyx_v_buf_ptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4820, __pyx_L11_error) + __pyx_t_4 = __pyx_f_6mtrand_11RandomState__shuffle_raw(__pyx_v_self, __pyx_v_n, __pyx_v_itemsize, __pyx_v_stride, __pyx_v_x_ptr, __pyx_v_buf_ptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4840, __pyx_L11_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __pyx_L17:; - /* "mtrand.pyx":4813 + /* "mtrand.pyx":4833 * buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit * buf_ptr = buf.ctypes.data * with self.lock: # <<<<<<<<<<<<<< @@ -39685,7 +39946,6 @@ __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; goto __pyx_L16_try_end; __pyx_L11_error:; - __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -39693,20 +39953,20 @@ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; /*except:*/ { __Pyx_AddTraceback("mtrand.RandomState.shuffle", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_11, &__pyx_t_9) < 0) __PYX_ERR(0, 4813, __pyx_L13_except_error) + if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_11, &__pyx_t_9) < 0) __PYX_ERR(0, 4833, __pyx_L13_except_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_11); __Pyx_GOTREF(__pyx_t_9); - __pyx_t_3 = PyTuple_Pack(3, __pyx_t_4, __pyx_t_11, __pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4813, __pyx_L13_except_error) + __pyx_t_3 = PyTuple_Pack(3, __pyx_t_4, __pyx_t_11, __pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 4833, __pyx_L13_except_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_16 = __Pyx_PyObject_Call(__pyx_t_12, __pyx_t_3, NULL); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4813, __pyx_L13_except_error) + if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4833, 
__pyx_L13_except_error) __Pyx_GOTREF(__pyx_t_16); __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_16); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; - if (__pyx_t_2 < 0) __PYX_ERR(0, 4813, __pyx_L13_except_error) + if (__pyx_t_2 < 0) __PYX_ERR(0, 4833, __pyx_L13_except_error) __pyx_t_6 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_4); @@ -39714,7 +39974,7 @@ __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestoreWithState(__pyx_t_4, __pyx_t_11, __pyx_t_9); __pyx_t_4 = 0; __pyx_t_11 = 0; __pyx_t_9 = 0; - __PYX_ERR(0, 4813, __pyx_L13_except_error) + __PYX_ERR(0, 4833, __pyx_L13_except_error) } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; @@ -39722,14 +39982,12 @@ goto __pyx_L12_exception_handled; } __pyx_L13_except_error:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_13); __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_15); __Pyx_ExceptionReset(__pyx_t_13, __pyx_t_14, __pyx_t_15); goto __pyx_L1_error; __pyx_L12_exception_handled:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_13); __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_15); @@ -39740,9 +39998,9 @@ /*finally:*/ { /*normal exit:*/{ if (__pyx_t_12) { - __pyx_t_15 = __Pyx_PyObject_Call(__pyx_t_12, __pyx_tuple__173, NULL); + __pyx_t_15 = __Pyx_PyObject_Call(__pyx_t_12, __pyx_tuple__177, NULL); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 4813, __pyx_L1_error) + if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 4833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; } @@ -39757,7 +40015,7 @@ __pyx_L21:; } - /* "mtrand.pyx":4800 + /* "mtrand.pyx":4820 * char* buf_ptr * * if type(x) is np.ndarray and x.ndim == 1 and x.size: # <<<<<<<<<<<<<< @@ -39767,19 +40025,19 @@ goto __pyx_L3; } - /* "mtrand.pyx":4821 + /* "mtrand.pyx":4841 * else: * self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr) * elif isinstance(x, np.ndarray) and x.ndim > 1 and x.size: # <<<<<<<<<<<<<< * # Multidimensional ndarrays 
require a bounce buffer. * buf = np.empty_like(x[0]) */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4821, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4841, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4821, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_ndarray); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4841, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_2 = PyObject_IsInstance(__pyx_v_x, __pyx_t_11); if (unlikely(__pyx_t_2 == -1)) __PYX_ERR(0, 4821, __pyx_L1_error) + __pyx_t_2 = PyObject_IsInstance(__pyx_v_x, __pyx_t_11); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 4841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_5 = (__pyx_t_2 != 0); if (__pyx_t_5) { @@ -39787,38 +40045,38 @@ __pyx_t_6 = __pyx_t_5; goto __pyx_L22_bool_binop_done; } - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_ndim); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4821, __pyx_L1_error) + __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_ndim); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4841, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); - __pyx_t_9 = PyObject_RichCompare(__pyx_t_11, __pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4821, __pyx_L1_error) + __pyx_t_9 = PyObject_RichCompare(__pyx_t_11, __pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4821, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (__pyx_t_5) { } 
else { __pyx_t_6 = __pyx_t_5; goto __pyx_L22_bool_binop_done; } - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_size); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4821, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_size); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4841, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4821, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 4841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_6 = __pyx_t_5; __pyx_L22_bool_binop_done:; if (__pyx_t_6) { - /* "mtrand.pyx":4823 + /* "mtrand.pyx":4843 * elif isinstance(x, np.ndarray) and x.ndim > 1 and x.size: * # Multidimensional ndarrays require a bounce buffer. * buf = np.empty_like(x[0]) # <<<<<<<<<<<<<< * with self.lock: * for i in reversed(range(1, n)): */ - __pyx_t_11 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4823, __pyx_L1_error) + __pyx_t_11 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4843, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_empty_like); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4823, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_empty_like); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4843, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = __Pyx_GetItemInt(__pyx_v_x, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4823, __pyx_L1_error) + __pyx_t_11 = __Pyx_GetItemInt(__pyx_v_x, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4843, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { @@ -39831,14 +40089,14 @@ } } if (!__pyx_t_3) 
{ - __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4823, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4843, __pyx_L1_error) __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_9); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_11}; - __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4823, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4843, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; @@ -39847,20 +40105,20 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_11}; - __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4823, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4843, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } else #endif { - __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4823, __pyx_L1_error) + __pyx_t_10 = PyTuple_New(1+1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4843, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_10, 0+1, __pyx_t_11); __pyx_t_11 = 0; - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4823, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4843, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } @@ -39869,7 +40127,7 @@ __pyx_v_buf = __pyx_t_9; __pyx_t_9 = 0; - /* "mtrand.pyx":4824 + /* "mtrand.pyx":4844 * # Multidimensional ndarrays require a bounce buffer. * buf = np.empty_like(x[0]) * with self.lock: # <<<<<<<<<<<<<< @@ -39877,9 +40135,9 @@ * j = rk_interval(i, self.internal_state) */ /*with:*/ { - __pyx_t_12 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4824, __pyx_L1_error) + __pyx_t_12 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); - __pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4824, __pyx_L25_error) + __pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4844, __pyx_L25_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_10 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { @@ -39892,10 +40150,10 @@ } } if (__pyx_t_10) { - __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4824, __pyx_L25_error) + __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4844, __pyx_L25_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else { - __pyx_t_9 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4824, __pyx_L25_error) + __pyx_t_9 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4844, __pyx_L25_error) } __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -39910,7 +40168,7 @@ __Pyx_XGOTREF(__pyx_t_13); /*try:*/ { - /* "mtrand.pyx":4825 + /* "mtrand.pyx":4845 * buf = np.empty_like(x[0]) * with self.lock: * for i in reversed(range(1, n)): # <<<<<<<<<<<<<< @@ -39920,7 +40178,7 @@ for (__pyx_t_8 = __pyx_v_n-1; __pyx_t_8 
>= 1; __pyx_t_8-=1) { __pyx_v_i = __pyx_t_8; - /* "mtrand.pyx":4826 + /* "mtrand.pyx":4846 * with self.lock: * for i in reversed(range(1, n)): * j = rk_interval(i, self.internal_state) # <<<<<<<<<<<<<< @@ -39929,41 +40187,41 @@ */ __pyx_v_j = rk_interval(__pyx_v_i, __pyx_v_self->internal_state); - /* "mtrand.pyx":4827 + /* "mtrand.pyx":4847 * for i in reversed(range(1, n)): * j = rk_interval(i, self.internal_state) * buf[...] = x[j] # <<<<<<<<<<<<<< * x[j] = x[i] * x[i] = buf */ - __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_j, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4827, __pyx_L29_error) + __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_j, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4847, __pyx_L29_error) __Pyx_GOTREF(__pyx_t_9); - if (unlikely(PyObject_SetItem(__pyx_v_buf, Py_Ellipsis, __pyx_t_9) < 0)) __PYX_ERR(0, 4827, __pyx_L29_error) + if (unlikely(PyObject_SetItem(__pyx_v_buf, Py_Ellipsis, __pyx_t_9) < 0)) __PYX_ERR(0, 4847, __pyx_L29_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4828 + /* "mtrand.pyx":4848 * j = rk_interval(i, self.internal_state) * buf[...] 
= x[j] * x[j] = x[i] # <<<<<<<<<<<<<< * x[i] = buf * else: */ - __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_i, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4828, __pyx_L29_error) + __pyx_t_9 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_i, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4848, __pyx_L29_error) __Pyx_GOTREF(__pyx_t_9); - if (unlikely(__Pyx_SetItemInt(__pyx_v_x, __pyx_v_j, __pyx_t_9, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1) < 0)) __PYX_ERR(0, 4828, __pyx_L29_error) + if (unlikely(__Pyx_SetItemInt(__pyx_v_x, __pyx_v_j, __pyx_t_9, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1) < 0)) __PYX_ERR(0, 4848, __pyx_L29_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4829 + /* "mtrand.pyx":4849 * buf[...] = x[j] * x[j] = x[i] * x[i] = buf # <<<<<<<<<<<<<< * else: * # Untyped path. */ - if (unlikely(__Pyx_SetItemInt(__pyx_v_x, __pyx_v_i, __pyx_v_buf, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1) < 0)) __PYX_ERR(0, 4829, __pyx_L29_error) + if (unlikely(__Pyx_SetItemInt(__pyx_v_x, __pyx_v_i, __pyx_v_buf, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1) < 0)) __PYX_ERR(0, 4849, __pyx_L29_error) } - /* "mtrand.pyx":4824 + /* "mtrand.pyx":4844 * # Multidimensional ndarrays require a bounce buffer. 
* buf = np.empty_like(x[0]) * with self.lock: # <<<<<<<<<<<<<< @@ -39976,7 +40234,6 @@ __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; goto __pyx_L34_try_end; __pyx_L29_error:; - __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; @@ -39984,20 +40241,20 @@ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /*except:*/ { __Pyx_AddTraceback("mtrand.RandomState.shuffle", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_4, &__pyx_t_10) < 0) __PYX_ERR(0, 4824, __pyx_L31_except_error) + if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_4, &__pyx_t_10) < 0) __PYX_ERR(0, 4844, __pyx_L31_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_10); - __pyx_t_11 = PyTuple_Pack(3, __pyx_t_9, __pyx_t_4, __pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4824, __pyx_L31_except_error) + __pyx_t_11 = PyTuple_Pack(3, __pyx_t_9, __pyx_t_4, __pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4844, __pyx_L31_except_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_16 = __Pyx_PyObject_Call(__pyx_t_12, __pyx_t_11, NULL); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4824, __pyx_L31_except_error) + if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4844, __pyx_L31_except_error) __Pyx_GOTREF(__pyx_t_16); __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_16); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; - if (__pyx_t_6 < 0) __PYX_ERR(0, 4824, __pyx_L31_except_error) + if (__pyx_t_6 < 0) __PYX_ERR(0, 4844, __pyx_L31_except_error) __pyx_t_5 = ((!(__pyx_t_6 != 0)) != 0); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_9); @@ -40005,7 +40262,7 @@ __Pyx_XGIVEREF(__pyx_t_10); __Pyx_ErrRestoreWithState(__pyx_t_9, __pyx_t_4, __pyx_t_10); __pyx_t_9 = 0; __pyx_t_4 = 0; __pyx_t_10 = 0; - __PYX_ERR(0, 4824, __pyx_L31_except_error) + __PYX_ERR(0, 4844, __pyx_L31_except_error) } __Pyx_DECREF(__pyx_t_9); 
__pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -40013,14 +40270,12 @@ goto __pyx_L30_exception_handled; } __pyx_L31_except_error:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_15); __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_13); __Pyx_ExceptionReset(__pyx_t_15, __pyx_t_14, __pyx_t_13); goto __pyx_L1_error; __pyx_L30_exception_handled:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_15); __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_13); @@ -40031,9 +40286,9 @@ /*finally:*/ { /*normal exit:*/{ if (__pyx_t_12) { - __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_12, __pyx_tuple__174, NULL); + __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_12, __pyx_tuple__178, NULL); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4824, __pyx_L1_error) + if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 4844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } @@ -40048,7 +40303,7 @@ __pyx_L40:; } - /* "mtrand.pyx":4821 + /* "mtrand.pyx":4841 * else: * self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr) * elif isinstance(x, np.ndarray) and x.ndim > 1 and x.size: # <<<<<<<<<<<<<< @@ -40058,7 +40313,7 @@ goto __pyx_L3; } - /* "mtrand.pyx":4832 + /* "mtrand.pyx":4852 * else: * # Untyped path. 
* with self.lock: # <<<<<<<<<<<<<< @@ -40067,9 +40322,9 @@ */ /*else*/ { /*with:*/ { - __pyx_t_12 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4832, __pyx_L1_error) + __pyx_t_12 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_exit); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 4852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); - __pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4832, __pyx_L41_error) + __pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_v_self->lock, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4852, __pyx_L41_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { @@ -40082,10 +40337,10 @@ } } if (__pyx_t_9) { - __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4832, __pyx_L41_error) + __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4852, __pyx_L41_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { - __pyx_t_10 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4832, __pyx_L41_error) + __pyx_t_10 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4852, __pyx_L41_error) } __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; @@ -40100,7 +40355,7 @@ __Pyx_XGOTREF(__pyx_t_15); /*try:*/ { - /* "mtrand.pyx":4833 + /* "mtrand.pyx":4853 * # Untyped path. 
* with self.lock: * for i in reversed(range(1, n)): # <<<<<<<<<<<<<< @@ -40110,7 +40365,7 @@ for (__pyx_t_8 = __pyx_v_n-1; __pyx_t_8 >= 1; __pyx_t_8-=1) { __pyx_v_i = __pyx_t_8; - /* "mtrand.pyx":4834 + /* "mtrand.pyx":4854 * with self.lock: * for i in reversed(range(1, n)): * j = rk_interval(i, self.internal_state) # <<<<<<<<<<<<<< @@ -40119,24 +40374,24 @@ */ __pyx_v_j = rk_interval(__pyx_v_i, __pyx_v_self->internal_state); - /* "mtrand.pyx":4835 + /* "mtrand.pyx":4855 * for i in reversed(range(1, n)): * j = rk_interval(i, self.internal_state) * x[i], x[j] = x[j], x[i] # <<<<<<<<<<<<<< * * cdef inline _shuffle_raw(self, npy_intp n, npy_intp itemsize, */ - __pyx_t_10 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_j, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4835, __pyx_L45_error) + __pyx_t_10 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_j, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 4855, __pyx_L45_error) __Pyx_GOTREF(__pyx_t_10); - __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_i, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4835, __pyx_L45_error) + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_x, __pyx_v_i, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 4855, __pyx_L45_error) __Pyx_GOTREF(__pyx_t_4); - if (unlikely(__Pyx_SetItemInt(__pyx_v_x, __pyx_v_i, __pyx_t_10, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1) < 0)) __PYX_ERR(0, 4835, __pyx_L45_error) + if (unlikely(__Pyx_SetItemInt(__pyx_v_x, __pyx_v_i, __pyx_t_10, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1) < 0)) __PYX_ERR(0, 4855, __pyx_L45_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - if (unlikely(__Pyx_SetItemInt(__pyx_v_x, __pyx_v_j, __pyx_t_4, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 1, 1) < 0)) __PYX_ERR(0, 4835, __pyx_L45_error) + if (unlikely(__Pyx_SetItemInt(__pyx_v_x, __pyx_v_j, __pyx_t_4, npy_intp, 1, __Pyx_PyInt_From_npy_intp, 0, 
1, 1) < 0)) __PYX_ERR(0, 4855, __pyx_L45_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } - /* "mtrand.pyx":4832 + /* "mtrand.pyx":4852 * else: * # Untyped path. * with self.lock: # <<<<<<<<<<<<<< @@ -40149,7 +40404,6 @@ __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; goto __pyx_L50_try_end; __pyx_L45_error:; - __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -40157,20 +40411,20 @@ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; /*except:*/ { __Pyx_AddTraceback("mtrand.RandomState.shuffle", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_10, &__pyx_t_9) < 0) __PYX_ERR(0, 4832, __pyx_L47_except_error) + if (__Pyx_GetException(&__pyx_t_4, &__pyx_t_10, &__pyx_t_9) < 0) __PYX_ERR(0, 4852, __pyx_L47_except_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GOTREF(__pyx_t_10); __Pyx_GOTREF(__pyx_t_9); - __pyx_t_11 = PyTuple_Pack(3, __pyx_t_4, __pyx_t_10, __pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4832, __pyx_L47_except_error) + __pyx_t_11 = PyTuple_Pack(3, __pyx_t_4, __pyx_t_10, __pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 4852, __pyx_L47_except_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_16 = __Pyx_PyObject_Call(__pyx_t_12, __pyx_t_11, NULL); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4832, __pyx_L47_except_error) + if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 4852, __pyx_L47_except_error) __Pyx_GOTREF(__pyx_t_16); __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_16); __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; - if (__pyx_t_5 < 0) __PYX_ERR(0, 4832, __pyx_L47_except_error) + if (__pyx_t_5 < 0) __PYX_ERR(0, 4852, __pyx_L47_except_error) __pyx_t_6 = ((!(__pyx_t_5 != 0)) != 0); if (__pyx_t_6) { __Pyx_GIVEREF(__pyx_t_4); @@ -40178,7 +40432,7 @@ __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestoreWithState(__pyx_t_4, __pyx_t_10, __pyx_t_9); __pyx_t_4 = 0; __pyx_t_10 = 0; 
__pyx_t_9 = 0; - __PYX_ERR(0, 4832, __pyx_L47_except_error) + __PYX_ERR(0, 4852, __pyx_L47_except_error) } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; @@ -40186,14 +40440,12 @@ goto __pyx_L46_exception_handled; } __pyx_L47_except_error:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_13); __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_15); __Pyx_ExceptionReset(__pyx_t_13, __pyx_t_14, __pyx_t_15); goto __pyx_L1_error; __pyx_L46_exception_handled:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_13); __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_15); @@ -40204,9 +40456,9 @@ /*finally:*/ { /*normal exit:*/{ if (__pyx_t_12) { - __pyx_t_15 = __Pyx_PyObject_Call(__pyx_t_12, __pyx_tuple__175, NULL); + __pyx_t_15 = __Pyx_PyObject_Call(__pyx_t_12, __pyx_tuple__179, NULL); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 4832, __pyx_L1_error) + if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 4852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_15); __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; } @@ -40223,7 +40475,7 @@ } __pyx_L3:; - /* "mtrand.pyx":4759 + /* "mtrand.pyx":4779 * * # Shuffling and permutations: * def shuffle(self, object x): # <<<<<<<<<<<<<< @@ -40249,7 +40501,7 @@ return __pyx_r; } -/* "mtrand.pyx":4837 +/* "mtrand.pyx":4857 * x[i], x[j] = x[j], x[i] * * cdef inline _shuffle_raw(self, npy_intp n, npy_intp itemsize, # <<<<<<<<<<<<<< @@ -40266,7 +40518,7 @@ int __pyx_t_2; __Pyx_RefNannySetupContext("_shuffle_raw", 0); - /* "mtrand.pyx":4840 + /* "mtrand.pyx":4860 * npy_intp stride, char* data, char* buf): * cdef npy_intp i, j * for i in reversed(range(1, n)): # <<<<<<<<<<<<<< @@ -40276,7 +40528,7 @@ for (__pyx_t_1 = __pyx_v_n-1; __pyx_t_1 >= 1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; - /* "mtrand.pyx":4841 + /* "mtrand.pyx":4861 * cdef npy_intp i, j * for i in reversed(range(1, n)): * j = rk_interval(i, self.internal_state) # <<<<<<<<<<<<<< @@ -40285,7 +40537,7 @@ */ __pyx_v_j = 
rk_interval(__pyx_v_i, __pyx_v_self->internal_state); - /* "mtrand.pyx":4842 + /* "mtrand.pyx":4862 * for i in reversed(range(1, n)): * j = rk_interval(i, self.internal_state) * if i == j : continue # i == j is not needed and memcpy is undefined. # <<<<<<<<<<<<<< @@ -40297,36 +40549,36 @@ goto __pyx_L3_continue; } - /* "mtrand.pyx":4843 + /* "mtrand.pyx":4863 * j = rk_interval(i, self.internal_state) * if i == j : continue # i == j is not needed and memcpy is undefined. * string.memcpy(buf, data + j * stride, itemsize) # <<<<<<<<<<<<<< * string.memcpy(data + j * stride, data + i * stride, itemsize) * string.memcpy(data + i * stride, buf, itemsize) */ - memcpy(__pyx_v_buf, (__pyx_v_data + (__pyx_v_j * __pyx_v_stride)), __pyx_v_itemsize); + (void)(memcpy(__pyx_v_buf, (__pyx_v_data + (__pyx_v_j * __pyx_v_stride)), __pyx_v_itemsize)); - /* "mtrand.pyx":4844 + /* "mtrand.pyx":4864 * if i == j : continue # i == j is not needed and memcpy is undefined. * string.memcpy(buf, data + j * stride, itemsize) * string.memcpy(data + j * stride, data + i * stride, itemsize) # <<<<<<<<<<<<<< * string.memcpy(data + i * stride, buf, itemsize) * */ - memcpy((__pyx_v_data + (__pyx_v_j * __pyx_v_stride)), (__pyx_v_data + (__pyx_v_i * __pyx_v_stride)), __pyx_v_itemsize); + (void)(memcpy((__pyx_v_data + (__pyx_v_j * __pyx_v_stride)), (__pyx_v_data + (__pyx_v_i * __pyx_v_stride)), __pyx_v_itemsize)); - /* "mtrand.pyx":4845 + /* "mtrand.pyx":4865 * string.memcpy(buf, data + j * stride, itemsize) * string.memcpy(data + j * stride, data + i * stride, itemsize) * string.memcpy(data + i * stride, buf, itemsize) # <<<<<<<<<<<<<< * * def permutation(self, object x): */ - memcpy((__pyx_v_data + (__pyx_v_i * __pyx_v_stride)), __pyx_v_buf, __pyx_v_itemsize); + (void)(memcpy((__pyx_v_data + (__pyx_v_i * __pyx_v_stride)), __pyx_v_buf, __pyx_v_itemsize)); __pyx_L3_continue:; } - /* "mtrand.pyx":4837 + /* "mtrand.pyx":4857 * x[i], x[j] = x[j], x[i] * * cdef inline _shuffle_raw(self, npy_intp n, npy_intp 
itemsize, # <<<<<<<<<<<<<< @@ -40341,7 +40593,7 @@ return __pyx_r; } -/* "mtrand.pyx":4847 +/* "mtrand.pyx":4867 * string.memcpy(data + i * stride, buf, itemsize) * * def permutation(self, object x): # <<<<<<<<<<<<<< @@ -40376,16 +40628,16 @@ PyObject *__pyx_t_7 = NULL; __Pyx_RefNannySetupContext("permutation", 0); - /* "mtrand.pyx":4883 + /* "mtrand.pyx":4903 * * """ * if isinstance(x, (int, long, np.integer)): # <<<<<<<<<<<<<< * arr = np.arange(x) * else: */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4883, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4903, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_integer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4883, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_integer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4903, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_4 = PyInt_Check(__pyx_v_x); @@ -40410,16 +40662,16 @@ __pyx_t_5 = (__pyx_t_3 != 0); if (__pyx_t_5) { - /* "mtrand.pyx":4884 + /* "mtrand.pyx":4904 * """ * if isinstance(x, (int, long, np.integer)): * arr = np.arange(x) # <<<<<<<<<<<<<< * else: * arr = np.array(x) */ - __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4884, __pyx_L1_error) + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4904, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_arange); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4884, __pyx_L1_error) + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_arange); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4904, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; @@ -40433,13 +40685,13 @@ } } if (!__pyx_t_1) { - __pyx_t_2 = 
__Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_x); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4884, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_v_x); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4904, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_v_x}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4884, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4904, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -40447,19 +40699,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_v_x}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4884, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4904, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4884, __pyx_L1_error) + __pyx_t_7 = PyTuple_New(1+1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4904, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_1); __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_7, 0+1, __pyx_v_x); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4884, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4904, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } @@ -40468,7 +40720,7 @@ __pyx_v_arr = __pyx_t_2; __pyx_t_2 = 0; - /* 
"mtrand.pyx":4883 + /* "mtrand.pyx":4903 * * """ * if isinstance(x, (int, long, np.integer)): # <<<<<<<<<<<<<< @@ -40478,7 +40730,7 @@ goto __pyx_L3; } - /* "mtrand.pyx":4886 + /* "mtrand.pyx":4906 * arr = np.arange(x) * else: * arr = np.array(x) # <<<<<<<<<<<<<< @@ -40486,9 +40738,9 @@ * return arr */ /*else*/ { - __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4886, __pyx_L1_error) + __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_array); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4886, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_array); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = NULL; @@ -40502,13 +40754,13 @@ } } if (!__pyx_t_6) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_v_x); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4886, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_v_x); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v_x}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4886, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4906, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -40516,19 +40768,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v_x}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4886, __pyx_L1_error) + __pyx_t_2 = 
__Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4906, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4886, __pyx_L1_error) + __pyx_t_1 = PyTuple_New(1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_INCREF(__pyx_v_x); __Pyx_GIVEREF(__pyx_v_x); PyTuple_SET_ITEM(__pyx_t_1, 0+1, __pyx_v_x); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4886, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_1, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } @@ -40539,14 +40791,14 @@ } __pyx_L3:; - /* "mtrand.pyx":4887 + /* "mtrand.pyx":4907 * else: * arr = np.array(x) * self.shuffle(arr) # <<<<<<<<<<<<<< * return arr * */ - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_shuffle); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4887, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_shuffle); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { @@ -40559,13 +40811,13 @@ } } if (!__pyx_t_1) { - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_v_arr); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4887, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_v_arr); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_v_arr}; - __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 
1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4887, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4907, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else @@ -40573,19 +40825,19 @@ #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_1, __pyx_v_arr}; - __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4887, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4907, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else #endif { - __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4887, __pyx_L1_error) + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 4907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_v_arr); __Pyx_GIVEREF(__pyx_v_arr); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_v_arr); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4887, __pyx_L1_error) + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } @@ -40593,7 +40845,7 @@ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - /* "mtrand.pyx":4888 + /* "mtrand.pyx":4908 * arr = np.array(x) * self.shuffle(arr) * return arr # <<<<<<<<<<<<<< @@ -40605,7 +40857,7 @@ __pyx_r = __pyx_v_arr; goto __pyx_L0; - /* "mtrand.pyx":4847 + /* "mtrand.pyx":4867 * string.memcpy(data + i * stride, buf, itemsize) * * def permutation(self, object x): # <<<<<<<<<<<<<< @@ -40647,7 +40899,7 @@ static void 
__pyx_tp_dealloc_6mtrand_RandomState(PyObject *o) { struct __pyx_obj_6mtrand_RandomState *p = (struct __pyx_obj_6mtrand_RandomState *)o; - #if PY_VERSION_HEX >= 0x030400a1 + #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } @@ -40808,17 +41060,31 @@ }; #if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_mtrand(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_mtrand}, + {0, NULL} +}; +#endif + static struct PyModuleDef __pyx_moduledef = { - #if PY_VERSION_HEX < 0x03020000 - { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, - #else PyModuleDef_HEAD_INIT, - #endif "mtrand", 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else -1, /* m_size */ + #endif __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else NULL, /* m_reload */ + #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ @@ -40834,53 +41100,56 @@ {&__pyx_n_s_Lock, __pyx_k_Lock, sizeof(__pyx_k_Lock), 0, 0, 1, 1}, {&__pyx_n_s_MT19937, __pyx_k_MT19937, sizeof(__pyx_k_MT19937), 0, 0, 1, 1}, {&__pyx_n_s_OverflowError, __pyx_k_OverflowError, sizeof(__pyx_k_OverflowError), 0, 0, 1, 1}, - {&__pyx_kp_u_RandomState_binomial_line_3686, __pyx_k_RandomState_binomial_line_3686, sizeof(__pyx_k_RandomState_binomial_line_3686), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_bytes_line_999, __pyx_k_RandomState_bytes_line_999, sizeof(__pyx_k_RandomState_bytes_line_999), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_chisquare_line_2196, __pyx_k_RandomState_chisquare_line_2196, sizeof(__pyx_k_RandomState_chisquare_line_2196), 0, 1, 0, 0}, - 
{&__pyx_kp_u_RandomState_choice_line_1028, __pyx_k_RandomState_choice_line_1028, sizeof(__pyx_k_RandomState_choice_line_1028), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_binomial_line_3697, __pyx_k_RandomState_binomial_line_3697, sizeof(__pyx_k_RandomState_binomial_line_3697), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_bytes_line_1004, __pyx_k_RandomState_bytes_line_1004, sizeof(__pyx_k_RandomState_bytes_line_1004), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_chisquare_line_2205, __pyx_k_RandomState_chisquare_line_2205, sizeof(__pyx_k_RandomState_chisquare_line_2205), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_choice_line_1033, __pyx_k_RandomState_choice_line_1033, sizeof(__pyx_k_RandomState_choice_line_1033), 0, 1, 0, 0}, {&__pyx_n_s_RandomState_ctor, __pyx_k_RandomState_ctor, sizeof(__pyx_k_RandomState_ctor), 0, 0, 1, 1}, - {&__pyx_kp_u_RandomState_dirichlet_line_4643, __pyx_k_RandomState_dirichlet_line_4643, sizeof(__pyx_k_RandomState_dirichlet_line_4643), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_f_line_1992, __pyx_k_RandomState_f_line_1992, sizeof(__pyx_k_RandomState_f_line_1992), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_gamma_line_1896, __pyx_k_RandomState_gamma_line_1896, sizeof(__pyx_k_RandomState_gamma_line_1896), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_geometric_line_4082, __pyx_k_RandomState_geometric_line_4082, sizeof(__pyx_k_RandomState_geometric_line_4082), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_gumbel_line_3078, __pyx_k_RandomState_gumbel_line_3078, sizeof(__pyx_k_RandomState_gumbel_line_3078), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_dirichlet_line_4656, __pyx_k_RandomState_dirichlet_line_4656, sizeof(__pyx_k_RandomState_dirichlet_line_4656), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_f_line_1997, __pyx_k_RandomState_f_line_1997, sizeof(__pyx_k_RandomState_f_line_1997), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_gamma_line_1901, __pyx_k_RandomState_gamma_line_1901, sizeof(__pyx_k_RandomState_gamma_line_1901), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_geometric_line_4095, 
__pyx_k_RandomState_geometric_line_4095, sizeof(__pyx_k_RandomState_geometric_line_4095), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_gumbel_line_3089, __pyx_k_RandomState_gumbel_line_3089, sizeof(__pyx_k_RandomState_gumbel_line_3089), 0, 1, 0, 0}, {&__pyx_kp_u_RandomState_hypergeometric_line, __pyx_k_RandomState_hypergeometric_line, sizeof(__pyx_k_RandomState_hypergeometric_line), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_laplace_line_2980, __pyx_k_RandomState_laplace_line_2980, sizeof(__pyx_k_RandomState_laplace_line_2980), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_logistic_line_3209, __pyx_k_RandomState_logistic_line_3209, sizeof(__pyx_k_RandomState_logistic_line_3209), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_lognormal_line_3302, __pyx_k_RandomState_lognormal_line_3302, sizeof(__pyx_k_RandomState_lognormal_line_3302), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_logseries_line_4272, __pyx_k_RandomState_logseries_line_4272, sizeof(__pyx_k_RandomState_logseries_line_4272), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_multinomial_line_453, __pyx_k_RandomState_multinomial_line_453, sizeof(__pyx_k_RandomState_multinomial_line_453), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_laplace_line_2991, __pyx_k_RandomState_laplace_line_2991, sizeof(__pyx_k_RandomState_laplace_line_2991), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_logistic_line_3220, __pyx_k_RandomState_logistic_line_3220, sizeof(__pyx_k_RandomState_logistic_line_3220), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_lognormal_line_3313, __pyx_k_RandomState_lognormal_line_3313, sizeof(__pyx_k_RandomState_lognormal_line_3313), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_logseries_line_4285, __pyx_k_RandomState_logseries_line_4285, sizeof(__pyx_k_RandomState_logseries_line_4285), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_multinomial_line_454, __pyx_k_RandomState_multinomial_line_454, sizeof(__pyx_k_RandomState_multinomial_line_454), 0, 1, 0, 0}, {&__pyx_kp_u_RandomState_multivariate_normal, __pyx_k_RandomState_multivariate_normal, 
sizeof(__pyx_k_RandomState_multivariate_normal), 0, 1, 0, 0}, {&__pyx_kp_u_RandomState_negative_binomial_li, __pyx_k_RandomState_negative_binomial_li, sizeof(__pyx_k_RandomState_negative_binomial_li), 0, 1, 0, 0}, {&__pyx_kp_u_RandomState_noncentral_chisquare, __pyx_k_RandomState_noncentral_chisquare, sizeof(__pyx_k_RandomState_noncentral_chisquare), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_noncentral_f_line_20, __pyx_k_RandomState_noncentral_f_line_20, sizeof(__pyx_k_RandomState_noncentral_f_line_20), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_normal_line_1547, __pyx_k_RandomState_normal_line_1547, sizeof(__pyx_k_RandomState_normal_line_1547), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_pareto_line_2649, __pyx_k_RandomState_pareto_line_2649, sizeof(__pyx_k_RandomState_pareto_line_2649), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_permutation_line_484, __pyx_k_RandomState_permutation_line_484, sizeof(__pyx_k_RandomState_permutation_line_484), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_poisson_line_3903, __pyx_k_RandomState_poisson_line_3903, sizeof(__pyx_k_RandomState_poisson_line_3903), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_power_line_2869, __pyx_k_RandomState_power_line_2869, sizeof(__pyx_k_RandomState_power_line_2869), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_rand_line_1316, __pyx_k_RandomState_rand_line_1316, sizeof(__pyx_k_RandomState_rand_line_1316), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_randint_line_905, __pyx_k_RandomState_randint_line_905, sizeof(__pyx_k_RandomState_randint_line_905), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_randn_line_1360, __pyx_k_RandomState_randn_line_1360, sizeof(__pyx_k_RandomState_randn_line_1360), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_noncentral_f_line_21, __pyx_k_RandomState_noncentral_f_line_21, sizeof(__pyx_k_RandomState_noncentral_f_line_21), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_normal_line_1552, __pyx_k_RandomState_normal_line_1552, sizeof(__pyx_k_RandomState_normal_line_1552), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_pareto_line_2660, 
__pyx_k_RandomState_pareto_line_2660, sizeof(__pyx_k_RandomState_pareto_line_2660), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_permutation_line_486, __pyx_k_RandomState_permutation_line_486, sizeof(__pyx_k_RandomState_permutation_line_486), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_poisson_line_3914, __pyx_k_RandomState_poisson_line_3914, sizeof(__pyx_k_RandomState_poisson_line_3914), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_power_line_2880, __pyx_k_RandomState_power_line_2880, sizeof(__pyx_k_RandomState_power_line_2880), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_rand_line_1321, __pyx_k_RandomState_rand_line_1321, sizeof(__pyx_k_RandomState_rand_line_1321), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_randint_line_910, __pyx_k_RandomState_randint_line_910, sizeof(__pyx_k_RandomState_randint_line_910), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_randn_line_1365, __pyx_k_RandomState_randn_line_1365, sizeof(__pyx_k_RandomState_randn_line_1365), 0, 1, 0, 0}, {&__pyx_kp_u_RandomState_random_integers_line, __pyx_k_RandomState_random_integers_line, sizeof(__pyx_k_RandomState_random_integers_line), 0, 1, 0, 0}, {&__pyx_kp_u_RandomState_random_sample_line_8, __pyx_k_RandomState_random_sample_line_8, sizeof(__pyx_k_RandomState_random_sample_line_8), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_rayleigh_line_3426, __pyx_k_RandomState_rayleigh_line_3426, sizeof(__pyx_k_RandomState_rayleigh_line_3426), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_shuffle_line_4759, __pyx_k_RandomState_shuffle_line_4759, sizeof(__pyx_k_RandomState_shuffle_line_4759), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_rayleigh_line_3437, __pyx_k_RandomState_rayleigh_line_3437, sizeof(__pyx_k_RandomState_rayleigh_line_3437), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_shuffle_line_4779, __pyx_k_RandomState_shuffle_line_4779, sizeof(__pyx_k_RandomState_shuffle_line_4779), 0, 1, 0, 0}, {&__pyx_kp_u_RandomState_standard_cauchy_line, __pyx_k_RandomState_standard_cauchy_line, sizeof(__pyx_k_RandomState_standard_cauchy_line), 0, 1, 0, 0}, 
{&__pyx_kp_u_RandomState_standard_exponential, __pyx_k_RandomState_standard_exponential, sizeof(__pyx_k_RandomState_standard_exponential), 0, 1, 0, 0}, {&__pyx_kp_u_RandomState_standard_gamma_line, __pyx_k_RandomState_standard_gamma_line, sizeof(__pyx_k_RandomState_standard_gamma_line), 0, 1, 0, 0}, {&__pyx_kp_u_RandomState_standard_normal_line, __pyx_k_RandomState_standard_normal_line, sizeof(__pyx_k_RandomState_standard_normal_line), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_standard_t_line_2445, __pyx_k_RandomState_standard_t_line_2445, sizeof(__pyx_k_RandomState_standard_t_line_2445), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_tomaxint_line_858, __pyx_k_RandomState_tomaxint_line_858, sizeof(__pyx_k_RandomState_tomaxint_line_858), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_triangular_line_3592, __pyx_k_RandomState_triangular_line_3592, sizeof(__pyx_k_RandomState_triangular_line_3592), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_uniform_line_1210, __pyx_k_RandomState_uniform_line_1210, sizeof(__pyx_k_RandomState_uniform_line_1210), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_vonmises_line_2551, __pyx_k_RandomState_vonmises_line_2551, sizeof(__pyx_k_RandomState_vonmises_line_2551), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_wald_line_3505, __pyx_k_RandomState_wald_line_3505, sizeof(__pyx_k_RandomState_wald_line_3505), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_weibull_line_2759, __pyx_k_RandomState_weibull_line_2759, sizeof(__pyx_k_RandomState_weibull_line_2759), 0, 1, 0, 0}, - {&__pyx_kp_u_RandomState_zipf_line_3991, __pyx_k_RandomState_zipf_line_3991, sizeof(__pyx_k_RandomState_zipf_line_3991), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_standard_t_line_2456, __pyx_k_RandomState_standard_t_line_2456, sizeof(__pyx_k_RandomState_standard_t_line_2456), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_tomaxint_line_863, __pyx_k_RandomState_tomaxint_line_863, sizeof(__pyx_k_RandomState_tomaxint_line_863), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_triangular_line_3603, 
__pyx_k_RandomState_triangular_line_3603, sizeof(__pyx_k_RandomState_triangular_line_3603), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_uniform_line_1215, __pyx_k_RandomState_uniform_line_1215, sizeof(__pyx_k_RandomState_uniform_line_1215), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_vonmises_line_2562, __pyx_k_RandomState_vonmises_line_2562, sizeof(__pyx_k_RandomState_vonmises_line_2562), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_wald_line_3516, __pyx_k_RandomState_wald_line_3516, sizeof(__pyx_k_RandomState_wald_line_3516), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_weibull_line_2770, __pyx_k_RandomState_weibull_line_2770, sizeof(__pyx_k_RandomState_weibull_line_2770), 0, 1, 0, 0}, + {&__pyx_kp_u_RandomState_zipf_line_4002, __pyx_k_RandomState_zipf_line_4002, sizeof(__pyx_k_RandomState_zipf_line_4002), 0, 1, 0, 0}, {&__pyx_kp_s_Range_exceeds_valid_bounds, __pyx_k_Range_exceeds_valid_bounds, sizeof(__pyx_k_Range_exceeds_valid_bounds), 0, 0, 1, 0}, {&__pyx_n_s_RuntimeWarning, __pyx_k_RuntimeWarning, sizeof(__pyx_k_RuntimeWarning), 0, 0, 1, 1}, + {&__pyx_kp_s_Seed_array_must_be_1_d, __pyx_k_Seed_array_must_be_1_d, sizeof(__pyx_k_Seed_array_must_be_1_d), 0, 0, 1, 0}, {&__pyx_kp_s_Seed_must_be_between_0_and_2_32, __pyx_k_Seed_must_be_between_0_and_2_32, sizeof(__pyx_k_Seed_must_be_between_0_and_2_32), 0, 0, 1, 0}, + {&__pyx_kp_s_Seed_must_be_non_empty, __pyx_k_Seed_must_be_non_empty, sizeof(__pyx_k_Seed_must_be_non_empty), 0, 0, 1, 0}, + {&__pyx_kp_s_Seed_values_must_be_between_0_an, __pyx_k_Seed_values_must_be_between_0_an, sizeof(__pyx_k_Seed_values_must_be_between_0_an), 0, 0, 1, 0}, {&__pyx_n_s_T, __pyx_k_T, sizeof(__pyx_k_T), 0, 0, 1, 1}, {&__pyx_kp_s_This_function_is_deprecated_Plea, __pyx_k_This_function_is_deprecated_Plea, sizeof(__pyx_k_This_function_is_deprecated_Plea), 0, 0, 1, 0}, {&__pyx_kp_s_This_function_is_deprecated_Plea_2, __pyx_k_This_function_is_deprecated_Plea_2, sizeof(__pyx_k_This_function_is_deprecated_Plea_2), 0, 0, 1, 0}, @@ -40890,17 +41159,19 @@ 
{&__pyx_n_s_a, __pyx_k_a, sizeof(__pyx_k_a), 0, 0, 1, 1}, {&__pyx_kp_s_a_0, __pyx_k_a_0, sizeof(__pyx_k_a_0), 0, 0, 1, 0}, {&__pyx_kp_s_a_0_2, __pyx_k_a_0_2, sizeof(__pyx_k_a_0_2), 0, 0, 1, 0}, - {&__pyx_kp_s_a_1_0, __pyx_k_a_1_0, sizeof(__pyx_k_a_1_0), 0, 0, 1, 0}, {&__pyx_kp_s_a_and_p_must_have_same_size, __pyx_k_a_and_p_must_have_same_size, sizeof(__pyx_k_a_and_p_must_have_same_size), 0, 0, 1, 0}, {&__pyx_kp_s_a_must_be_1_dimensional, __pyx_k_a_must_be_1_dimensional, sizeof(__pyx_k_a_must_be_1_dimensional), 0, 0, 1, 0}, {&__pyx_kp_s_a_must_be_1_dimensional_or_an_in, __pyx_k_a_must_be_1_dimensional_or_an_in, sizeof(__pyx_k_a_must_be_1_dimensional_or_an_in), 0, 0, 1, 0}, + {&__pyx_kp_s_a_must_be_a_valid_float_1_0, __pyx_k_a_must_be_a_valid_float_1_0, sizeof(__pyx_k_a_must_be_a_valid_float_1_0), 0, 0, 1, 0}, {&__pyx_kp_s_a_must_be_greater_than_0, __pyx_k_a_must_be_greater_than_0, sizeof(__pyx_k_a_must_be_greater_than_0), 0, 0, 1, 0}, {&__pyx_kp_s_a_must_be_non_empty, __pyx_k_a_must_be_non_empty, sizeof(__pyx_k_a_must_be_non_empty), 0, 0, 1, 0}, + {&__pyx_kp_s_a_must_contain_valid_floats_1_0, __pyx_k_a_must_contain_valid_floats_1_0, sizeof(__pyx_k_a_must_contain_valid_floats_1_0), 0, 0, 1, 0}, {&__pyx_n_s_add, __pyx_k_add, sizeof(__pyx_k_add), 0, 0, 1, 1}, {&__pyx_kp_s_algorithm_must_be_MT19937, __pyx_k_algorithm_must_be_MT19937, sizeof(__pyx_k_algorithm_must_be_MT19937), 0, 0, 1, 0}, {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, {&__pyx_n_s_allclose, __pyx_k_allclose, sizeof(__pyx_k_allclose), 0, 0, 1, 1}, {&__pyx_n_s_alpha, __pyx_k_alpha, sizeof(__pyx_k_alpha), 0, 0, 1, 1}, + {&__pyx_kp_s_alpha_0, __pyx_k_alpha_0, sizeof(__pyx_k_alpha_0), 0, 0, 1, 0}, {&__pyx_n_s_any, __pyx_k_any, sizeof(__pyx_k_any), 0, 0, 1, 1}, {&__pyx_n_s_arange, __pyx_k_arange, sizeof(__pyx_k_arange), 0, 0, 1, 1}, {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1}, @@ -40943,7 +41214,6 @@ {&__pyx_kp_s_dfden_0, __pyx_k_dfden_0, sizeof(__pyx_k_dfden_0), 0, 0, 
1, 0}, {&__pyx_n_s_dfnum, __pyx_k_dfnum, sizeof(__pyx_k_dfnum), 0, 0, 1, 1}, {&__pyx_kp_s_dfnum_0, __pyx_k_dfnum_0, sizeof(__pyx_k_dfnum_0), 0, 0, 1, 0}, - {&__pyx_kp_s_dfnum_1, __pyx_k_dfnum_1, sizeof(__pyx_k_dfnum_1), 0, 0, 1, 0}, {&__pyx_n_s_dirichlet, __pyx_k_dirichlet, sizeof(__pyx_k_dirichlet), 0, 0, 1, 1}, {&__pyx_kp_u_dirichlet_alpha_size_None_Draw, __pyx_k_dirichlet_alpha_size_None_Draw, sizeof(__pyx_k_dirichlet_alpha_size_None_Draw), 0, 1, 0, 0}, {&__pyx_n_s_dot, __pyx_k_dot, sizeof(__pyx_k_dot), 0, 0, 1, 1}, @@ -41196,10 +41466,10 @@ __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 222, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 272, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 572, __pyx_L1_error) - __pyx_builtin_OverflowError = __Pyx_GetBuiltinName(__pyx_n_s_OverflowError); if (!__pyx_builtin_OverflowError) __PYX_ERR(0, 1300, __pyx_L1_error) - __pyx_builtin_DeprecationWarning = __Pyx_GetBuiltinName(__pyx_n_s_DeprecationWarning); if (!__pyx_builtin_DeprecationWarning) __PYX_ERR(0, 1500, __pyx_L1_error) - __pyx_builtin_RuntimeWarning = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeWarning); if (!__pyx_builtin_RuntimeWarning) __PYX_ERR(0, 4521, __pyx_L1_error) - __pyx_builtin_reversed = __Pyx_GetBuiltinName(__pyx_n_s_reversed); if (!__pyx_builtin_reversed) __PYX_ERR(0, 4825, __pyx_L1_error) + __pyx_builtin_OverflowError = __Pyx_GetBuiltinName(__pyx_n_s_OverflowError); if (!__pyx_builtin_OverflowError) __PYX_ERR(0, 1305, __pyx_L1_error) + __pyx_builtin_DeprecationWarning = __Pyx_GetBuiltinName(__pyx_n_s_DeprecationWarning); if (!__pyx_builtin_DeprecationWarning) __PYX_ERR(0, 1505, __pyx_L1_error) + __pyx_builtin_RuntimeWarning = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeWarning); if (!__pyx_builtin_RuntimeWarning) __PYX_ERR(0, 4534, __pyx_L1_error) + 
__pyx_builtin_reversed = __Pyx_GetBuiltinName(__pyx_n_s_reversed); if (!__pyx_builtin_reversed) __PYX_ERR(0, 4845, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; @@ -41209,12 +41479,12 @@ __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - /* "numpy.pxd":162 + /* "numpy.pxd":163 * except Exception: * PyErr_Print() * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< */ - __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple_)) __PYX_ERR(2, 162, __pyx_L1_error) + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple_)) __PYX_ERR(2, 163, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); @@ -41267,7 +41537,7 @@ * itera = PyArray_IterNew(oa) * with lock, nogil: # <<<<<<<<<<<<<< * for i from 0 <= i < length: - * array_data[i] = func(state, ((itera.dataptr))[0]) + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) */ __pyx_tuple__6 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 212, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); @@ -41564,7 +41834,7 @@ * itera = PyArray_IterNew(oa) * with lock, nogil: # <<<<<<<<<<<<<< * for i from 0 <= i < length: - * array_data[i] = func(state, ((itera.dataptr))[0]) + * array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) */ __pyx_tuple__33 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(0, 537, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__33); @@ -41605,7 +41875,7 @@ /* "mtrand.pyx":680 * idx = operator.index(seed) - * if idx > int(2**32 - 1) or idx < 0: + * if (idx >= 2**32) or (idx < 0): * raise ValueError("Seed must be between 0 and 2**32 - 1") # <<<<<<<<<<<<<< * with self.lock: * rk_seed(idx, self.internal_state) @@ -41615,7 +41885,7 @@ __Pyx_GIVEREF(__pyx_tuple__37); /* "mtrand.pyx":681 - * if idx > int(2**32 - 1) or idx < 0: + * if (idx >= 2**32) or 
(idx < 0): * raise ValueError("Seed must be between 0 and 2**32 - 1") * with self.lock: # <<<<<<<<<<<<<< * rk_seed(idx, self.internal_state) @@ -41626,1484 +41896,1528 @@ __Pyx_GIVEREF(__pyx_tuple__38); /* "mtrand.pyx":686 - * obj = np.asarray(seed).astype(np.int64, casting='safe') - * if ((obj > int(2**32 - 1)) | (obj < 0)).any(): - * raise ValueError("Seed must be between 0 and 2**32 - 1") # <<<<<<<<<<<<<< - * obj = obj.astype('L', casting='unsafe') - * with self.lock: + * obj = np.asarray(seed) + * if obj.size == 0: + * raise ValueError("Seed must be non-empty") # <<<<<<<<<<<<<< + * obj = obj.astype(np.int64, casting='safe') + * if obj.ndim != 1: */ - __pyx_tuple__39 = PyTuple_Pack(1, __pyx_kp_s_Seed_must_be_between_0_and_2_32); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(0, 686, __pyx_L1_error) + __pyx_tuple__39 = PyTuple_Pack(1, __pyx_kp_s_Seed_must_be_non_empty); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(0, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__39); __Pyx_GIVEREF(__pyx_tuple__39); - /* "mtrand.pyx":687 - * if ((obj > int(2**32 - 1)) | (obj < 0)).any(): - * raise ValueError("Seed must be between 0 and 2**32 - 1") + /* "mtrand.pyx":689 + * obj = obj.astype(np.int64, casting='safe') + * if obj.ndim != 1: + * raise ValueError("Seed array must be 1-d") # <<<<<<<<<<<<<< + * if ((obj >= 2**32) | (obj < 0)).any(): + * raise ValueError("Seed values must be between 0 and 2**32 - 1") + */ + __pyx_tuple__40 = PyTuple_Pack(1, __pyx_kp_s_Seed_array_must_be_1_d); if (unlikely(!__pyx_tuple__40)) __PYX_ERR(0, 689, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__40); + __Pyx_GIVEREF(__pyx_tuple__40); + + /* "mtrand.pyx":691 + * raise ValueError("Seed array must be 1-d") + * if ((obj >= 2**32) | (obj < 0)).any(): + * raise ValueError("Seed values must be between 0 and 2**32 - 1") # <<<<<<<<<<<<<< + * obj = obj.astype('L', casting='unsafe') + * with self.lock: + */ + __pyx_tuple__41 = PyTuple_Pack(1, __pyx_kp_s_Seed_values_must_be_between_0_an); if 
(unlikely(!__pyx_tuple__41)) __PYX_ERR(0, 691, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__41); + __Pyx_GIVEREF(__pyx_tuple__41); + + /* "mtrand.pyx":692 + * if ((obj >= 2**32) | (obj < 0)).any(): + * raise ValueError("Seed values must be between 0 and 2**32 - 1") * obj = obj.astype('L', casting='unsafe') # <<<<<<<<<<<<<< * with self.lock: * init_by_array(self.internal_state, PyArray_DATA(obj), */ - __pyx_tuple__40 = PyTuple_Pack(1, __pyx_n_s_L); if (unlikely(!__pyx_tuple__40)) __PYX_ERR(0, 687, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__40); - __Pyx_GIVEREF(__pyx_tuple__40); + __pyx_tuple__42 = PyTuple_Pack(1, __pyx_n_s_L); if (unlikely(!__pyx_tuple__42)) __PYX_ERR(0, 692, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__42); + __Pyx_GIVEREF(__pyx_tuple__42); - /* "mtrand.pyx":688 - * raise ValueError("Seed must be between 0 and 2**32 - 1") + /* "mtrand.pyx":693 + * raise ValueError("Seed values must be between 0 and 2**32 - 1") * obj = obj.astype('L', casting='unsafe') * with self.lock: # <<<<<<<<<<<<<< * init_by_array(self.internal_state, PyArray_DATA(obj), * PyArray_DIM(obj, 0)) */ - __pyx_tuple__41 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__41)) __PYX_ERR(0, 688, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__41); - __Pyx_GIVEREF(__pyx_tuple__41); + __pyx_tuple__43 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__43)) __PYX_ERR(0, 693, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__43); + __Pyx_GIVEREF(__pyx_tuple__43); - /* "mtrand.pyx":724 + /* "mtrand.pyx":729 * cdef ndarray state "arrayObject_state" * state = np.empty(624, np.uint) * with self.lock: # <<<<<<<<<<<<<< * memcpy(PyArray_DATA(state), (self.internal_state.key), 624*sizeof(long)) * has_gauss = self.internal_state.has_gauss */ - __pyx_tuple__42 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__42)) __PYX_ERR(0, 724, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__42); - __Pyx_GIVEREF(__pyx_tuple__42); + __pyx_tuple__44 = 
PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__44)) __PYX_ERR(0, 729, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__44); + __Pyx_GIVEREF(__pyx_tuple__44); - /* "mtrand.pyx":783 + /* "mtrand.pyx":788 * algorithm_name = state[0] * if algorithm_name != 'MT19937': * raise ValueError("algorithm must be 'MT19937'") # <<<<<<<<<<<<<< * key, pos = state[1:3] * if len(state) == 3: */ - __pyx_tuple__43 = PyTuple_Pack(1, __pyx_kp_s_algorithm_must_be_MT19937); if (unlikely(!__pyx_tuple__43)) __PYX_ERR(0, 783, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__43); - __Pyx_GIVEREF(__pyx_tuple__43); + __pyx_tuple__45 = PyTuple_Pack(1, __pyx_kp_s_algorithm_must_be_MT19937); if (unlikely(!__pyx_tuple__45)) __PYX_ERR(0, 788, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__45); + __Pyx_GIVEREF(__pyx_tuple__45); - /* "mtrand.pyx":784 + /* "mtrand.pyx":789 * if algorithm_name != 'MT19937': * raise ValueError("algorithm must be 'MT19937'") * key, pos = state[1:3] # <<<<<<<<<<<<<< * if len(state) == 3: * has_gauss = 0 */ - __pyx_slice__44 = PySlice_New(__pyx_int_1, __pyx_int_3, Py_None); if (unlikely(!__pyx_slice__44)) __PYX_ERR(0, 784, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__44); - __Pyx_GIVEREF(__pyx_slice__44); + __pyx_slice__46 = PySlice_New(__pyx_int_1, __pyx_int_3, Py_None); if (unlikely(!__pyx_slice__46)) __PYX_ERR(0, 789, __pyx_L1_error) + __Pyx_GOTREF(__pyx_slice__46); + __Pyx_GIVEREF(__pyx_slice__46); - /* "mtrand.pyx":789 + /* "mtrand.pyx":794 * cached_gaussian = 0.0 * else: * has_gauss, cached_gaussian = state[3:5] # <<<<<<<<<<<<<< * try: * obj = PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1) */ - __pyx_slice__45 = PySlice_New(__pyx_int_3, __pyx_int_5, Py_None); if (unlikely(!__pyx_slice__45)) __PYX_ERR(0, 789, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__45); - __Pyx_GIVEREF(__pyx_slice__45); + __pyx_slice__47 = PySlice_New(__pyx_int_3, __pyx_int_5, Py_None); if (unlikely(!__pyx_slice__47)) __PYX_ERR(0, 794, __pyx_L1_error) + __Pyx_GOTREF(__pyx_slice__47); 
+ __Pyx_GIVEREF(__pyx_slice__47); - /* "mtrand.pyx":796 + /* "mtrand.pyx":801 * obj = PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1) * if PyArray_DIM(obj, 0) != 624: * raise ValueError("state must be 624 longs") # <<<<<<<<<<<<<< * with self.lock: * memcpy((self.internal_state.key), PyArray_DATA(obj), 624*sizeof(long)) */ - __pyx_tuple__46 = PyTuple_Pack(1, __pyx_kp_s_state_must_be_624_longs); if (unlikely(!__pyx_tuple__46)) __PYX_ERR(0, 796, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__46); - __Pyx_GIVEREF(__pyx_tuple__46); + __pyx_tuple__48 = PyTuple_Pack(1, __pyx_kp_s_state_must_be_624_longs); if (unlikely(!__pyx_tuple__48)) __PYX_ERR(0, 801, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__48); + __Pyx_GIVEREF(__pyx_tuple__48); - /* "mtrand.pyx":797 + /* "mtrand.pyx":802 * if PyArray_DIM(obj, 0) != 624: * raise ValueError("state must be 624 longs") * with self.lock: # <<<<<<<<<<<<<< * memcpy((self.internal_state.key), PyArray_DATA(obj), 624*sizeof(long)) * self.internal_state.pos = pos */ - __pyx_tuple__47 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__47)) __PYX_ERR(0, 797, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__47); - __Pyx_GIVEREF(__pyx_tuple__47); + __pyx_tuple__49 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__49)) __PYX_ERR(0, 802, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__49); + __Pyx_GIVEREF(__pyx_tuple__49); - /* "mtrand.pyx":988 + /* "mtrand.pyx":993 * raise ValueError("high is out of bounds for %s" % (key,)) * if ilow >= ihigh: * raise ValueError("low >= high") # <<<<<<<<<<<<<< * * with self.lock: */ - __pyx_tuple__49 = PyTuple_Pack(1, __pyx_kp_s_low_high); if (unlikely(!__pyx_tuple__49)) __PYX_ERR(0, 988, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__49); - __Pyx_GIVEREF(__pyx_tuple__49); + __pyx_tuple__51 = PyTuple_Pack(1, __pyx_kp_s_low_high); if (unlikely(!__pyx_tuple__51)) __PYX_ERR(0, 993, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__51); + __Pyx_GIVEREF(__pyx_tuple__51); - /* "mtrand.pyx":990 
+ /* "mtrand.pyx":995 * raise ValueError("low >= high") * * with self.lock: # <<<<<<<<<<<<<< * ret = randfunc(ilow, ihigh - 1, size, self.state_address) * */ - __pyx_tuple__50 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__50)) __PYX_ERR(0, 990, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__50); - __Pyx_GIVEREF(__pyx_tuple__50); - __pyx_tuple__51 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__51)) __PYX_ERR(0, 990, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__51); - __Pyx_GIVEREF(__pyx_tuple__51); + __pyx_tuple__52 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__52)) __PYX_ERR(0, 995, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__52); + __Pyx_GIVEREF(__pyx_tuple__52); + __pyx_tuple__53 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__53)) __PYX_ERR(0, 995, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__53); + __Pyx_GIVEREF(__pyx_tuple__53); - /* "mtrand.pyx":1023 + /* "mtrand.pyx":1028 * cdef void *bytes * bytestring = empty_py_bytes(length, &bytes) * with self.lock, nogil: # <<<<<<<<<<<<<< * rk_fill(bytes, length, self.internal_state) * return bytestring */ - __pyx_tuple__52 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__52)) __PYX_ERR(0, 1023, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__52); - __Pyx_GIVEREF(__pyx_tuple__52); + __pyx_tuple__54 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__54)) __PYX_ERR(0, 1028, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__54); + __Pyx_GIVEREF(__pyx_tuple__54); - /* "mtrand.pyx":1113 + /* "mtrand.pyx":1118 * pop_size = operator.index(a.item()) * except TypeError: * raise ValueError("a must be 1-dimensional or an integer") # <<<<<<<<<<<<<< * if pop_size <= 0: * raise ValueError("a must be greater than 0") */ - __pyx_tuple__53 = PyTuple_Pack(1, __pyx_kp_s_a_must_be_1_dimensional_or_an_in); if (unlikely(!__pyx_tuple__53)) __PYX_ERR(0, 1113, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__53); - 
__Pyx_GIVEREF(__pyx_tuple__53); + __pyx_tuple__55 = PyTuple_Pack(1, __pyx_kp_s_a_must_be_1_dimensional_or_an_in); if (unlikely(!__pyx_tuple__55)) __PYX_ERR(0, 1118, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__55); + __Pyx_GIVEREF(__pyx_tuple__55); - /* "mtrand.pyx":1115 + /* "mtrand.pyx":1120 * raise ValueError("a must be 1-dimensional or an integer") * if pop_size <= 0: * raise ValueError("a must be greater than 0") # <<<<<<<<<<<<<< * elif a.ndim != 1: * raise ValueError("a must be 1-dimensional") */ - __pyx_tuple__54 = PyTuple_Pack(1, __pyx_kp_s_a_must_be_greater_than_0); if (unlikely(!__pyx_tuple__54)) __PYX_ERR(0, 1115, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__54); - __Pyx_GIVEREF(__pyx_tuple__54); + __pyx_tuple__56 = PyTuple_Pack(1, __pyx_kp_s_a_must_be_greater_than_0); if (unlikely(!__pyx_tuple__56)) __PYX_ERR(0, 1120, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__56); + __Pyx_GIVEREF(__pyx_tuple__56); - /* "mtrand.pyx":1117 + /* "mtrand.pyx":1122 * raise ValueError("a must be greater than 0") * elif a.ndim != 1: * raise ValueError("a must be 1-dimensional") # <<<<<<<<<<<<<< * else: * pop_size = a.shape[0] */ - __pyx_tuple__55 = PyTuple_Pack(1, __pyx_kp_s_a_must_be_1_dimensional); if (unlikely(!__pyx_tuple__55)) __PYX_ERR(0, 1117, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__55); - __Pyx_GIVEREF(__pyx_tuple__55); + __pyx_tuple__57 = PyTuple_Pack(1, __pyx_kp_s_a_must_be_1_dimensional); if (unlikely(!__pyx_tuple__57)) __PYX_ERR(0, 1122, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__57); + __Pyx_GIVEREF(__pyx_tuple__57); - /* "mtrand.pyx":1121 + /* "mtrand.pyx":1126 * pop_size = a.shape[0] * if pop_size is 0: * raise ValueError("a must be non-empty") # <<<<<<<<<<<<<< * * if p is not None: */ - __pyx_tuple__56 = PyTuple_Pack(1, __pyx_kp_s_a_must_be_non_empty); if (unlikely(!__pyx_tuple__56)) __PYX_ERR(0, 1121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__56); - __Pyx_GIVEREF(__pyx_tuple__56); + __pyx_tuple__58 = PyTuple_Pack(1, __pyx_kp_s_a_must_be_non_empty); if 
(unlikely(!__pyx_tuple__58)) __PYX_ERR(0, 1126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__58); + __Pyx_GIVEREF(__pyx_tuple__58); - /* "mtrand.pyx":1135 + /* "mtrand.pyx":1140 * * if p.ndim != 1: * raise ValueError("p must be 1-dimensional") # <<<<<<<<<<<<<< * if p.size != pop_size: * raise ValueError("a and p must have same size") */ - __pyx_tuple__57 = PyTuple_Pack(1, __pyx_kp_s_p_must_be_1_dimensional); if (unlikely(!__pyx_tuple__57)) __PYX_ERR(0, 1135, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__57); - __Pyx_GIVEREF(__pyx_tuple__57); + __pyx_tuple__59 = PyTuple_Pack(1, __pyx_kp_s_p_must_be_1_dimensional); if (unlikely(!__pyx_tuple__59)) __PYX_ERR(0, 1140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__59); + __Pyx_GIVEREF(__pyx_tuple__59); - /* "mtrand.pyx":1137 + /* "mtrand.pyx":1142 * raise ValueError("p must be 1-dimensional") * if p.size != pop_size: * raise ValueError("a and p must have same size") # <<<<<<<<<<<<<< * if np.logical_or.reduce(p < 0): * raise ValueError("probabilities are not non-negative") */ - __pyx_tuple__58 = PyTuple_Pack(1, __pyx_kp_s_a_and_p_must_have_same_size); if (unlikely(!__pyx_tuple__58)) __PYX_ERR(0, 1137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__58); - __Pyx_GIVEREF(__pyx_tuple__58); + __pyx_tuple__60 = PyTuple_Pack(1, __pyx_kp_s_a_and_p_must_have_same_size); if (unlikely(!__pyx_tuple__60)) __PYX_ERR(0, 1142, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__60); + __Pyx_GIVEREF(__pyx_tuple__60); - /* "mtrand.pyx":1139 + /* "mtrand.pyx":1144 * raise ValueError("a and p must have same size") * if np.logical_or.reduce(p < 0): * raise ValueError("probabilities are not non-negative") # <<<<<<<<<<<<<< * if abs(kahan_sum(pix, d) - 1.) 
> atol: * raise ValueError("probabilities do not sum to 1") */ - __pyx_tuple__59 = PyTuple_Pack(1, __pyx_kp_s_probabilities_are_not_non_negati); if (unlikely(!__pyx_tuple__59)) __PYX_ERR(0, 1139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__59); - __Pyx_GIVEREF(__pyx_tuple__59); + __pyx_tuple__61 = PyTuple_Pack(1, __pyx_kp_s_probabilities_are_not_non_negati); if (unlikely(!__pyx_tuple__61)) __PYX_ERR(0, 1144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__61); + __Pyx_GIVEREF(__pyx_tuple__61); - /* "mtrand.pyx":1141 + /* "mtrand.pyx":1146 * raise ValueError("probabilities are not non-negative") * if abs(kahan_sum(pix, d) - 1.) > atol: * raise ValueError("probabilities do not sum to 1") # <<<<<<<<<<<<<< * * shape = size */ - __pyx_tuple__60 = PyTuple_Pack(1, __pyx_kp_s_probabilities_do_not_sum_to_1); if (unlikely(!__pyx_tuple__60)) __PYX_ERR(0, 1141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__60); - __Pyx_GIVEREF(__pyx_tuple__60); + __pyx_tuple__62 = PyTuple_Pack(1, __pyx_kp_s_probabilities_do_not_sum_to_1); if (unlikely(!__pyx_tuple__62)) __PYX_ERR(0, 1146, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__62); + __Pyx_GIVEREF(__pyx_tuple__62); - /* "mtrand.pyx":1161 + /* "mtrand.pyx":1166 * else: * if size > pop_size: * raise ValueError("Cannot take a larger sample than " # <<<<<<<<<<<<<< * "population when 'replace=False'") * */ - __pyx_tuple__61 = PyTuple_Pack(1, __pyx_kp_s_Cannot_take_a_larger_sample_than); if (unlikely(!__pyx_tuple__61)) __PYX_ERR(0, 1161, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__61); - __Pyx_GIVEREF(__pyx_tuple__61); + __pyx_tuple__63 = PyTuple_Pack(1, __pyx_kp_s_Cannot_take_a_larger_sample_than); if (unlikely(!__pyx_tuple__63)) __PYX_ERR(0, 1166, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__63); + __Pyx_GIVEREF(__pyx_tuple__63); - /* "mtrand.pyx":1166 + /* "mtrand.pyx":1171 * if p is not None: * if np.count_nonzero(p > 0) < size: * raise ValueError("Fewer non-zero entries in p than size") # <<<<<<<<<<<<<< * n_uniq = 0 * p = p.copy() */ - 
__pyx_tuple__62 = PyTuple_Pack(1, __pyx_kp_s_Fewer_non_zero_entries_in_p_than); if (unlikely(!__pyx_tuple__62)) __PYX_ERR(0, 1166, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__62); - __Pyx_GIVEREF(__pyx_tuple__62); + __pyx_tuple__64 = PyTuple_Pack(1, __pyx_kp_s_Fewer_non_zero_entries_in_p_than); if (unlikely(!__pyx_tuple__64)) __PYX_ERR(0, 1171, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__64); + __Pyx_GIVEREF(__pyx_tuple__64); - /* "mtrand.pyx":1191 + /* "mtrand.pyx":1196 * if shape is None and isinstance(idx, np.ndarray): * # In most cases a scalar will have been made an array * idx = idx.item(0) # <<<<<<<<<<<<<< * * #Use samples as indices for a if a is array-like */ - __pyx_tuple__63 = PyTuple_Pack(1, __pyx_int_0); if (unlikely(!__pyx_tuple__63)) __PYX_ERR(0, 1191, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__63); - __Pyx_GIVEREF(__pyx_tuple__63); + __pyx_tuple__65 = PyTuple_Pack(1, __pyx_int_0); if (unlikely(!__pyx_tuple__65)) __PYX_ERR(0, 1196, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__65); + __Pyx_GIVEREF(__pyx_tuple__65); - /* "mtrand.pyx":1203 + /* "mtrand.pyx":1208 * # array, taking into account that np.array(item) may not work * # for object arrays. 
* res = np.empty((), dtype=a.dtype) # <<<<<<<<<<<<<< * res[()] = a[idx] * return res */ - __pyx_tuple__64 = PyTuple_Pack(1, __pyx_empty_tuple); if (unlikely(!__pyx_tuple__64)) __PYX_ERR(0, 1203, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__64); - __Pyx_GIVEREF(__pyx_tuple__64); + __pyx_tuple__66 = PyTuple_Pack(1, __pyx_empty_tuple); if (unlikely(!__pyx_tuple__66)) __PYX_ERR(0, 1208, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__66); + __Pyx_GIVEREF(__pyx_tuple__66); - /* "mtrand.pyx":1300 + /* "mtrand.pyx":1305 * * if not npy_isfinite(fscale): * raise OverflowError('Range exceeds valid bounds') # <<<<<<<<<<<<<< * * return cont2_array_sc(self.internal_state, rk_uniform, size, flow, */ - __pyx_tuple__65 = PyTuple_Pack(1, __pyx_kp_s_Range_exceeds_valid_bounds); if (unlikely(!__pyx_tuple__65)) __PYX_ERR(0, 1300, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__65); - __Pyx_GIVEREF(__pyx_tuple__65); + __pyx_tuple__67 = PyTuple_Pack(1, __pyx_kp_s_Range_exceeds_valid_bounds); if (unlikely(!__pyx_tuple__67)) __PYX_ERR(0, 1305, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__67); + __Pyx_GIVEREF(__pyx_tuple__67); - /* "mtrand.pyx":1311 + /* "mtrand.pyx":1316 * * if not np.all(np.isfinite(odiff)): * raise OverflowError('Range exceeds valid bounds') # <<<<<<<<<<<<<< * * return cont2_array(self.internal_state, rk_uniform, size, olow, odiff, */ - __pyx_tuple__66 = PyTuple_Pack(1, __pyx_kp_s_Range_exceeds_valid_bounds); if (unlikely(!__pyx_tuple__66)) __PYX_ERR(0, 1311, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__66); - __Pyx_GIVEREF(__pyx_tuple__66); + __pyx_tuple__68 = PyTuple_Pack(1, __pyx_kp_s_Range_exceeds_valid_bounds); if (unlikely(!__pyx_tuple__68)) __PYX_ERR(0, 1316, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__68); + __Pyx_GIVEREF(__pyx_tuple__68); - /* "mtrand.pyx":1646 + /* "mtrand.pyx":1651 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_normal, size, floc, * fscale, 
self.lock) */ - __pyx_tuple__67 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__67)) __PYX_ERR(0, 1646, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__67); - __Pyx_GIVEREF(__pyx_tuple__67); + __pyx_tuple__69 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__69)) __PYX_ERR(0, 1651, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__69); + __Pyx_GIVEREF(__pyx_tuple__69); - /* "mtrand.pyx":1651 + /* "mtrand.pyx":1656 * * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_normal, size, oloc, oscale, * self.lock) */ - __pyx_tuple__68 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__68)) __PYX_ERR(0, 1651, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__68); - __Pyx_GIVEREF(__pyx_tuple__68); + __pyx_tuple__70 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__70)) __PYX_ERR(0, 1656, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__70); + __Pyx_GIVEREF(__pyx_tuple__70); - /* "mtrand.pyx":1704 + /* "mtrand.pyx":1709 * * if fa <= 0: * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * if fb <= 0: * raise ValueError("b <= 0") */ - __pyx_tuple__69 = PyTuple_Pack(1, __pyx_kp_s_a_0); if (unlikely(!__pyx_tuple__69)) __PYX_ERR(0, 1704, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__69); - __Pyx_GIVEREF(__pyx_tuple__69); + __pyx_tuple__71 = PyTuple_Pack(1, __pyx_kp_s_a_0); if (unlikely(!__pyx_tuple__71)) __PYX_ERR(0, 1709, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__71); + __Pyx_GIVEREF(__pyx_tuple__71); - /* "mtrand.pyx":1706 + /* "mtrand.pyx":1711 * raise ValueError("a <= 0") * if fb <= 0: * raise ValueError("b <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb, * self.lock) */ - __pyx_tuple__70 = PyTuple_Pack(1, __pyx_kp_s_b_0); if (unlikely(!__pyx_tuple__70)) __PYX_ERR(0, 1706, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__70); - __Pyx_GIVEREF(__pyx_tuple__70); + __pyx_tuple__72 = PyTuple_Pack(1, __pyx_kp_s_b_0); if 
(unlikely(!__pyx_tuple__72)) __PYX_ERR(0, 1711, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__72); + __Pyx_GIVEREF(__pyx_tuple__72); - /* "mtrand.pyx":1711 + /* "mtrand.pyx":1716 * * if np.any(np.less_equal(oa, 0)): * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * if np.any(np.less_equal(ob, 0)): * raise ValueError("b <= 0") */ - __pyx_tuple__71 = PyTuple_Pack(1, __pyx_kp_s_a_0); if (unlikely(!__pyx_tuple__71)) __PYX_ERR(0, 1711, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__71); - __Pyx_GIVEREF(__pyx_tuple__71); + __pyx_tuple__73 = PyTuple_Pack(1, __pyx_kp_s_a_0); if (unlikely(!__pyx_tuple__73)) __PYX_ERR(0, 1716, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__73); + __Pyx_GIVEREF(__pyx_tuple__73); - /* "mtrand.pyx":1713 + /* "mtrand.pyx":1718 * raise ValueError("a <= 0") * if np.any(np.less_equal(ob, 0)): * raise ValueError("b <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_beta, size, oa, ob, * self.lock) */ - __pyx_tuple__72 = PyTuple_Pack(1, __pyx_kp_s_b_0); if (unlikely(!__pyx_tuple__72)) __PYX_ERR(0, 1713, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__72); - __Pyx_GIVEREF(__pyx_tuple__72); + __pyx_tuple__74 = PyTuple_Pack(1, __pyx_kp_s_b_0); if (unlikely(!__pyx_tuple__74)) __PYX_ERR(0, 1718, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__74); + __Pyx_GIVEREF(__pyx_tuple__74); - /* "mtrand.pyx":1770 + /* "mtrand.pyx":1775 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_exponential, size, * fscale, self.lock) */ - __pyx_tuple__73 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__73)) __PYX_ERR(0, 1770, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__73); - __Pyx_GIVEREF(__pyx_tuple__73); + __pyx_tuple__75 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__75)) __PYX_ERR(0, 1775, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__75); + __Pyx_GIVEREF(__pyx_tuple__75); - /* "mtrand.pyx":1775 + /* "mtrand.pyx":1780 * * if 
np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_exponential, size, oscale, * self.lock) */ - __pyx_tuple__74 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__74)) __PYX_ERR(0, 1775, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__74); - __Pyx_GIVEREF(__pyx_tuple__74); + __pyx_tuple__76 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__76)) __PYX_ERR(0, 1780, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__76); + __Pyx_GIVEREF(__pyx_tuple__76); - /* "mtrand.pyx":1887 + /* "mtrand.pyx":1892 * fshape = PyFloat_AsDouble(shape) * if np.signbit(fshape): * raise ValueError("shape < 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_standard_gamma, * size, fshape, self.lock) */ - __pyx_tuple__75 = PyTuple_Pack(1, __pyx_kp_s_shape_0); if (unlikely(!__pyx_tuple__75)) __PYX_ERR(0, 1887, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__75); - __Pyx_GIVEREF(__pyx_tuple__75); + __pyx_tuple__77 = PyTuple_Pack(1, __pyx_kp_s_shape_0); if (unlikely(!__pyx_tuple__77)) __PYX_ERR(0, 1892, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__77); + __Pyx_GIVEREF(__pyx_tuple__77); - /* "mtrand.pyx":1892 + /* "mtrand.pyx":1897 * * if np.any(np.signbit(oshape)): * raise ValueError("shape < 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_standard_gamma, size, * oshape, self.lock) */ - __pyx_tuple__76 = PyTuple_Pack(1, __pyx_kp_s_shape_0); if (unlikely(!__pyx_tuple__76)) __PYX_ERR(0, 1892, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__76); - __Pyx_GIVEREF(__pyx_tuple__76); + __pyx_tuple__78 = PyTuple_Pack(1, __pyx_kp_s_shape_0); if (unlikely(!__pyx_tuple__78)) __PYX_ERR(0, 1897, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__78); + __Pyx_GIVEREF(__pyx_tuple__78); - /* "mtrand.pyx":1979 + /* "mtrand.pyx":1984 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fshape): * raise ValueError("shape < 0") # <<<<<<<<<<<<<< * if np.signbit(fscale): * raise ValueError("scale < 0") */ - 
__pyx_tuple__77 = PyTuple_Pack(1, __pyx_kp_s_shape_0); if (unlikely(!__pyx_tuple__77)) __PYX_ERR(0, 1979, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__77); - __Pyx_GIVEREF(__pyx_tuple__77); + __pyx_tuple__79 = PyTuple_Pack(1, __pyx_kp_s_shape_0); if (unlikely(!__pyx_tuple__79)) __PYX_ERR(0, 1984, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__79); + __Pyx_GIVEREF(__pyx_tuple__79); - /* "mtrand.pyx":1981 + /* "mtrand.pyx":1986 * raise ValueError("shape < 0") * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_gamma, size, fshape, * fscale, self.lock) */ - __pyx_tuple__78 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__78)) __PYX_ERR(0, 1981, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__78); - __Pyx_GIVEREF(__pyx_tuple__78); + __pyx_tuple__80 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__80)) __PYX_ERR(0, 1986, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__80); + __Pyx_GIVEREF(__pyx_tuple__80); - /* "mtrand.pyx":1986 + /* "mtrand.pyx":1991 * * if np.any(np.signbit(oshape)): * raise ValueError("shape < 0") # <<<<<<<<<<<<<< * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") */ - __pyx_tuple__79 = PyTuple_Pack(1, __pyx_kp_s_shape_0); if (unlikely(!__pyx_tuple__79)) __PYX_ERR(0, 1986, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__79); - __Pyx_GIVEREF(__pyx_tuple__79); + __pyx_tuple__81 = PyTuple_Pack(1, __pyx_kp_s_shape_0); if (unlikely(!__pyx_tuple__81)) __PYX_ERR(0, 1991, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__81); + __Pyx_GIVEREF(__pyx_tuple__81); - /* "mtrand.pyx":1988 + /* "mtrand.pyx":1993 * raise ValueError("shape < 0") * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale, * self.lock) */ - __pyx_tuple__80 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__80)) __PYX_ERR(0, 1988, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__80); - 
__Pyx_GIVEREF(__pyx_tuple__80); + __pyx_tuple__82 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__82)) __PYX_ERR(0, 1993, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__82); + __Pyx_GIVEREF(__pyx_tuple__82); - /* "mtrand.pyx":2086 + /* "mtrand.pyx":2091 * * if fdfnum <= 0: * raise ValueError("dfnum <= 0") # <<<<<<<<<<<<<< * if fdfden <= 0: * raise ValueError("dfden <= 0") */ - __pyx_tuple__81 = PyTuple_Pack(1, __pyx_kp_s_dfnum_0); if (unlikely(!__pyx_tuple__81)) __PYX_ERR(0, 2086, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__81); - __Pyx_GIVEREF(__pyx_tuple__81); + __pyx_tuple__83 = PyTuple_Pack(1, __pyx_kp_s_dfnum_0); if (unlikely(!__pyx_tuple__83)) __PYX_ERR(0, 2091, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__83); + __Pyx_GIVEREF(__pyx_tuple__83); - /* "mtrand.pyx":2088 + /* "mtrand.pyx":2093 * raise ValueError("dfnum <= 0") * if fdfden <= 0: * raise ValueError("dfden <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_f, size, fdfnum, * fdfden, self.lock) */ - __pyx_tuple__82 = PyTuple_Pack(1, __pyx_kp_s_dfden_0); if (unlikely(!__pyx_tuple__82)) __PYX_ERR(0, 2088, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__82); - __Pyx_GIVEREF(__pyx_tuple__82); + __pyx_tuple__84 = PyTuple_Pack(1, __pyx_kp_s_dfden_0); if (unlikely(!__pyx_tuple__84)) __PYX_ERR(0, 2093, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__84); + __Pyx_GIVEREF(__pyx_tuple__84); - /* "mtrand.pyx":2093 + /* "mtrand.pyx":2098 * * if np.any(np.less_equal(odfnum, 0.0)): * raise ValueError("dfnum <= 0") # <<<<<<<<<<<<<< * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") */ - __pyx_tuple__83 = PyTuple_Pack(1, __pyx_kp_s_dfnum_0); if (unlikely(!__pyx_tuple__83)) __PYX_ERR(0, 2093, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__83); - __Pyx_GIVEREF(__pyx_tuple__83); + __pyx_tuple__85 = PyTuple_Pack(1, __pyx_kp_s_dfnum_0); if (unlikely(!__pyx_tuple__85)) __PYX_ERR(0, 2098, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__85); + __Pyx_GIVEREF(__pyx_tuple__85); - /* 
"mtrand.pyx":2095 + /* "mtrand.pyx":2100 * raise ValueError("dfnum <= 0") * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_f, size, odfnum, odfden, * self.lock) */ - __pyx_tuple__84 = PyTuple_Pack(1, __pyx_kp_s_dfden_0); if (unlikely(!__pyx_tuple__84)) __PYX_ERR(0, 2095, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__84); - __Pyx_GIVEREF(__pyx_tuple__84); + __pyx_tuple__86 = PyTuple_Pack(1, __pyx_kp_s_dfden_0); if (unlikely(!__pyx_tuple__86)) __PYX_ERR(0, 2100, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__86); + __Pyx_GIVEREF(__pyx_tuple__86); - /* "mtrand.pyx":2179 + /* "mtrand.pyx":2188 * - * if fdfnum <= 1: - * raise ValueError("dfnum <= 1") # <<<<<<<<<<<<<< + * if fdfnum <= 0: + * raise ValueError("dfnum <= 0") # <<<<<<<<<<<<<< * if fdfden <= 0: * raise ValueError("dfden <= 0") */ - __pyx_tuple__85 = PyTuple_Pack(1, __pyx_kp_s_dfnum_1); if (unlikely(!__pyx_tuple__85)) __PYX_ERR(0, 2179, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__85); - __Pyx_GIVEREF(__pyx_tuple__85); + __pyx_tuple__87 = PyTuple_Pack(1, __pyx_kp_s_dfnum_0); if (unlikely(!__pyx_tuple__87)) __PYX_ERR(0, 2188, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__87); + __Pyx_GIVEREF(__pyx_tuple__87); - /* "mtrand.pyx":2181 - * raise ValueError("dfnum <= 1") + /* "mtrand.pyx":2190 + * raise ValueError("dfnum <= 0") * if fdfden <= 0: * raise ValueError("dfden <= 0") # <<<<<<<<<<<<<< * if fnonc < 0: * raise ValueError("nonc < 0") */ - __pyx_tuple__86 = PyTuple_Pack(1, __pyx_kp_s_dfden_0); if (unlikely(!__pyx_tuple__86)) __PYX_ERR(0, 2181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__86); - __Pyx_GIVEREF(__pyx_tuple__86); + __pyx_tuple__88 = PyTuple_Pack(1, __pyx_kp_s_dfden_0); if (unlikely(!__pyx_tuple__88)) __PYX_ERR(0, 2190, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__88); + __Pyx_GIVEREF(__pyx_tuple__88); - /* "mtrand.pyx":2183 + /* "mtrand.pyx":2192 * raise ValueError("dfden <= 0") * if fnonc < 0: * raise ValueError("nonc < 
0") # <<<<<<<<<<<<<< * return cont3_array_sc(self.internal_state, rk_noncentral_f, size, * fdfnum, fdfden, fnonc, self.lock) */ - __pyx_tuple__87 = PyTuple_Pack(1, __pyx_kp_s_nonc_0); if (unlikely(!__pyx_tuple__87)) __PYX_ERR(0, 2183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__87); - __Pyx_GIVEREF(__pyx_tuple__87); + __pyx_tuple__89 = PyTuple_Pack(1, __pyx_kp_s_nonc_0); if (unlikely(!__pyx_tuple__89)) __PYX_ERR(0, 2192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__89); + __Pyx_GIVEREF(__pyx_tuple__89); - /* "mtrand.pyx":2188 + /* "mtrand.pyx":2197 * - * if np.any(np.less_equal(odfnum, 1.0)): - * raise ValueError("dfnum <= 1") # <<<<<<<<<<<<<< + * if np.any(np.less_equal(odfnum, 0.0)): + * raise ValueError("dfnum <= 0") # <<<<<<<<<<<<<< * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") */ - __pyx_tuple__88 = PyTuple_Pack(1, __pyx_kp_s_dfnum_1); if (unlikely(!__pyx_tuple__88)) __PYX_ERR(0, 2188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__88); - __Pyx_GIVEREF(__pyx_tuple__88); + __pyx_tuple__90 = PyTuple_Pack(1, __pyx_kp_s_dfnum_0); if (unlikely(!__pyx_tuple__90)) __PYX_ERR(0, 2197, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__90); + __Pyx_GIVEREF(__pyx_tuple__90); - /* "mtrand.pyx":2190 - * raise ValueError("dfnum <= 1") + /* "mtrand.pyx":2199 + * raise ValueError("dfnum <= 0") * if np.any(np.less_equal(odfden, 0.0)): * raise ValueError("dfden <= 0") # <<<<<<<<<<<<<< * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") */ - __pyx_tuple__89 = PyTuple_Pack(1, __pyx_kp_s_dfden_0); if (unlikely(!__pyx_tuple__89)) __PYX_ERR(0, 2190, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__89); - __Pyx_GIVEREF(__pyx_tuple__89); + __pyx_tuple__91 = PyTuple_Pack(1, __pyx_kp_s_dfden_0); if (unlikely(!__pyx_tuple__91)) __PYX_ERR(0, 2199, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__91); + __Pyx_GIVEREF(__pyx_tuple__91); - /* "mtrand.pyx":2192 + /* "mtrand.pyx":2201 * raise ValueError("dfden <= 0") * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc 
< 0") # <<<<<<<<<<<<<< * return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum, * odfden, ononc, self.lock) */ - __pyx_tuple__90 = PyTuple_Pack(1, __pyx_kp_s_nonc_0); if (unlikely(!__pyx_tuple__90)) __PYX_ERR(0, 2192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__90); - __Pyx_GIVEREF(__pyx_tuple__90); + __pyx_tuple__92 = PyTuple_Pack(1, __pyx_kp_s_nonc_0); if (unlikely(!__pyx_tuple__92)) __PYX_ERR(0, 2201, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__92); + __Pyx_GIVEREF(__pyx_tuple__92); - /* "mtrand.pyx":2268 + /* "mtrand.pyx":2277 * * if fdf <= 0: * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf, * self.lock) */ - __pyx_tuple__91 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__91)) __PYX_ERR(0, 2268, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__91); - __Pyx_GIVEREF(__pyx_tuple__91); + __pyx_tuple__93 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__93)) __PYX_ERR(0, 2277, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__93); + __Pyx_GIVEREF(__pyx_tuple__93); - /* "mtrand.pyx":2273 + /* "mtrand.pyx":2282 * * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_chisquare, size, odf, * self.lock) */ - __pyx_tuple__92 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__92)) __PYX_ERR(0, 2273, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__92); - __Pyx_GIVEREF(__pyx_tuple__92); + __pyx_tuple__94 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__94)) __PYX_ERR(0, 2282, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__94); + __Pyx_GIVEREF(__pyx_tuple__94); - /* "mtrand.pyx":2368 + /* "mtrand.pyx":2379 * * if fdf <= 0: * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * if fnonc < 0: * raise ValueError("nonc < 0") */ - __pyx_tuple__93 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__93)) __PYX_ERR(0, 2368, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__93); - 
__Pyx_GIVEREF(__pyx_tuple__93); + __pyx_tuple__95 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__95)) __PYX_ERR(0, 2379, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__95); + __Pyx_GIVEREF(__pyx_tuple__95); - /* "mtrand.pyx":2370 + /* "mtrand.pyx":2381 * raise ValueError("df <= 0") * if fnonc < 0: * raise ValueError("nonc < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_noncentral_chisquare, * size, fdf, fnonc, self.lock) */ - __pyx_tuple__94 = PyTuple_Pack(1, __pyx_kp_s_nonc_0); if (unlikely(!__pyx_tuple__94)) __PYX_ERR(0, 2370, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__94); - __Pyx_GIVEREF(__pyx_tuple__94); + __pyx_tuple__96 = PyTuple_Pack(1, __pyx_kp_s_nonc_0); if (unlikely(!__pyx_tuple__96)) __PYX_ERR(0, 2381, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__96); + __Pyx_GIVEREF(__pyx_tuple__96); - /* "mtrand.pyx":2375 + /* "mtrand.pyx":2386 * * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") */ - __pyx_tuple__95 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__95)) __PYX_ERR(0, 2375, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__95); - __Pyx_GIVEREF(__pyx_tuple__95); + __pyx_tuple__97 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__97)) __PYX_ERR(0, 2386, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__97); + __Pyx_GIVEREF(__pyx_tuple__97); - /* "mtrand.pyx":2377 + /* "mtrand.pyx":2388 * raise ValueError("df <= 0") * if np.any(np.less(ononc, 0.0)): * raise ValueError("nonc < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_noncentral_chisquare, size, * odf, ononc, self.lock) */ - __pyx_tuple__96 = PyTuple_Pack(1, __pyx_kp_s_nonc_0); if (unlikely(!__pyx_tuple__96)) __PYX_ERR(0, 2377, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__96); - __Pyx_GIVEREF(__pyx_tuple__96); + __pyx_tuple__98 = PyTuple_Pack(1, __pyx_kp_s_nonc_0); if (unlikely(!__pyx_tuple__98)) __PYX_ERR(0, 2388, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_tuple__98); + __Pyx_GIVEREF(__pyx_tuple__98); - /* "mtrand.pyx":2542 + /* "mtrand.pyx":2553 * * if fdf <= 0: * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_standard_t, size, * fdf, self.lock) */ - __pyx_tuple__97 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__97)) __PYX_ERR(0, 2542, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__97); - __Pyx_GIVEREF(__pyx_tuple__97); + __pyx_tuple__99 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__99)) __PYX_ERR(0, 2553, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__99); + __Pyx_GIVEREF(__pyx_tuple__99); - /* "mtrand.pyx":2547 + /* "mtrand.pyx":2558 * * if np.any(np.less_equal(odf, 0.0)): * raise ValueError("df <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_standard_t, size, odf, * self.lock) */ - __pyx_tuple__98 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__98)) __PYX_ERR(0, 2547, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__98); - __Pyx_GIVEREF(__pyx_tuple__98); + __pyx_tuple__100 = PyTuple_Pack(1, __pyx_kp_s_df_0); if (unlikely(!__pyx_tuple__100)) __PYX_ERR(0, 2558, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__100); + __Pyx_GIVEREF(__pyx_tuple__100); - /* "mtrand.pyx":2640 + /* "mtrand.pyx":2651 * * if fkappa < 0: * raise ValueError("kappa < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu, * fkappa, self.lock) */ - __pyx_tuple__99 = PyTuple_Pack(1, __pyx_kp_s_kappa_0); if (unlikely(!__pyx_tuple__99)) __PYX_ERR(0, 2640, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__99); - __Pyx_GIVEREF(__pyx_tuple__99); + __pyx_tuple__101 = PyTuple_Pack(1, __pyx_kp_s_kappa_0); if (unlikely(!__pyx_tuple__101)) __PYX_ERR(0, 2651, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__101); + __Pyx_GIVEREF(__pyx_tuple__101); - /* "mtrand.pyx":2645 + /* "mtrand.pyx":2656 * * if np.any(np.less(okappa, 0.0)): * raise ValueError("kappa < 0") # <<<<<<<<<<<<<< * return 
cont2_array(self.internal_state, rk_vonmises, size, omu, okappa, * self.lock) */ - __pyx_tuple__100 = PyTuple_Pack(1, __pyx_kp_s_kappa_0); if (unlikely(!__pyx_tuple__100)) __PYX_ERR(0, 2645, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__100); - __Pyx_GIVEREF(__pyx_tuple__100); + __pyx_tuple__102 = PyTuple_Pack(1, __pyx_kp_s_kappa_0); if (unlikely(!__pyx_tuple__102)) __PYX_ERR(0, 2656, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__102); + __Pyx_GIVEREF(__pyx_tuple__102); - /* "mtrand.pyx":2751 + /* "mtrand.pyx":2762 * * if fa <= 0: * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_pareto, size, fa, * self.lock) */ - __pyx_tuple__101 = PyTuple_Pack(1, __pyx_kp_s_a_0); if (unlikely(!__pyx_tuple__101)) __PYX_ERR(0, 2751, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__101); - __Pyx_GIVEREF(__pyx_tuple__101); + __pyx_tuple__103 = PyTuple_Pack(1, __pyx_kp_s_a_0); if (unlikely(!__pyx_tuple__103)) __PYX_ERR(0, 2762, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__103); + __Pyx_GIVEREF(__pyx_tuple__103); - /* "mtrand.pyx":2756 + /* "mtrand.pyx":2767 * * if np.any(np.less_equal(oa, 0.0)): * raise ValueError("a <= 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_pareto, size, oa, self.lock) * */ - __pyx_tuple__102 = PyTuple_Pack(1, __pyx_kp_s_a_0); if (unlikely(!__pyx_tuple__102)) __PYX_ERR(0, 2756, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__102); - __Pyx_GIVEREF(__pyx_tuple__102); + __pyx_tuple__104 = PyTuple_Pack(1, __pyx_kp_s_a_0); if (unlikely(!__pyx_tuple__104)) __PYX_ERR(0, 2767, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__104); + __Pyx_GIVEREF(__pyx_tuple__104); - /* "mtrand.pyx":2860 + /* "mtrand.pyx":2871 * fa = PyFloat_AsDouble(a) * if np.signbit(fa): * raise ValueError("a < 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_weibull, size, fa, * self.lock) */ - __pyx_tuple__103 = PyTuple_Pack(1, __pyx_kp_s_a_0_2); if (unlikely(!__pyx_tuple__103)) __PYX_ERR(0, 2860, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_tuple__103); - __Pyx_GIVEREF(__pyx_tuple__103); + __pyx_tuple__105 = PyTuple_Pack(1, __pyx_kp_s_a_0_2); if (unlikely(!__pyx_tuple__105)) __PYX_ERR(0, 2871, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__105); + __Pyx_GIVEREF(__pyx_tuple__105); - /* "mtrand.pyx":2865 + /* "mtrand.pyx":2876 * * if np.any(np.signbit(oa)): * raise ValueError("a < 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_weibull, size, oa, * self.lock) */ - __pyx_tuple__104 = PyTuple_Pack(1, __pyx_kp_s_a_0_2); if (unlikely(!__pyx_tuple__104)) __PYX_ERR(0, 2865, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__104); - __Pyx_GIVEREF(__pyx_tuple__104); + __pyx_tuple__106 = PyTuple_Pack(1, __pyx_kp_s_a_0_2); if (unlikely(!__pyx_tuple__106)) __PYX_ERR(0, 2876, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__106); + __Pyx_GIVEREF(__pyx_tuple__106); - /* "mtrand.pyx":2972 + /* "mtrand.pyx":2983 * fa = PyFloat_AsDouble(a) * if np.signbit(fa): * raise ValueError("a < 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_power, size, fa, * self.lock) */ - __pyx_tuple__105 = PyTuple_Pack(1, __pyx_kp_s_a_0_2); if (unlikely(!__pyx_tuple__105)) __PYX_ERR(0, 2972, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__105); - __Pyx_GIVEREF(__pyx_tuple__105); + __pyx_tuple__107 = PyTuple_Pack(1, __pyx_kp_s_a_0_2); if (unlikely(!__pyx_tuple__107)) __PYX_ERR(0, 2983, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__107); + __Pyx_GIVEREF(__pyx_tuple__107); - /* "mtrand.pyx":2977 + /* "mtrand.pyx":2988 * * if np.any(np.signbit(oa)): * raise ValueError("a < 0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_power, size, oa, self.lock) * */ - __pyx_tuple__106 = PyTuple_Pack(1, __pyx_kp_s_a_0_2); if (unlikely(!__pyx_tuple__106)) __PYX_ERR(0, 2977, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__106); - __Pyx_GIVEREF(__pyx_tuple__106); + __pyx_tuple__108 = PyTuple_Pack(1, __pyx_kp_s_a_0_2); if (unlikely(!__pyx_tuple__108)) __PYX_ERR(0, 2988, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_tuple__108); + __Pyx_GIVEREF(__pyx_tuple__108); - /* "mtrand.pyx":3069 + /* "mtrand.pyx":3080 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_laplace, size, floc, * fscale, self.lock) */ - __pyx_tuple__107 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__107)) __PYX_ERR(0, 3069, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__107); - __Pyx_GIVEREF(__pyx_tuple__107); + __pyx_tuple__109 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__109)) __PYX_ERR(0, 3080, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__109); + __Pyx_GIVEREF(__pyx_tuple__109); - /* "mtrand.pyx":3074 + /* "mtrand.pyx":3085 * * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale, * self.lock) */ - __pyx_tuple__108 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__108)) __PYX_ERR(0, 3074, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__108); - __Pyx_GIVEREF(__pyx_tuple__108); + __pyx_tuple__110 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__110)) __PYX_ERR(0, 3085, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__110); + __Pyx_GIVEREF(__pyx_tuple__110); - /* "mtrand.pyx":3200 + /* "mtrand.pyx":3211 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_gumbel, size, floc, * fscale, self.lock) */ - __pyx_tuple__109 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__109)) __PYX_ERR(0, 3200, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__109); - __Pyx_GIVEREF(__pyx_tuple__109); + __pyx_tuple__111 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__111)) __PYX_ERR(0, 3211, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__111); + __Pyx_GIVEREF(__pyx_tuple__111); - /* "mtrand.pyx":3205 + /* "mtrand.pyx":3216 * 
* if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale, * self.lock) */ - __pyx_tuple__110 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__110)) __PYX_ERR(0, 3205, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__110); - __Pyx_GIVEREF(__pyx_tuple__110); + __pyx_tuple__112 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__112)) __PYX_ERR(0, 3216, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__112); + __Pyx_GIVEREF(__pyx_tuple__112); - /* "mtrand.pyx":3293 + /* "mtrand.pyx":3304 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_logistic, size, floc, * fscale, self.lock) */ - __pyx_tuple__111 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__111)) __PYX_ERR(0, 3293, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__111); - __Pyx_GIVEREF(__pyx_tuple__111); + __pyx_tuple__113 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__113)) __PYX_ERR(0, 3304, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__113); + __Pyx_GIVEREF(__pyx_tuple__113); - /* "mtrand.pyx":3298 + /* "mtrand.pyx":3309 * * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_logistic, size, oloc, * oscale, self.lock) */ - __pyx_tuple__112 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__112)) __PYX_ERR(0, 3298, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__112); - __Pyx_GIVEREF(__pyx_tuple__112); + __pyx_tuple__114 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__114)) __PYX_ERR(0, 3309, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__114); + __Pyx_GIVEREF(__pyx_tuple__114); - /* "mtrand.pyx":3417 + /* "mtrand.pyx":3428 * fsigma = PyFloat_AsDouble(sigma) * if np.signbit(fsigma): * raise ValueError("sigma < 0") # <<<<<<<<<<<<<< * return 
cont2_array_sc(self.internal_state, rk_lognormal, size, * fmean, fsigma, self.lock) */ - __pyx_tuple__113 = PyTuple_Pack(1, __pyx_kp_s_sigma_0); if (unlikely(!__pyx_tuple__113)) __PYX_ERR(0, 3417, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__113); - __Pyx_GIVEREF(__pyx_tuple__113); + __pyx_tuple__115 = PyTuple_Pack(1, __pyx_kp_s_sigma_0); if (unlikely(!__pyx_tuple__115)) __PYX_ERR(0, 3428, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__115); + __Pyx_GIVEREF(__pyx_tuple__115); - /* "mtrand.pyx":3422 + /* "mtrand.pyx":3433 * * if np.any(np.signbit(osigma)): * raise ValueError("sigma < 0.0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_lognormal, size, omean, * osigma, self.lock) */ - __pyx_tuple__114 = PyTuple_Pack(1, __pyx_kp_s_sigma_0_0); if (unlikely(!__pyx_tuple__114)) __PYX_ERR(0, 3422, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__114); - __Pyx_GIVEREF(__pyx_tuple__114); + __pyx_tuple__116 = PyTuple_Pack(1, __pyx_kp_s_sigma_0_0); if (unlikely(!__pyx_tuple__116)) __PYX_ERR(0, 3433, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__116); + __Pyx_GIVEREF(__pyx_tuple__116); - /* "mtrand.pyx":3496 + /* "mtrand.pyx":3507 * fscale = PyFloat_AsDouble(scale) * if np.signbit(fscale): * raise ValueError("scale < 0") # <<<<<<<<<<<<<< * return cont1_array_sc(self.internal_state, rk_rayleigh, size, * fscale, self.lock) */ - __pyx_tuple__115 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__115)) __PYX_ERR(0, 3496, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__115); - __Pyx_GIVEREF(__pyx_tuple__115); + __pyx_tuple__117 = PyTuple_Pack(1, __pyx_kp_s_scale_0); if (unlikely(!__pyx_tuple__117)) __PYX_ERR(0, 3507, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__117); + __Pyx_GIVEREF(__pyx_tuple__117); - /* "mtrand.pyx":3501 + /* "mtrand.pyx":3512 * * if np.any(np.signbit(oscale)): * raise ValueError("scale < 0.0") # <<<<<<<<<<<<<< * return cont1_array(self.internal_state, rk_rayleigh, size, oscale, * self.lock) */ - __pyx_tuple__116 = PyTuple_Pack(1, 
__pyx_kp_s_scale_0_0); if (unlikely(!__pyx_tuple__116)) __PYX_ERR(0, 3501, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__116); - __Pyx_GIVEREF(__pyx_tuple__116); + __pyx_tuple__118 = PyTuple_Pack(1, __pyx_kp_s_scale_0_0); if (unlikely(!__pyx_tuple__118)) __PYX_ERR(0, 3512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__118); + __Pyx_GIVEREF(__pyx_tuple__118); - /* "mtrand.pyx":3579 + /* "mtrand.pyx":3590 * * if fmean <= 0: * raise ValueError("mean <= 0") # <<<<<<<<<<<<<< * if fscale <= 0: * raise ValueError("scale <= 0") */ - __pyx_tuple__117 = PyTuple_Pack(1, __pyx_kp_s_mean_0); if (unlikely(!__pyx_tuple__117)) __PYX_ERR(0, 3579, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__117); - __Pyx_GIVEREF(__pyx_tuple__117); + __pyx_tuple__119 = PyTuple_Pack(1, __pyx_kp_s_mean_0); if (unlikely(!__pyx_tuple__119)) __PYX_ERR(0, 3590, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__119); + __Pyx_GIVEREF(__pyx_tuple__119); - /* "mtrand.pyx":3581 + /* "mtrand.pyx":3592 * raise ValueError("mean <= 0") * if fscale <= 0: * raise ValueError("scale <= 0") # <<<<<<<<<<<<<< * return cont2_array_sc(self.internal_state, rk_wald, size, fmean, * fscale, self.lock) */ - __pyx_tuple__118 = PyTuple_Pack(1, __pyx_kp_s_scale_0_2); if (unlikely(!__pyx_tuple__118)) __PYX_ERR(0, 3581, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__118); - __Pyx_GIVEREF(__pyx_tuple__118); + __pyx_tuple__120 = PyTuple_Pack(1, __pyx_kp_s_scale_0_2); if (unlikely(!__pyx_tuple__120)) __PYX_ERR(0, 3592, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__120); + __Pyx_GIVEREF(__pyx_tuple__120); - /* "mtrand.pyx":3586 + /* "mtrand.pyx":3597 * * if np.any(np.less_equal(omean,0.0)): * raise ValueError("mean <= 0.0") # <<<<<<<<<<<<<< * elif np.any(np.less_equal(oscale,0.0)): * raise ValueError("scale <= 0.0") */ - __pyx_tuple__119 = PyTuple_Pack(1, __pyx_kp_s_mean_0_0); if (unlikely(!__pyx_tuple__119)) __PYX_ERR(0, 3586, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__119); - __Pyx_GIVEREF(__pyx_tuple__119); + __pyx_tuple__121 = PyTuple_Pack(1, 
__pyx_kp_s_mean_0_0); if (unlikely(!__pyx_tuple__121)) __PYX_ERR(0, 3597, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__121); + __Pyx_GIVEREF(__pyx_tuple__121); - /* "mtrand.pyx":3588 + /* "mtrand.pyx":3599 * raise ValueError("mean <= 0.0") * elif np.any(np.less_equal(oscale,0.0)): * raise ValueError("scale <= 0.0") # <<<<<<<<<<<<<< * return cont2_array(self.internal_state, rk_wald, size, omean, oscale, * self.lock) */ - __pyx_tuple__120 = PyTuple_Pack(1, __pyx_kp_s_scale_0_0_2); if (unlikely(!__pyx_tuple__120)) __PYX_ERR(0, 3588, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__120); - __Pyx_GIVEREF(__pyx_tuple__120); + __pyx_tuple__122 = PyTuple_Pack(1, __pyx_kp_s_scale_0_0_2); if (unlikely(!__pyx_tuple__122)) __PYX_ERR(0, 3599, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__122); + __Pyx_GIVEREF(__pyx_tuple__122); - /* "mtrand.pyx":3668 + /* "mtrand.pyx":3679 * * if fleft > fmode: * raise ValueError("left > mode") # <<<<<<<<<<<<<< * if fmode > fright: * raise ValueError("mode > right") */ - __pyx_tuple__121 = PyTuple_Pack(1, __pyx_kp_s_left_mode); if (unlikely(!__pyx_tuple__121)) __PYX_ERR(0, 3668, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__121); - __Pyx_GIVEREF(__pyx_tuple__121); + __pyx_tuple__123 = PyTuple_Pack(1, __pyx_kp_s_left_mode); if (unlikely(!__pyx_tuple__123)) __PYX_ERR(0, 3679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__123); + __Pyx_GIVEREF(__pyx_tuple__123); - /* "mtrand.pyx":3670 + /* "mtrand.pyx":3681 * raise ValueError("left > mode") * if fmode > fright: * raise ValueError("mode > right") # <<<<<<<<<<<<<< * if fleft == fright: * raise ValueError("left == right") */ - __pyx_tuple__122 = PyTuple_Pack(1, __pyx_kp_s_mode_right); if (unlikely(!__pyx_tuple__122)) __PYX_ERR(0, 3670, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__122); - __Pyx_GIVEREF(__pyx_tuple__122); + __pyx_tuple__124 = PyTuple_Pack(1, __pyx_kp_s_mode_right); if (unlikely(!__pyx_tuple__124)) __PYX_ERR(0, 3681, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__124); + 
__Pyx_GIVEREF(__pyx_tuple__124); - /* "mtrand.pyx":3672 + /* "mtrand.pyx":3683 * raise ValueError("mode > right") * if fleft == fright: * raise ValueError("left == right") # <<<<<<<<<<<<<< * return cont3_array_sc(self.internal_state, rk_triangular, size, * fleft, fmode, fright, self.lock) */ - __pyx_tuple__123 = PyTuple_Pack(1, __pyx_kp_s_left_right); if (unlikely(!__pyx_tuple__123)) __PYX_ERR(0, 3672, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__123); - __Pyx_GIVEREF(__pyx_tuple__123); + __pyx_tuple__125 = PyTuple_Pack(1, __pyx_kp_s_left_right); if (unlikely(!__pyx_tuple__125)) __PYX_ERR(0, 3683, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__125); + __Pyx_GIVEREF(__pyx_tuple__125); - /* "mtrand.pyx":3677 + /* "mtrand.pyx":3688 * * if np.any(np.greater(oleft, omode)): * raise ValueError("left > mode") # <<<<<<<<<<<<<< * if np.any(np.greater(omode, oright)): * raise ValueError("mode > right") */ - __pyx_tuple__124 = PyTuple_Pack(1, __pyx_kp_s_left_mode); if (unlikely(!__pyx_tuple__124)) __PYX_ERR(0, 3677, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__124); - __Pyx_GIVEREF(__pyx_tuple__124); + __pyx_tuple__126 = PyTuple_Pack(1, __pyx_kp_s_left_mode); if (unlikely(!__pyx_tuple__126)) __PYX_ERR(0, 3688, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__126); + __Pyx_GIVEREF(__pyx_tuple__126); - /* "mtrand.pyx":3679 + /* "mtrand.pyx":3690 * raise ValueError("left > mode") * if np.any(np.greater(omode, oright)): * raise ValueError("mode > right") # <<<<<<<<<<<<<< * if np.any(np.equal(oleft, oright)): * raise ValueError("left == right") */ - __pyx_tuple__125 = PyTuple_Pack(1, __pyx_kp_s_mode_right); if (unlikely(!__pyx_tuple__125)) __PYX_ERR(0, 3679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__125); - __Pyx_GIVEREF(__pyx_tuple__125); + __pyx_tuple__127 = PyTuple_Pack(1, __pyx_kp_s_mode_right); if (unlikely(!__pyx_tuple__127)) __PYX_ERR(0, 3690, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__127); + __Pyx_GIVEREF(__pyx_tuple__127); - /* "mtrand.pyx":3681 + /* "mtrand.pyx":3692 * raise 
ValueError("mode > right") * if np.any(np.equal(oleft, oright)): * raise ValueError("left == right") # <<<<<<<<<<<<<< * return cont3_array(self.internal_state, rk_triangular, size, oleft, * omode, oright, self.lock) */ - __pyx_tuple__126 = PyTuple_Pack(1, __pyx_kp_s_left_right); if (unlikely(!__pyx_tuple__126)) __PYX_ERR(0, 3681, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__126); - __Pyx_GIVEREF(__pyx_tuple__126); + __pyx_tuple__128 = PyTuple_Pack(1, __pyx_kp_s_left_right); if (unlikely(!__pyx_tuple__128)) __PYX_ERR(0, 3692, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__128); + __Pyx_GIVEREF(__pyx_tuple__128); - /* "mtrand.pyx":3783 + /* "mtrand.pyx":3794 * * if ln < 0: * raise ValueError("n < 0") # <<<<<<<<<<<<<< * if fp < 0: * raise ValueError("p < 0") */ - __pyx_tuple__127 = PyTuple_Pack(1, __pyx_kp_s_n_0); if (unlikely(!__pyx_tuple__127)) __PYX_ERR(0, 3783, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__127); - __Pyx_GIVEREF(__pyx_tuple__127); + __pyx_tuple__129 = PyTuple_Pack(1, __pyx_kp_s_n_0); if (unlikely(!__pyx_tuple__129)) __PYX_ERR(0, 3794, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__129); + __Pyx_GIVEREF(__pyx_tuple__129); - /* "mtrand.pyx":3785 + /* "mtrand.pyx":3796 * raise ValueError("n < 0") * if fp < 0: * raise ValueError("p < 0") # <<<<<<<<<<<<<< * elif fp > 1: * raise ValueError("p > 1") */ - __pyx_tuple__128 = PyTuple_Pack(1, __pyx_kp_s_p_0); if (unlikely(!__pyx_tuple__128)) __PYX_ERR(0, 3785, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__128); - __Pyx_GIVEREF(__pyx_tuple__128); + __pyx_tuple__130 = PyTuple_Pack(1, __pyx_kp_s_p_0); if (unlikely(!__pyx_tuple__130)) __PYX_ERR(0, 3796, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__130); + __Pyx_GIVEREF(__pyx_tuple__130); - /* "mtrand.pyx":3787 + /* "mtrand.pyx":3798 * raise ValueError("p < 0") * elif fp > 1: * raise ValueError("p > 1") # <<<<<<<<<<<<<< * elif np.isnan(fp): * raise ValueError("p is nan") */ - __pyx_tuple__129 = PyTuple_Pack(1, __pyx_kp_s_p_1); if (unlikely(!__pyx_tuple__129)) __PYX_ERR(0, 
3787, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__129); - __Pyx_GIVEREF(__pyx_tuple__129); + __pyx_tuple__131 = PyTuple_Pack(1, __pyx_kp_s_p_1); if (unlikely(!__pyx_tuple__131)) __PYX_ERR(0, 3798, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__131); + __Pyx_GIVEREF(__pyx_tuple__131); - /* "mtrand.pyx":3789 + /* "mtrand.pyx":3800 * raise ValueError("p > 1") * elif np.isnan(fp): * raise ValueError("p is nan") # <<<<<<<<<<<<<< * return discnp_array_sc(self.internal_state, rk_binomial, size, ln, * fp, self.lock) */ - __pyx_tuple__130 = PyTuple_Pack(1, __pyx_kp_s_p_is_nan); if (unlikely(!__pyx_tuple__130)) __PYX_ERR(0, 3789, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__130); - __Pyx_GIVEREF(__pyx_tuple__130); + __pyx_tuple__132 = PyTuple_Pack(1, __pyx_kp_s_p_is_nan); if (unlikely(!__pyx_tuple__132)) __PYX_ERR(0, 3800, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__132); + __Pyx_GIVEREF(__pyx_tuple__132); - /* "mtrand.pyx":3794 + /* "mtrand.pyx":3805 * * if np.any(np.less(n, 0)): * raise ValueError("n < 0") # <<<<<<<<<<<<<< * if np.any(np.less(p, 0)): * raise ValueError("p < 0") */ - __pyx_tuple__131 = PyTuple_Pack(1, __pyx_kp_s_n_0); if (unlikely(!__pyx_tuple__131)) __PYX_ERR(0, 3794, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__131); - __Pyx_GIVEREF(__pyx_tuple__131); + __pyx_tuple__133 = PyTuple_Pack(1, __pyx_kp_s_n_0); if (unlikely(!__pyx_tuple__133)) __PYX_ERR(0, 3805, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__133); + __Pyx_GIVEREF(__pyx_tuple__133); - /* "mtrand.pyx":3796 + /* "mtrand.pyx":3807 * raise ValueError("n < 0") * if np.any(np.less(p, 0)): * raise ValueError("p < 0") # <<<<<<<<<<<<<< * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") */ - __pyx_tuple__132 = PyTuple_Pack(1, __pyx_kp_s_p_0); if (unlikely(!__pyx_tuple__132)) __PYX_ERR(0, 3796, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__132); - __Pyx_GIVEREF(__pyx_tuple__132); + __pyx_tuple__134 = PyTuple_Pack(1, __pyx_kp_s_p_0); if (unlikely(!__pyx_tuple__134)) __PYX_ERR(0, 3807, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_tuple__134); + __Pyx_GIVEREF(__pyx_tuple__134); - /* "mtrand.pyx":3798 + /* "mtrand.pyx":3809 * raise ValueError("p < 0") * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") # <<<<<<<<<<<<<< * return discnp_array(self.internal_state, rk_binomial, size, on, op, * self.lock) */ - __pyx_tuple__133 = PyTuple_Pack(1, __pyx_kp_s_p_1); if (unlikely(!__pyx_tuple__133)) __PYX_ERR(0, 3798, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__133); - __Pyx_GIVEREF(__pyx_tuple__133); + __pyx_tuple__135 = PyTuple_Pack(1, __pyx_kp_s_p_1); if (unlikely(!__pyx_tuple__135)) __PYX_ERR(0, 3809, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__135); + __Pyx_GIVEREF(__pyx_tuple__135); - /* "mtrand.pyx":3886 + /* "mtrand.pyx":3897 * * if fn <= 0: * raise ValueError("n <= 0") # <<<<<<<<<<<<<< * if fp < 0: * raise ValueError("p < 0") */ - __pyx_tuple__134 = PyTuple_Pack(1, __pyx_kp_s_n_0_2); if (unlikely(!__pyx_tuple__134)) __PYX_ERR(0, 3886, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__134); - __Pyx_GIVEREF(__pyx_tuple__134); + __pyx_tuple__136 = PyTuple_Pack(1, __pyx_kp_s_n_0_2); if (unlikely(!__pyx_tuple__136)) __PYX_ERR(0, 3897, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__136); + __Pyx_GIVEREF(__pyx_tuple__136); - /* "mtrand.pyx":3888 + /* "mtrand.pyx":3899 * raise ValueError("n <= 0") * if fp < 0: * raise ValueError("p < 0") # <<<<<<<<<<<<<< * elif fp > 1: * raise ValueError("p > 1") */ - __pyx_tuple__135 = PyTuple_Pack(1, __pyx_kp_s_p_0); if (unlikely(!__pyx_tuple__135)) __PYX_ERR(0, 3888, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__135); - __Pyx_GIVEREF(__pyx_tuple__135); + __pyx_tuple__137 = PyTuple_Pack(1, __pyx_kp_s_p_0); if (unlikely(!__pyx_tuple__137)) __PYX_ERR(0, 3899, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__137); + __Pyx_GIVEREF(__pyx_tuple__137); - /* "mtrand.pyx":3890 + /* "mtrand.pyx":3901 * raise ValueError("p < 0") * elif fp > 1: * raise ValueError("p > 1") # <<<<<<<<<<<<<< * return discdd_array_sc(self.internal_state, rk_negative_binomial, * size, fn, 
fp, self.lock) */ - __pyx_tuple__136 = PyTuple_Pack(1, __pyx_kp_s_p_1); if (unlikely(!__pyx_tuple__136)) __PYX_ERR(0, 3890, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__136); - __Pyx_GIVEREF(__pyx_tuple__136); + __pyx_tuple__138 = PyTuple_Pack(1, __pyx_kp_s_p_1); if (unlikely(!__pyx_tuple__138)) __PYX_ERR(0, 3901, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__138); + __Pyx_GIVEREF(__pyx_tuple__138); - /* "mtrand.pyx":3895 + /* "mtrand.pyx":3906 * * if np.any(np.less_equal(n, 0)): * raise ValueError("n <= 0") # <<<<<<<<<<<<<< * if np.any(np.less(p, 0)): * raise ValueError("p < 0") */ - __pyx_tuple__137 = PyTuple_Pack(1, __pyx_kp_s_n_0_2); if (unlikely(!__pyx_tuple__137)) __PYX_ERR(0, 3895, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__137); - __Pyx_GIVEREF(__pyx_tuple__137); + __pyx_tuple__139 = PyTuple_Pack(1, __pyx_kp_s_n_0_2); if (unlikely(!__pyx_tuple__139)) __PYX_ERR(0, 3906, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__139); + __Pyx_GIVEREF(__pyx_tuple__139); - /* "mtrand.pyx":3897 + /* "mtrand.pyx":3908 * raise ValueError("n <= 0") * if np.any(np.less(p, 0)): * raise ValueError("p < 0") # <<<<<<<<<<<<<< * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") */ - __pyx_tuple__138 = PyTuple_Pack(1, __pyx_kp_s_p_0); if (unlikely(!__pyx_tuple__138)) __PYX_ERR(0, 3897, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__138); - __Pyx_GIVEREF(__pyx_tuple__138); + __pyx_tuple__140 = PyTuple_Pack(1, __pyx_kp_s_p_0); if (unlikely(!__pyx_tuple__140)) __PYX_ERR(0, 3908, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__140); + __Pyx_GIVEREF(__pyx_tuple__140); - /* "mtrand.pyx":3899 + /* "mtrand.pyx":3910 * raise ValueError("p < 0") * if np.any(np.greater(p, 1)): * raise ValueError("p > 1") # <<<<<<<<<<<<<< * return discdd_array(self.internal_state, rk_negative_binomial, size, * on, op, self.lock) */ - __pyx_tuple__139 = PyTuple_Pack(1, __pyx_kp_s_p_1); if (unlikely(!__pyx_tuple__139)) __PYX_ERR(0, 3899, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__139); - 
__Pyx_GIVEREF(__pyx_tuple__139); + __pyx_tuple__141 = PyTuple_Pack(1, __pyx_kp_s_p_1); if (unlikely(!__pyx_tuple__141)) __PYX_ERR(0, 3910, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__141); + __Pyx_GIVEREF(__pyx_tuple__141); - /* "mtrand.pyx":3978 + /* "mtrand.pyx":3989 * * if lam < 0: * raise ValueError("lam < 0") # <<<<<<<<<<<<<< * if lam > self.poisson_lam_max: * raise ValueError("lam value too large") */ - __pyx_tuple__140 = PyTuple_Pack(1, __pyx_kp_s_lam_0); if (unlikely(!__pyx_tuple__140)) __PYX_ERR(0, 3978, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__140); - __Pyx_GIVEREF(__pyx_tuple__140); + __pyx_tuple__142 = PyTuple_Pack(1, __pyx_kp_s_lam_0); if (unlikely(!__pyx_tuple__142)) __PYX_ERR(0, 3989, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__142); + __Pyx_GIVEREF(__pyx_tuple__142); - /* "mtrand.pyx":3980 + /* "mtrand.pyx":3991 * raise ValueError("lam < 0") * if lam > self.poisson_lam_max: * raise ValueError("lam value too large") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_poisson, size, flam, * self.lock) */ - __pyx_tuple__141 = PyTuple_Pack(1, __pyx_kp_s_lam_value_too_large); if (unlikely(!__pyx_tuple__141)) __PYX_ERR(0, 3980, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__141); - __Pyx_GIVEREF(__pyx_tuple__141); + __pyx_tuple__143 = PyTuple_Pack(1, __pyx_kp_s_lam_value_too_large); if (unlikely(!__pyx_tuple__143)) __PYX_ERR(0, 3991, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__143); + __Pyx_GIVEREF(__pyx_tuple__143); - /* "mtrand.pyx":3985 + /* "mtrand.pyx":3996 * * if np.any(np.less(olam, 0)): * raise ValueError("lam < 0") # <<<<<<<<<<<<<< * if np.any(np.greater(olam, self.poisson_lam_max)): * raise ValueError("lam value too large.") */ - __pyx_tuple__142 = PyTuple_Pack(1, __pyx_kp_s_lam_0); if (unlikely(!__pyx_tuple__142)) __PYX_ERR(0, 3985, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__142); - __Pyx_GIVEREF(__pyx_tuple__142); + __pyx_tuple__144 = PyTuple_Pack(1, __pyx_kp_s_lam_0); if (unlikely(!__pyx_tuple__144)) __PYX_ERR(0, 3996, 
__pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__144); + __Pyx_GIVEREF(__pyx_tuple__144); - /* "mtrand.pyx":3987 + /* "mtrand.pyx":3998 * raise ValueError("lam < 0") * if np.any(np.greater(olam, self.poisson_lam_max)): * raise ValueError("lam value too large.") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_poisson, size, olam, * self.lock) */ - __pyx_tuple__143 = PyTuple_Pack(1, __pyx_kp_s_lam_value_too_large_2); if (unlikely(!__pyx_tuple__143)) __PYX_ERR(0, 3987, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__143); - __Pyx_GIVEREF(__pyx_tuple__143); + __pyx_tuple__145 = PyTuple_Pack(1, __pyx_kp_s_lam_value_too_large_2); if (unlikely(!__pyx_tuple__145)) __PYX_ERR(0, 3998, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__145); + __Pyx_GIVEREF(__pyx_tuple__145); - /* "mtrand.pyx":4074 - * - * if fa <= 1.0: - * raise ValueError("a <= 1.0") # <<<<<<<<<<<<<< + /* "mtrand.pyx":4086 + * # use logic that ensures NaN is rejected. + * if not fa > 1.0: + * raise ValueError("'a' must be a valid float > 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_zipf, size, fa, * self.lock) */ - __pyx_tuple__144 = PyTuple_Pack(1, __pyx_kp_s_a_1_0); if (unlikely(!__pyx_tuple__144)) __PYX_ERR(0, 4074, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__144); - __Pyx_GIVEREF(__pyx_tuple__144); + __pyx_tuple__146 = PyTuple_Pack(1, __pyx_kp_s_a_must_be_a_valid_float_1_0); if (unlikely(!__pyx_tuple__146)) __PYX_ERR(0, 4086, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__146); + __Pyx_GIVEREF(__pyx_tuple__146); - /* "mtrand.pyx":4079 - * - * if np.any(np.less_equal(oa, 1.0)): - * raise ValueError("a <= 1.0") # <<<<<<<<<<<<<< + /* "mtrand.pyx":4092 + * # use logic that ensures NaN is rejected. 
+ * if not np.all(np.greater(oa, 1.0)): + * raise ValueError("'a' must contain valid floats > 1.0") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_zipf, size, oa, self.lock) * */ - __pyx_tuple__145 = PyTuple_Pack(1, __pyx_kp_s_a_1_0); if (unlikely(!__pyx_tuple__145)) __PYX_ERR(0, 4079, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__145); - __Pyx_GIVEREF(__pyx_tuple__145); + __pyx_tuple__147 = PyTuple_Pack(1, __pyx_kp_s_a_must_contain_valid_floats_1_0); if (unlikely(!__pyx_tuple__147)) __PYX_ERR(0, 4092, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__147); + __Pyx_GIVEREF(__pyx_tuple__147); - /* "mtrand.pyx":4137 + /* "mtrand.pyx":4150 * * if fp < 0.0: * raise ValueError("p < 0.0") # <<<<<<<<<<<<<< * if fp > 1.0: * raise ValueError("p > 1.0") */ - __pyx_tuple__146 = PyTuple_Pack(1, __pyx_kp_s_p_0_0); if (unlikely(!__pyx_tuple__146)) __PYX_ERR(0, 4137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__146); - __Pyx_GIVEREF(__pyx_tuple__146); + __pyx_tuple__148 = PyTuple_Pack(1, __pyx_kp_s_p_0_0); if (unlikely(!__pyx_tuple__148)) __PYX_ERR(0, 4150, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__148); + __Pyx_GIVEREF(__pyx_tuple__148); - /* "mtrand.pyx":4139 + /* "mtrand.pyx":4152 * raise ValueError("p < 0.0") * if fp > 1.0: * raise ValueError("p > 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_geometric, size, fp, * self.lock) */ - __pyx_tuple__147 = PyTuple_Pack(1, __pyx_kp_s_p_1_0); if (unlikely(!__pyx_tuple__147)) __PYX_ERR(0, 4139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__147); - __Pyx_GIVEREF(__pyx_tuple__147); + __pyx_tuple__149 = PyTuple_Pack(1, __pyx_kp_s_p_1_0); if (unlikely(!__pyx_tuple__149)) __PYX_ERR(0, 4152, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__149); + __Pyx_GIVEREF(__pyx_tuple__149); - /* "mtrand.pyx":4144 + /* "mtrand.pyx":4157 * * if np.any(np.less(op, 0.0)): * raise ValueError("p < 0.0") # <<<<<<<<<<<<<< * if np.any(np.greater(op, 1.0)): * raise ValueError("p > 1.0") */ - __pyx_tuple__148 = PyTuple_Pack(1, 
__pyx_kp_s_p_0_0); if (unlikely(!__pyx_tuple__148)) __PYX_ERR(0, 4144, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__148); - __Pyx_GIVEREF(__pyx_tuple__148); + __pyx_tuple__150 = PyTuple_Pack(1, __pyx_kp_s_p_0_0); if (unlikely(!__pyx_tuple__150)) __PYX_ERR(0, 4157, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__150); + __Pyx_GIVEREF(__pyx_tuple__150); - /* "mtrand.pyx":4146 + /* "mtrand.pyx":4159 * raise ValueError("p < 0.0") * if np.any(np.greater(op, 1.0)): * raise ValueError("p > 1.0") # <<<<<<<<<<<<<< * return discd_array(self.internal_state, rk_geometric, size, op, * self.lock) */ - __pyx_tuple__149 = PyTuple_Pack(1, __pyx_kp_s_p_1_0); if (unlikely(!__pyx_tuple__149)) __PYX_ERR(0, 4146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__149); - __Pyx_GIVEREF(__pyx_tuple__149); + __pyx_tuple__151 = PyTuple_Pack(1, __pyx_kp_s_p_1_0); if (unlikely(!__pyx_tuple__151)) __PYX_ERR(0, 4159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__151); + __Pyx_GIVEREF(__pyx_tuple__151); - /* "mtrand.pyx":4251 + /* "mtrand.pyx":4264 * * if lngood < 0: * raise ValueError("ngood < 0") # <<<<<<<<<<<<<< * if lnbad < 0: * raise ValueError("nbad < 0") */ - __pyx_tuple__150 = PyTuple_Pack(1, __pyx_kp_s_ngood_0); if (unlikely(!__pyx_tuple__150)) __PYX_ERR(0, 4251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__150); - __Pyx_GIVEREF(__pyx_tuple__150); + __pyx_tuple__152 = PyTuple_Pack(1, __pyx_kp_s_ngood_0); if (unlikely(!__pyx_tuple__152)) __PYX_ERR(0, 4264, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__152); + __Pyx_GIVEREF(__pyx_tuple__152); - /* "mtrand.pyx":4253 + /* "mtrand.pyx":4266 * raise ValueError("ngood < 0") * if lnbad < 0: * raise ValueError("nbad < 0") # <<<<<<<<<<<<<< * if lnsample < 1: * raise ValueError("nsample < 1") */ - __pyx_tuple__151 = PyTuple_Pack(1, __pyx_kp_s_nbad_0); if (unlikely(!__pyx_tuple__151)) __PYX_ERR(0, 4253, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__151); - __Pyx_GIVEREF(__pyx_tuple__151); + __pyx_tuple__153 = PyTuple_Pack(1, __pyx_kp_s_nbad_0); if 
(unlikely(!__pyx_tuple__153)) __PYX_ERR(0, 4266, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__153); + __Pyx_GIVEREF(__pyx_tuple__153); - /* "mtrand.pyx":4255 + /* "mtrand.pyx":4268 * raise ValueError("nbad < 0") * if lnsample < 1: * raise ValueError("nsample < 1") # <<<<<<<<<<<<<< * if lngood + lnbad < lnsample: * raise ValueError("ngood + nbad < nsample") */ - __pyx_tuple__152 = PyTuple_Pack(1, __pyx_kp_s_nsample_1); if (unlikely(!__pyx_tuple__152)) __PYX_ERR(0, 4255, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__152); - __Pyx_GIVEREF(__pyx_tuple__152); + __pyx_tuple__154 = PyTuple_Pack(1, __pyx_kp_s_nsample_1); if (unlikely(!__pyx_tuple__154)) __PYX_ERR(0, 4268, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__154); + __Pyx_GIVEREF(__pyx_tuple__154); - /* "mtrand.pyx":4257 + /* "mtrand.pyx":4270 * raise ValueError("nsample < 1") * if lngood + lnbad < lnsample: * raise ValueError("ngood + nbad < nsample") # <<<<<<<<<<<<<< * return discnmN_array_sc(self.internal_state, rk_hypergeometric, * size, lngood, lnbad, lnsample, self.lock) */ - __pyx_tuple__153 = PyTuple_Pack(1, __pyx_kp_s_ngood_nbad_nsample); if (unlikely(!__pyx_tuple__153)) __PYX_ERR(0, 4257, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__153); - __Pyx_GIVEREF(__pyx_tuple__153); + __pyx_tuple__155 = PyTuple_Pack(1, __pyx_kp_s_ngood_nbad_nsample); if (unlikely(!__pyx_tuple__155)) __PYX_ERR(0, 4270, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__155); + __Pyx_GIVEREF(__pyx_tuple__155); - /* "mtrand.pyx":4262 + /* "mtrand.pyx":4275 * * if np.any(np.less(ongood, 0)): * raise ValueError("ngood < 0") # <<<<<<<<<<<<<< * if np.any(np.less(onbad, 0)): * raise ValueError("nbad < 0") */ - __pyx_tuple__154 = PyTuple_Pack(1, __pyx_kp_s_ngood_0); if (unlikely(!__pyx_tuple__154)) __PYX_ERR(0, 4262, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__154); - __Pyx_GIVEREF(__pyx_tuple__154); + __pyx_tuple__156 = PyTuple_Pack(1, __pyx_kp_s_ngood_0); if (unlikely(!__pyx_tuple__156)) __PYX_ERR(0, 4275, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_tuple__156); + __Pyx_GIVEREF(__pyx_tuple__156); - /* "mtrand.pyx":4264 + /* "mtrand.pyx":4277 * raise ValueError("ngood < 0") * if np.any(np.less(onbad, 0)): * raise ValueError("nbad < 0") # <<<<<<<<<<<<<< * if np.any(np.less(onsample, 1)): * raise ValueError("nsample < 1") */ - __pyx_tuple__155 = PyTuple_Pack(1, __pyx_kp_s_nbad_0); if (unlikely(!__pyx_tuple__155)) __PYX_ERR(0, 4264, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__155); - __Pyx_GIVEREF(__pyx_tuple__155); + __pyx_tuple__157 = PyTuple_Pack(1, __pyx_kp_s_nbad_0); if (unlikely(!__pyx_tuple__157)) __PYX_ERR(0, 4277, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__157); + __Pyx_GIVEREF(__pyx_tuple__157); - /* "mtrand.pyx":4266 + /* "mtrand.pyx":4279 * raise ValueError("nbad < 0") * if np.any(np.less(onsample, 1)): * raise ValueError("nsample < 1") # <<<<<<<<<<<<<< * if np.any(np.less(np.add(ongood, onbad),onsample)): * raise ValueError("ngood + nbad < nsample") */ - __pyx_tuple__156 = PyTuple_Pack(1, __pyx_kp_s_nsample_1); if (unlikely(!__pyx_tuple__156)) __PYX_ERR(0, 4266, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__156); - __Pyx_GIVEREF(__pyx_tuple__156); + __pyx_tuple__158 = PyTuple_Pack(1, __pyx_kp_s_nsample_1); if (unlikely(!__pyx_tuple__158)) __PYX_ERR(0, 4279, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__158); + __Pyx_GIVEREF(__pyx_tuple__158); - /* "mtrand.pyx":4268 + /* "mtrand.pyx":4281 * raise ValueError("nsample < 1") * if np.any(np.less(np.add(ongood, onbad),onsample)): * raise ValueError("ngood + nbad < nsample") # <<<<<<<<<<<<<< * return discnmN_array(self.internal_state, rk_hypergeometric, size, * ongood, onbad, onsample, self.lock) */ - __pyx_tuple__157 = PyTuple_Pack(1, __pyx_kp_s_ngood_nbad_nsample); if (unlikely(!__pyx_tuple__157)) __PYX_ERR(0, 4268, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__157); - __Pyx_GIVEREF(__pyx_tuple__157); + __pyx_tuple__159 = PyTuple_Pack(1, __pyx_kp_s_ngood_nbad_nsample); if (unlikely(!__pyx_tuple__159)) __PYX_ERR(0, 4281, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_tuple__159); + __Pyx_GIVEREF(__pyx_tuple__159); - /* "mtrand.pyx":4355 + /* "mtrand.pyx":4368 * * if fp <= 0.0: * raise ValueError("p <= 0.0") # <<<<<<<<<<<<<< * if fp >= 1.0: * raise ValueError("p >= 1.0") */ - __pyx_tuple__158 = PyTuple_Pack(1, __pyx_kp_s_p_0_0_2); if (unlikely(!__pyx_tuple__158)) __PYX_ERR(0, 4355, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__158); - __Pyx_GIVEREF(__pyx_tuple__158); + __pyx_tuple__160 = PyTuple_Pack(1, __pyx_kp_s_p_0_0_2); if (unlikely(!__pyx_tuple__160)) __PYX_ERR(0, 4368, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__160); + __Pyx_GIVEREF(__pyx_tuple__160); - /* "mtrand.pyx":4357 + /* "mtrand.pyx":4370 * raise ValueError("p <= 0.0") * if fp >= 1.0: * raise ValueError("p >= 1.0") # <<<<<<<<<<<<<< * return discd_array_sc(self.internal_state, rk_logseries, size, fp, * self.lock) */ - __pyx_tuple__159 = PyTuple_Pack(1, __pyx_kp_s_p_1_0_2); if (unlikely(!__pyx_tuple__159)) __PYX_ERR(0, 4357, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__159); - __Pyx_GIVEREF(__pyx_tuple__159); + __pyx_tuple__161 = PyTuple_Pack(1, __pyx_kp_s_p_1_0_2); if (unlikely(!__pyx_tuple__161)) __PYX_ERR(0, 4370, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__161); + __Pyx_GIVEREF(__pyx_tuple__161); - /* "mtrand.pyx":4362 + /* "mtrand.pyx":4375 * * if np.any(np.less_equal(op, 0.0)): * raise ValueError("p <= 0.0") # <<<<<<<<<<<<<< * if np.any(np.greater_equal(op, 1.0)): * raise ValueError("p >= 1.0") */ - __pyx_tuple__160 = PyTuple_Pack(1, __pyx_kp_s_p_0_0_2); if (unlikely(!__pyx_tuple__160)) __PYX_ERR(0, 4362, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__160); - __Pyx_GIVEREF(__pyx_tuple__160); + __pyx_tuple__162 = PyTuple_Pack(1, __pyx_kp_s_p_0_0_2); if (unlikely(!__pyx_tuple__162)) __PYX_ERR(0, 4375, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__162); + __Pyx_GIVEREF(__pyx_tuple__162); - /* "mtrand.pyx":4364 + /* "mtrand.pyx":4377 * raise ValueError("p <= 0.0") * if np.any(np.greater_equal(op, 1.0)): * raise ValueError("p >= 1.0") # <<<<<<<<<<<<<< * 
return discd_array(self.internal_state, rk_logseries, size, op, * self.lock) */ - __pyx_tuple__161 = PyTuple_Pack(1, __pyx_kp_s_p_1_0_2); if (unlikely(!__pyx_tuple__161)) __PYX_ERR(0, 4364, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__161); - __Pyx_GIVEREF(__pyx_tuple__161); + __pyx_tuple__163 = PyTuple_Pack(1, __pyx_kp_s_p_1_0_2); if (unlikely(!__pyx_tuple__163)) __PYX_ERR(0, 4377, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__163); + __Pyx_GIVEREF(__pyx_tuple__163); - /* "mtrand.pyx":4483 + /* "mtrand.pyx":4496 * * if len(mean.shape) != 1: * raise ValueError("mean must be 1 dimensional") # <<<<<<<<<<<<<< * if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): * raise ValueError("cov must be 2 dimensional and square") */ - __pyx_tuple__162 = PyTuple_Pack(1, __pyx_kp_s_mean_must_be_1_dimensional); if (unlikely(!__pyx_tuple__162)) __PYX_ERR(0, 4483, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__162); - __Pyx_GIVEREF(__pyx_tuple__162); + __pyx_tuple__164 = PyTuple_Pack(1, __pyx_kp_s_mean_must_be_1_dimensional); if (unlikely(!__pyx_tuple__164)) __PYX_ERR(0, 4496, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__164); + __Pyx_GIVEREF(__pyx_tuple__164); - /* "mtrand.pyx":4485 + /* "mtrand.pyx":4498 * raise ValueError("mean must be 1 dimensional") * if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]): * raise ValueError("cov must be 2 dimensional and square") # <<<<<<<<<<<<<< * if mean.shape[0] != cov.shape[0]: * raise ValueError("mean and cov must have same length") */ - __pyx_tuple__163 = PyTuple_Pack(1, __pyx_kp_s_cov_must_be_2_dimensional_and_sq); if (unlikely(!__pyx_tuple__163)) __PYX_ERR(0, 4485, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__163); - __Pyx_GIVEREF(__pyx_tuple__163); + __pyx_tuple__165 = PyTuple_Pack(1, __pyx_kp_s_cov_must_be_2_dimensional_and_sq); if (unlikely(!__pyx_tuple__165)) __PYX_ERR(0, 4498, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__165); + __Pyx_GIVEREF(__pyx_tuple__165); - /* "mtrand.pyx":4487 + /* "mtrand.pyx":4500 * raise 
ValueError("cov must be 2 dimensional and square") * if mean.shape[0] != cov.shape[0]: * raise ValueError("mean and cov must have same length") # <<<<<<<<<<<<<< * * # Compute shape of output and create a matrix of independent */ - __pyx_tuple__164 = PyTuple_Pack(1, __pyx_kp_s_mean_and_cov_must_have_same_leng); if (unlikely(!__pyx_tuple__164)) __PYX_ERR(0, 4487, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__164); - __Pyx_GIVEREF(__pyx_tuple__164); + __pyx_tuple__166 = PyTuple_Pack(1, __pyx_kp_s_mean_and_cov_must_have_same_leng); if (unlikely(!__pyx_tuple__166)) __PYX_ERR(0, 4500, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__166); + __Pyx_GIVEREF(__pyx_tuple__166); - /* "mtrand.pyx":4493 + /* "mtrand.pyx":4506 * # with the same length as mean and as many rows are necessary to * # form a matrix of shape final_shape. * final_shape = list(shape[:]) # <<<<<<<<<<<<<< * final_shape.append(mean.shape[0]) * x = self.standard_normal(final_shape).reshape(-1, mean.shape[0]) */ - __pyx_slice__165 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__165)) __PYX_ERR(0, 4493, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__165); - __Pyx_GIVEREF(__pyx_slice__165); + __pyx_slice__167 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__167)) __PYX_ERR(0, 4506, __pyx_L1_error) + __Pyx_GOTREF(__pyx_slice__167); + __Pyx_GIVEREF(__pyx_slice__167); - /* "mtrand.pyx":4515 + /* "mtrand.pyx":4528 * if check_valid != 'ignore': * if check_valid != 'warn' and check_valid != 'raise': * raise ValueError("check_valid must equal 'warn', 'raise', or 'ignore'") # <<<<<<<<<<<<<< * * psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol) */ - __pyx_tuple__166 = PyTuple_Pack(1, __pyx_kp_s_check_valid_must_equal_warn_rais); if (unlikely(!__pyx_tuple__166)) __PYX_ERR(0, 4515, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__166); - __Pyx_GIVEREF(__pyx_tuple__166); + __pyx_tuple__168 = PyTuple_Pack(1, __pyx_kp_s_check_valid_must_equal_warn_rais); if (unlikely(!__pyx_tuple__168)) 
__PYX_ERR(0, 4528, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__168); + __Pyx_GIVEREF(__pyx_tuple__168); - /* "mtrand.pyx":4523 + /* "mtrand.pyx":4533 + * if not psd: + * if check_valid == 'warn': + * warnings.warn("covariance is not positive-semidefinite.", # <<<<<<<<<<<<<< + * RuntimeWarning) + * else: + */ + __pyx_tuple__169 = PyTuple_Pack(2, __pyx_kp_s_covariance_is_not_positive_semid, __pyx_builtin_RuntimeWarning); if (unlikely(!__pyx_tuple__169)) __PYX_ERR(0, 4533, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__169); + __Pyx_GIVEREF(__pyx_tuple__169); + + /* "mtrand.pyx":4536 * RuntimeWarning) * else: * raise ValueError("covariance is not positive-semidefinite.") # <<<<<<<<<<<<<< * * x = np.dot(x, np.sqrt(s)[:, None] * v) */ - __pyx_tuple__167 = PyTuple_Pack(1, __pyx_kp_s_covariance_is_not_positive_semid); if (unlikely(!__pyx_tuple__167)) __PYX_ERR(0, 4523, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__167); - __Pyx_GIVEREF(__pyx_tuple__167); + __pyx_tuple__170 = PyTuple_Pack(1, __pyx_kp_s_covariance_is_not_positive_semid); if (unlikely(!__pyx_tuple__170)) __PYX_ERR(0, 4536, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__170); + __Pyx_GIVEREF(__pyx_tuple__170); - /* "mtrand.pyx":4525 + /* "mtrand.pyx":4538 * raise ValueError("covariance is not positive-semidefinite.") * * x = np.dot(x, np.sqrt(s)[:, None] * v) # <<<<<<<<<<<<<< * x += mean * x.shape = tuple(final_shape) */ - __pyx_slice__168 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__168)) __PYX_ERR(0, 4525, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__168); - __Pyx_GIVEREF(__pyx_slice__168); - __pyx_tuple__169 = PyTuple_Pack(2, __pyx_slice__168, Py_None); if (unlikely(!__pyx_tuple__169)) __PYX_ERR(0, 4525, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__169); - __Pyx_GIVEREF(__pyx_tuple__169); + __pyx_slice__171 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__171)) __PYX_ERR(0, 4538, __pyx_L1_error) + __Pyx_GOTREF(__pyx_slice__171); + __Pyx_GIVEREF(__pyx_slice__171); + 
__pyx_tuple__172 = PyTuple_Pack(2, __pyx_slice__171, Py_None); if (unlikely(!__pyx_tuple__172)) __PYX_ERR(0, 4538, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__172); + __Pyx_GIVEREF(__pyx_tuple__172); - /* "mtrand.pyx":4617 + /* "mtrand.pyx":4630 * * if kahan_sum(pix, d-1) > (1.0 + 1e-12): * raise ValueError("sum(pvals[:-1]) > 1.0") # <<<<<<<<<<<<<< * * shape = _shape_from_size(size, d) */ - __pyx_tuple__170 = PyTuple_Pack(1, __pyx_kp_s_sum_pvals_1_1_0); if (unlikely(!__pyx_tuple__170)) __PYX_ERR(0, 4617, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__170); - __Pyx_GIVEREF(__pyx_tuple__170); + __pyx_tuple__173 = PyTuple_Pack(1, __pyx_kp_s_sum_pvals_1_1_0); if (unlikely(!__pyx_tuple__173)) __PYX_ERR(0, 4630, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__173); + __Pyx_GIVEREF(__pyx_tuple__173); - /* "mtrand.pyx":4625 + /* "mtrand.pyx":4638 * mnix = PyArray_DATA(mnarr) * sz = PyArray_SIZE(mnarr) * with self.lock, nogil, cython.cdivision(True): # <<<<<<<<<<<<<< * i = 0 * while i < sz: */ - __pyx_tuple__171 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__171)) __PYX_ERR(0, 4625, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__171); - __Pyx_GIVEREF(__pyx_tuple__171); + __pyx_tuple__174 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__174)) __PYX_ERR(0, 4638, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__174); + __Pyx_GIVEREF(__pyx_tuple__174); + + /* "mtrand.pyx":4753 + * alpha_arr = PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) + * if np.any(np.less_equal(alpha_arr, 0)): + * raise ValueError('alpha <= 0') # <<<<<<<<<<<<<< + * alpha_data = PyArray_DATA(alpha_arr) + * + */ + __pyx_tuple__175 = PyTuple_Pack(1, __pyx_kp_s_alpha_0); if (unlikely(!__pyx_tuple__175)) __PYX_ERR(0, 4753, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__175); + __Pyx_GIVEREF(__pyx_tuple__175); - /* "mtrand.pyx":4744 + /* "mtrand.pyx":4764 * i = 0 * totsize = PyArray_SIZE(val_arr) * with self.lock, nogil: # <<<<<<<<<<<<<< * while i < totsize: * acc = 0.0 */ 
- __pyx_tuple__172 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__172)) __PYX_ERR(0, 4744, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__172); - __Pyx_GIVEREF(__pyx_tuple__172); + __pyx_tuple__176 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__176)) __PYX_ERR(0, 4764, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__176); + __Pyx_GIVEREF(__pyx_tuple__176); - /* "mtrand.pyx":4813 + /* "mtrand.pyx":4833 * buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit * buf_ptr = buf.ctypes.data * with self.lock: # <<<<<<<<<<<<<< * # We trick gcc into providing a specialized implementation for * # the most common case, yielding a ~33% performance improvement. */ - __pyx_tuple__173 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__173)) __PYX_ERR(0, 4813, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__173); - __Pyx_GIVEREF(__pyx_tuple__173); + __pyx_tuple__177 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__177)) __PYX_ERR(0, 4833, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__177); + __Pyx_GIVEREF(__pyx_tuple__177); - /* "mtrand.pyx":4824 + /* "mtrand.pyx":4844 * # Multidimensional ndarrays require a bounce buffer. * buf = np.empty_like(x[0]) * with self.lock: # <<<<<<<<<<<<<< * for i in reversed(range(1, n)): * j = rk_interval(i, self.internal_state) */ - __pyx_tuple__174 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__174)) __PYX_ERR(0, 4824, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__174); - __Pyx_GIVEREF(__pyx_tuple__174); + __pyx_tuple__178 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__178)) __PYX_ERR(0, 4844, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__178); + __Pyx_GIVEREF(__pyx_tuple__178); - /* "mtrand.pyx":4832 + /* "mtrand.pyx":4852 * else: * # Untyped path. 
* with self.lock: # <<<<<<<<<<<<<< * for i in reversed(range(1, n)): * j = rk_interval(i, self.internal_state) */ - __pyx_tuple__175 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__175)) __PYX_ERR(0, 4832, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__175); - __Pyx_GIVEREF(__pyx_tuple__175); + __pyx_tuple__179 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__179)) __PYX_ERR(0, 4852, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__179); + __Pyx_GIVEREF(__pyx_tuple__179); /* "randint_helpers.pxi":5 * """ @@ -43112,10 +43426,10 @@ * """ * _rand_bool(low, high, size, rngstate) */ - __pyx_tuple__176 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__176)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__176); - __Pyx_GIVEREF(__pyx_tuple__176); - __pyx_codeobj__177 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__176, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_bool, 5, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__177)) __PYX_ERR(1, 5, __pyx_L1_error) + __pyx_tuple__180 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__180)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__180); + __Pyx_GIVEREF(__pyx_tuple__180); + __pyx_codeobj__181 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__180, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_bool, 5, __pyx_empty_bytes); if 
(unlikely(!__pyx_codeobj__181)) __PYX_ERR(1, 5, __pyx_L1_error) /* "randint_helpers.pxi":56 * return array @@ -43124,10 +43438,10 @@ * """ * _rand_int8(low, high, size, rngstate) */ - __pyx_tuple__178 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__178)) __PYX_ERR(1, 56, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__178); - __Pyx_GIVEREF(__pyx_tuple__178); - __pyx_codeobj__179 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__178, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_int8, 56, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__179)) __PYX_ERR(1, 56, __pyx_L1_error) + __pyx_tuple__182 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__182)) __PYX_ERR(1, 56, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__182); + __Pyx_GIVEREF(__pyx_tuple__182); + __pyx_codeobj__183 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__182, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_int8, 56, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__183)) __PYX_ERR(1, 56, __pyx_L1_error) /* "randint_helpers.pxi":107 * return array @@ -43136,10 +43450,10 @@ * """ * _rand_int16(low, high, size, rngstate) */ - __pyx_tuple__180 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__180)) 
__PYX_ERR(1, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__180); - __Pyx_GIVEREF(__pyx_tuple__180); - __pyx_codeobj__181 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__180, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_int16, 107, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__181)) __PYX_ERR(1, 107, __pyx_L1_error) + __pyx_tuple__184 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__184)) __PYX_ERR(1, 107, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__184); + __Pyx_GIVEREF(__pyx_tuple__184); + __pyx_codeobj__185 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__184, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_int16, 107, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__185)) __PYX_ERR(1, 107, __pyx_L1_error) /* "randint_helpers.pxi":158 * return array @@ -43148,10 +43462,10 @@ * """ * _rand_int32(low, high, size, rngstate) */ - __pyx_tuple__182 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__182)) __PYX_ERR(1, 158, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__182); - __Pyx_GIVEREF(__pyx_tuple__182); - __pyx_codeobj__183 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__182, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_int32, 158, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__183)) __PYX_ERR(1, 158, __pyx_L1_error) + __pyx_tuple__186 = 
PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__186)) __PYX_ERR(1, 158, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__186); + __Pyx_GIVEREF(__pyx_tuple__186); + __pyx_codeobj__187 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__186, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_int32, 158, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__187)) __PYX_ERR(1, 158, __pyx_L1_error) /* "randint_helpers.pxi":209 * return array @@ -43160,10 +43474,10 @@ * """ * _rand_int64(low, high, size, rngstate) */ - __pyx_tuple__184 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__184)) __PYX_ERR(1, 209, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__184); - __Pyx_GIVEREF(__pyx_tuple__184); - __pyx_codeobj__185 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__184, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_int64, 209, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__185)) __PYX_ERR(1, 209, __pyx_L1_error) + __pyx_tuple__188 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__188)) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__188); + __Pyx_GIVEREF(__pyx_tuple__188); + __pyx_codeobj__189 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, 
__pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__188, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_int64, 209, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__189)) __PYX_ERR(1, 209, __pyx_L1_error) /* "randint_helpers.pxi":260 * return array @@ -43172,10 +43486,10 @@ * """ * _rand_uint8(low, high, size, rngstate) */ - __pyx_tuple__186 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__186)) __PYX_ERR(1, 260, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__186); - __Pyx_GIVEREF(__pyx_tuple__186); - __pyx_codeobj__187 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__186, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_uint8, 260, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__187)) __PYX_ERR(1, 260, __pyx_L1_error) + __pyx_tuple__190 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__190)) __PYX_ERR(1, 260, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__190); + __Pyx_GIVEREF(__pyx_tuple__190); + __pyx_codeobj__191 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__190, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_uint8, 260, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__191)) __PYX_ERR(1, 260, __pyx_L1_error) /* "randint_helpers.pxi":311 * return array @@ -43184,10 +43498,10 @@ * """ * _rand_uint16(low, high, size, rngstate) */ - __pyx_tuple__188 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, 
__pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__188)) __PYX_ERR(1, 311, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__188); - __Pyx_GIVEREF(__pyx_tuple__188); - __pyx_codeobj__189 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__188, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_uint16, 311, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__189)) __PYX_ERR(1, 311, __pyx_L1_error) + __pyx_tuple__192 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__192)) __PYX_ERR(1, 311, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__192); + __Pyx_GIVEREF(__pyx_tuple__192); + __pyx_codeobj__193 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__192, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_uint16, 311, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__193)) __PYX_ERR(1, 311, __pyx_L1_error) /* "randint_helpers.pxi":362 * return array @@ -43196,10 +43510,10 @@ * """ * _rand_uint32(low, high, size, rngstate) */ - __pyx_tuple__190 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__190)) __PYX_ERR(1, 362, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__190); - __Pyx_GIVEREF(__pyx_tuple__190); - __pyx_codeobj__191 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__190, 
__pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_uint32, 362, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__191)) __PYX_ERR(1, 362, __pyx_L1_error) + __pyx_tuple__194 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__194)) __PYX_ERR(1, 362, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__194); + __Pyx_GIVEREF(__pyx_tuple__194); + __pyx_codeobj__195 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__194, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_uint32, 362, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__195)) __PYX_ERR(1, 362, __pyx_L1_error) /* "randint_helpers.pxi":413 * return array @@ -43208,10 +43522,10 @@ * """ * _rand_uint64(low, high, size, rngstate) */ - __pyx_tuple__192 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if (unlikely(!__pyx_tuple__192)) __PYX_ERR(1, 413, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__192); - __Pyx_GIVEREF(__pyx_tuple__192); - __pyx_codeobj__193 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__192, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_uint64, 413, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__193)) __PYX_ERR(1, 413, __pyx_L1_error) + __pyx_tuple__196 = PyTuple_Pack(12, __pyx_n_s_low, __pyx_n_s_high, __pyx_n_s_size, __pyx_n_s_rngstate, __pyx_n_s_off, __pyx_n_s_rng, __pyx_n_s_buf, __pyx_n_s_out, __pyx_n_s_array, __pyx_n_s_cnt, __pyx_n_s_state, __pyx_n_s_array_data); if 
(unlikely(!__pyx_tuple__196)) __PYX_ERR(1, 413, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__196); + __Pyx_GIVEREF(__pyx_tuple__196); + __pyx_codeobj__197 = (PyObject*)__Pyx_PyCode_New(4, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__196, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_randint_helpers_pxi, __pyx_n_s_rand_uint64, 413, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__197)) __PYX_ERR(1, 413, __pyx_L1_error) /* "mtrand.pyx":566 * return sum @@ -43220,10 +43534,10 @@ * if size is None: * shape = (d,) */ - __pyx_tuple__194 = PyTuple_Pack(3, __pyx_n_s_size, __pyx_n_s_d, __pyx_n_s_shape); if (unlikely(!__pyx_tuple__194)) __PYX_ERR(0, 566, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__194); - __Pyx_GIVEREF(__pyx_tuple__194); - __pyx_codeobj__195 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__194, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_mtrand_pyx, __pyx_n_s_shape_from_size, 566, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__195)) __PYX_ERR(0, 566, __pyx_L1_error) + __pyx_tuple__198 = PyTuple_Pack(3, __pyx_n_s_size, __pyx_n_s_d, __pyx_n_s_shape); if (unlikely(!__pyx_tuple__198)) __PYX_ERR(0, 566, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__198); + __Pyx_GIVEREF(__pyx_tuple__198); + __pyx_codeobj__199 = (PyObject*)__Pyx_PyCode_New(2, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__198, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_mtrand_pyx, __pyx_n_s_shape_from_size, 566, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__199)) __PYX_ERR(0, 566, __pyx_L1_error) /* "mtrand.pyx":638 * cdef object lock @@ -43232,12 +43546,12 @@ * * def __init__(self, seed=None): */ - __pyx_tuple__196 = PyTuple_Pack(1, __pyx_n_s_l); if (unlikely(!__pyx_tuple__196)) __PYX_ERR(0, 638, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__196); - __Pyx_GIVEREF(__pyx_tuple__196); - 
__pyx_tuple__197 = PyTuple_Pack(1, __pyx_n_s_l); if (unlikely(!__pyx_tuple__197)) __PYX_ERR(0, 638, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__197); - __Pyx_GIVEREF(__pyx_tuple__197); + __pyx_tuple__200 = PyTuple_Pack(1, __pyx_n_s_l); if (unlikely(!__pyx_tuple__200)) __PYX_ERR(0, 638, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__200); + __Pyx_GIVEREF(__pyx_tuple__200); + __pyx_tuple__201 = PyTuple_Pack(1, __pyx_n_s_l); if (unlikely(!__pyx_tuple__201)) __PYX_ERR(0, 638, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__201); + __Pyx_GIVEREF(__pyx_tuple__201); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; @@ -43262,7 +43576,6 @@ __pyx_int_32768 = PyInt_FromLong(32768L); if (unlikely(!__pyx_int_32768)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_65536 = PyInt_FromLong(65536L); if (unlikely(!__pyx_int_65536)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_2147483648 = PyInt_FromString((char *)"2147483648", 0, 0); if (unlikely(!__pyx_int_2147483648)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_4294967295 = PyInt_FromString((char *)"4294967295", 0, 0); if (unlikely(!__pyx_int_4294967295)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_4294967296 = PyInt_FromString((char *)"4294967296", 0, 0); if (unlikely(!__pyx_int_4294967296)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_9223372036854775808 = PyInt_FromString((char *)"9223372036854775808", 0, 0); if (unlikely(!__pyx_int_9223372036854775808)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_18446744073709551616 = PyInt_FromString((char *)"18446744073709551616", 0, 0); if (unlikely(!__pyx_int_18446744073709551616)) __PYX_ERR(0, 1, __pyx_L1_error) @@ -43276,12 +43589,169 @@ return -1; } +static int __Pyx_modinit_global_init_code(void); /*proto*/ +static int __Pyx_modinit_variable_export_code(void); /*proto*/ +static int __Pyx_modinit_function_export_code(void); /*proto*/ +static int __Pyx_modinit_type_init_code(void); /*proto*/ +static int __Pyx_modinit_type_import_code(void); /*proto*/ +static int 
__Pyx_modinit_variable_import_code(void); /*proto*/ +static int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __pyx_ptype_6mtrand_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_6mtrand_dtype)) __PYX_ERR(2, 87, __pyx_L1_error) + __pyx_ptype_6mtrand_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_6mtrand_ndarray)) __PYX_ERR(2, 89, __pyx_L1_error) + __pyx_ptype_6mtrand_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_6mtrand_flatiter)) __PYX_ERR(2, 91, __pyx_L1_error) + __pyx_ptype_6mtrand_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_6mtrand_broadcast)) __PYX_ERR(2, 97, __pyx_L1_error) + __pyx_vtabptr_6mtrand_RandomState = &__pyx_vtable_6mtrand_RandomState; + __pyx_vtable_6mtrand_RandomState._shuffle_raw = (PyObject *(*)(struct __pyx_obj_6mtrand_RandomState *, npy_intp, npy_intp, npy_intp, char *, char *))__pyx_f_6mtrand_11RandomState__shuffle_raw; + if 
(PyType_Ready(&__pyx_type_6mtrand_RandomState) < 0) __PYX_ERR(0, 593, __pyx_L1_error) + __pyx_type_6mtrand_RandomState.tp_print = 0; + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_6mtrand_RandomState.tp_dictoffset && __pyx_type_6mtrand_RandomState.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type_6mtrand_RandomState.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type_6mtrand_RandomState.tp_dict, __pyx_vtabptr_6mtrand_RandomState) < 0) __PYX_ERR(0, 593, __pyx_L1_error) + if (PyObject_SetAttrString(__pyx_m, "RandomState", (PyObject *)&__pyx_type_6mtrand_RandomState) < 0) __PYX_ERR(0, 593, __pyx_L1_error) + __pyx_ptype_6mtrand_RandomState = &__pyx_type_6mtrand_RandomState; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", + #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(3, 9, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#if PY_MAJOR_VERSION < 3 +#ifdef 
CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC void +#else +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#endif +#else +#ifdef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#endif +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) + #define CYTHON_SMALL_CODE __attribute__((optimize("Os"))) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + + #if PY_MAJOR_VERSION < 3 -PyMODINIT_FUNC initmtrand(void); /*proto*/ -PyMODINIT_FUNC initmtrand(void) +__Pyx_PyMODINIT_FUNC initmtrand(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initmtrand(void) #else -PyMODINIT_FUNC PyInit_mtrand(void); /*proto*/ -PyMODINIT_FUNC PyInit_mtrand(void) +__Pyx_PyMODINIT_FUNC PyInit_mtrand(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_mtrand(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + result = PyDict_SetItemString(moddict, to_name, value); + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, 
moddict, "origin", "__file__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static int __pyx_pymod_exec_mtrand(PyObject *__pyx_pyinit_module) +#endif #endif { PyObject *__pyx_t_1 = NULL; @@ -43295,16 +43765,21 @@ PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; __Pyx_RefNannyDeclarations - #if CYTHON_REFNANNY - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); - if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); - } + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif - __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_mtrand(void)", 0); + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_mtrand(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) @@ -43321,6 +43796,9 @@ #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif #ifdef __Pyx_StopAsyncIteration_USED if 
(__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif @@ -43332,12 +43810,17 @@ #endif #endif /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("mtrand", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) @@ -43366,31 +43849,14 @@ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global init code ---*/ - /*--- Variable export code ---*/ - /*--- Function export code ---*/ - /*--- Type init code ---*/ - __pyx_ptype_6mtrand_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_6mtrand_dtype)) __PYX_ERR(2, 87, __pyx_L1_error) - __pyx_ptype_6mtrand_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_6mtrand_ndarray)) __PYX_ERR(2, 89, __pyx_L1_error) - __pyx_ptype_6mtrand_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_6mtrand_flatiter)) __PYX_ERR(2, 91, __pyx_L1_error) - __pyx_ptype_6mtrand_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_6mtrand_broadcast)) __PYX_ERR(2, 97, __pyx_L1_error) - __pyx_vtabptr_6mtrand_RandomState = &__pyx_vtable_6mtrand_RandomState; - __pyx_vtable_6mtrand_RandomState._shuffle_raw = (PyObject *(*)(struct __pyx_obj_6mtrand_RandomState *, npy_intp, npy_intp, npy_intp, char *, char 
*))__pyx_f_6mtrand_11RandomState__shuffle_raw; - if (PyType_Ready(&__pyx_type_6mtrand_RandomState) < 0) __PYX_ERR(0, 593, __pyx_L1_error) - __pyx_type_6mtrand_RandomState.tp_print = 0; - if (__Pyx_SetVtable(__pyx_type_6mtrand_RandomState.tp_dict, __pyx_vtabptr_6mtrand_RandomState) < 0) __PYX_ERR(0, 593, __pyx_L1_error) - if (PyObject_SetAttrString(__pyx_m, "RandomState", (PyObject *)&__pyx_type_6mtrand_RandomState) < 0) __PYX_ERR(0, 593, __pyx_L1_error) - __pyx_ptype_6mtrand_RandomState = &__pyx_type_6mtrand_RandomState; - /*--- Type import code ---*/ - __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", - #if CYTHON_COMPILING_IN_PYPY - sizeof(PyTypeObject), - #else - sizeof(PyHeapTypeObject), - #endif - 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(3, 9, __pyx_L1_error) - /*--- Variable import code ---*/ - /*--- Function import code ---*/ + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error; + if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error; + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) @@ -43511,7 +43977,7 @@ * * cimport cython */ - __pyx_t_2 = __pyx_f_6mtrand_import_array(); if (unlikely(__pyx_t_2 == -1)) __PYX_ERR(0, 143, __pyx_L1_error) + __pyx_t_2 = __pyx_f_6mtrand_import_array(); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 143, __pyx_L1_error) /* "mtrand.pyx":146 * @@ -43599,7 +44065,6 @@ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L7_try_end; __pyx_L2_error:; - __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; @@ 
-43653,14 +44118,12 @@ * from threading import Lock * except ImportError: */ - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L3_exception_handled:; - __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); @@ -43687,7 +44150,7 @@ * 'int8': (-2**7, 2**7, _rand_int8), * 'int16': (-2**15, 2**15, _rand_int16), */ - __pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 581, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyDict_NewPresized(9); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 581, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_bool); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 581, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); @@ -43903,7 +44366,7 @@ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_iinfo); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 638, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_tuple__196, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 638, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_tuple__200, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 638, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_max); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 638, __pyx_L1_error) @@ -43919,7 +44382,7 @@ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_iinfo); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 638, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_tuple__197, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 638, __pyx_L1_error) + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_tuple__201, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 638, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_max); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 638, __pyx_L1_error) @@ -43982,7 +44445,7 @@ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; PyType_Modified(__pyx_ptype_6mtrand_RandomState); - /* "mtrand.pyx":905 + /* "mtrand.pyx":910 * return disc0_array(self.internal_state, rk_long, size, self.lock) * * def randint(self, low, high=None, size=None, dtype=int): # <<<<<<<<<<<<<< @@ -43990,721 +44453,721 @@ * randint(low, high=None, size=None, dtype='l') */ __Pyx_INCREF(((PyObject *)(&PyInt_Type))); - __pyx_k__48 = ((PyObject *)(&PyInt_Type)); + __pyx_k__50 = ((PyObject *)(&PyInt_Type)); __Pyx_GIVEREF((&PyInt_Type)); - /* "mtrand.pyx":4890 + /* "mtrand.pyx":4910 * return arr * * _rand = RandomState() # <<<<<<<<<<<<<< * seed = _rand.seed * get_state = _rand.get_state */ - __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_ptype_6mtrand_RandomState), __pyx_empty_tuple, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4890, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_CallNoArg(((PyObject *)__pyx_ptype_6mtrand_RandomState)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4910, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_rand_2, __pyx_t_7) < 0) __PYX_ERR(0, 4890, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_rand_2, __pyx_t_7) < 0) __PYX_ERR(0, 4910, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4891 + /* "mtrand.pyx":4911 * * _rand = RandomState() * seed = _rand.seed # <<<<<<<<<<<<<< * get_state = _rand.get_state * set_state = _rand.set_state */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4891, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4911, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_seed); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(0, 4891, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_seed); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4911, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_seed, __pyx_t_9) < 0) __PYX_ERR(0, 4891, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_seed, __pyx_t_9) < 0) __PYX_ERR(0, 4911, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4892 + /* "mtrand.pyx":4912 * _rand = RandomState() * seed = _rand.seed * get_state = _rand.get_state # <<<<<<<<<<<<<< * set_state = _rand.set_state * random_sample = _rand.random_sample */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4892, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4912, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_get_state); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4892, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_get_state); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4912, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_get_state, __pyx_t_7) < 0) __PYX_ERR(0, 4892, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_get_state, __pyx_t_7) < 0) __PYX_ERR(0, 4912, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4893 + /* "mtrand.pyx":4913 * seed = _rand.seed * get_state = _rand.get_state * set_state = _rand.set_state # <<<<<<<<<<<<<< * random_sample = _rand.random_sample * choice = _rand.choice */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4893, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4913, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_set_state); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4893, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_set_state); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4913, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_set_state, __pyx_t_9) < 0) __PYX_ERR(0, 4893, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_set_state, __pyx_t_9) < 0) __PYX_ERR(0, 4913, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4894 + /* "mtrand.pyx":4914 * get_state = _rand.get_state * set_state = _rand.set_state * random_sample = _rand.random_sample # <<<<<<<<<<<<<< * choice = _rand.choice * randint = _rand.randint */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4894, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_random_sample); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4894, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_random_sample); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_random_sample, __pyx_t_7) < 0) __PYX_ERR(0, 4894, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_random_sample, __pyx_t_7) < 0) __PYX_ERR(0, 4914, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4895 + /* "mtrand.pyx":4915 * set_state = _rand.set_state * random_sample = _rand.random_sample * choice = _rand.choice # <<<<<<<<<<<<<< * randint = _rand.randint * bytes = _rand.bytes */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4895, __pyx_L1_error) 
+ __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4915, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_choice); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4895, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_choice); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4915, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_choice, __pyx_t_9) < 0) __PYX_ERR(0, 4895, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_choice, __pyx_t_9) < 0) __PYX_ERR(0, 4915, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4896 + /* "mtrand.pyx":4916 * random_sample = _rand.random_sample * choice = _rand.choice * randint = _rand.randint # <<<<<<<<<<<<<< * bytes = _rand.bytes * uniform = _rand.uniform */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4896, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_randint); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4896, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_randint); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_randint, __pyx_t_7) < 0) __PYX_ERR(0, 4896, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_randint, __pyx_t_7) < 0) __PYX_ERR(0, 4916, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4897 + /* "mtrand.pyx":4917 * choice = _rand.choice * randint = _rand.randint * bytes = _rand.bytes # <<<<<<<<<<<<<< * uniform = _rand.uniform * rand = _rand.rand */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if 
(unlikely(!__pyx_t_7)) __PYX_ERR(0, 4897, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_bytes); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4897, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_bytes); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_bytes, __pyx_t_9) < 0) __PYX_ERR(0, 4897, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_bytes, __pyx_t_9) < 0) __PYX_ERR(0, 4917, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4898 + /* "mtrand.pyx":4918 * randint = _rand.randint * bytes = _rand.bytes * uniform = _rand.uniform # <<<<<<<<<<<<<< * rand = _rand.rand * randn = _rand.randn */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4898, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_uniform); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4898, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_uniform); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_uniform, __pyx_t_7) < 0) __PYX_ERR(0, 4898, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_uniform, __pyx_t_7) < 0) __PYX_ERR(0, 4918, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4899 + /* "mtrand.pyx":4919 * bytes = _rand.bytes * uniform = _rand.uniform * rand = _rand.rand # <<<<<<<<<<<<<< * randn = _rand.randn * random_integers = _rand.random_integers */ - __pyx_t_7 = 
__Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4899, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_rand); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4899, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_rand); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_rand, __pyx_t_9) < 0) __PYX_ERR(0, 4899, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_rand, __pyx_t_9) < 0) __PYX_ERR(0, 4919, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4900 + /* "mtrand.pyx":4920 * uniform = _rand.uniform * rand = _rand.rand * randn = _rand.randn # <<<<<<<<<<<<<< * random_integers = _rand.random_integers * standard_normal = _rand.standard_normal */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4900, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4920, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_randn); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4900, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_randn); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4920, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_randn, __pyx_t_7) < 0) __PYX_ERR(0, 4900, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_randn, __pyx_t_7) < 0) __PYX_ERR(0, 4920, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4901 + /* "mtrand.pyx":4921 * rand = _rand.rand * randn = _rand.randn * random_integers = _rand.random_integers # <<<<<<<<<<<<<< 
* standard_normal = _rand.standard_normal * normal = _rand.normal */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4901, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4921, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_random_integers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4901, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_random_integers); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4921, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_random_integers, __pyx_t_9) < 0) __PYX_ERR(0, 4901, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_random_integers, __pyx_t_9) < 0) __PYX_ERR(0, 4921, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4902 + /* "mtrand.pyx":4922 * randn = _rand.randn * random_integers = _rand.random_integers * standard_normal = _rand.standard_normal # <<<<<<<<<<<<<< * normal = _rand.normal * beta = _rand.beta */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4902, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4922, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_standard_normal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4902, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_standard_normal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4922, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_standard_normal, __pyx_t_7) < 0) __PYX_ERR(0, 4902, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_standard_normal, __pyx_t_7) < 0) __PYX_ERR(0, 4922, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4903 + /* "mtrand.pyx":4923 * random_integers = _rand.random_integers * standard_normal = _rand.standard_normal * normal = _rand.normal # <<<<<<<<<<<<<< * beta = _rand.beta * exponential = _rand.exponential */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4903, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_normal); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4903, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_normal); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_normal, __pyx_t_9) < 0) __PYX_ERR(0, 4903, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_normal, __pyx_t_9) < 0) __PYX_ERR(0, 4923, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4904 + /* "mtrand.pyx":4924 * standard_normal = _rand.standard_normal * normal = _rand.normal * beta = _rand.beta # <<<<<<<<<<<<<< * exponential = _rand.exponential * standard_exponential = _rand.standard_exponential */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4904, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_beta); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4904, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_beta); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_beta, __pyx_t_7) < 0) __PYX_ERR(0, 
4904, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_beta, __pyx_t_7) < 0) __PYX_ERR(0, 4924, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4905 + /* "mtrand.pyx":4925 * normal = _rand.normal * beta = _rand.beta * exponential = _rand.exponential # <<<<<<<<<<<<<< * standard_exponential = _rand.standard_exponential * standard_gamma = _rand.standard_gamma */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4905, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_exponential); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4905, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_exponential); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_exponential, __pyx_t_9) < 0) __PYX_ERR(0, 4905, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_exponential, __pyx_t_9) < 0) __PYX_ERR(0, 4925, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4906 + /* "mtrand.pyx":4926 * beta = _rand.beta * exponential = _rand.exponential * standard_exponential = _rand.standard_exponential # <<<<<<<<<<<<<< * standard_gamma = _rand.standard_gamma * gamma = _rand.gamma */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4906, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_standard_exponential); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4906, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_standard_exponential); if (unlikely(!__pyx_t_7)) 
__PYX_ERR(0, 4926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_standard_exponential, __pyx_t_7) < 0) __PYX_ERR(0, 4906, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_standard_exponential, __pyx_t_7) < 0) __PYX_ERR(0, 4926, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4907 + /* "mtrand.pyx":4927 * exponential = _rand.exponential * standard_exponential = _rand.standard_exponential * standard_gamma = _rand.standard_gamma # <<<<<<<<<<<<<< * gamma = _rand.gamma * f = _rand.f */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4907, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_standard_gamma); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4907, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_standard_gamma); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_standard_gamma, __pyx_t_9) < 0) __PYX_ERR(0, 4907, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_standard_gamma, __pyx_t_9) < 0) __PYX_ERR(0, 4927, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4908 + /* "mtrand.pyx":4928 * standard_exponential = _rand.standard_exponential * standard_gamma = _rand.standard_gamma * gamma = _rand.gamma # <<<<<<<<<<<<<< * f = _rand.f * noncentral_f = _rand.noncentral_f */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4908, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_gamma); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4908, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_gamma); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_gamma, __pyx_t_7) < 0) __PYX_ERR(0, 4908, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_gamma, __pyx_t_7) < 0) __PYX_ERR(0, 4928, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4909 + /* "mtrand.pyx":4929 * standard_gamma = _rand.standard_gamma * gamma = _rand.gamma * f = _rand.f # <<<<<<<<<<<<<< * noncentral_f = _rand.noncentral_f * chisquare = _rand.chisquare */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4909, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_f); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4909, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_f); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_f, __pyx_t_9) < 0) __PYX_ERR(0, 4909, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_f, __pyx_t_9) < 0) __PYX_ERR(0, 4929, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4910 + /* "mtrand.pyx":4930 * gamma = _rand.gamma * f = _rand.f * noncentral_f = _rand.noncentral_f # <<<<<<<<<<<<<< * chisquare = _rand.chisquare * noncentral_chisquare = _rand.noncentral_chisquare */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4910, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) 
__PYX_ERR(0, 4930, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_noncentral_f); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4910, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_noncentral_f); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4930, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_noncentral_f, __pyx_t_7) < 0) __PYX_ERR(0, 4910, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_noncentral_f, __pyx_t_7) < 0) __PYX_ERR(0, 4930, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4911 + /* "mtrand.pyx":4931 * f = _rand.f * noncentral_f = _rand.noncentral_f * chisquare = _rand.chisquare # <<<<<<<<<<<<<< * noncentral_chisquare = _rand.noncentral_chisquare * standard_cauchy = _rand.standard_cauchy */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4911, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_chisquare); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4911, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_chisquare); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_chisquare, __pyx_t_9) < 0) __PYX_ERR(0, 4911, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_chisquare, __pyx_t_9) < 0) __PYX_ERR(0, 4931, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4912 + /* "mtrand.pyx":4932 * noncentral_f = _rand.noncentral_f * chisquare = _rand.chisquare * noncentral_chisquare = _rand.noncentral_chisquare # <<<<<<<<<<<<<< * standard_cauchy = _rand.standard_cauchy * standard_t = _rand.standard_t */ - 
__pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4912, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4932, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_noncentral_chisquare); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4912, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_noncentral_chisquare); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4932, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_noncentral_chisquare, __pyx_t_7) < 0) __PYX_ERR(0, 4912, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_noncentral_chisquare, __pyx_t_7) < 0) __PYX_ERR(0, 4932, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4913 + /* "mtrand.pyx":4933 * chisquare = _rand.chisquare * noncentral_chisquare = _rand.noncentral_chisquare * standard_cauchy = _rand.standard_cauchy # <<<<<<<<<<<<<< * standard_t = _rand.standard_t * vonmises = _rand.vonmises */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4913, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4933, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_standard_cauchy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4913, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_standard_cauchy); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4933, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_standard_cauchy, __pyx_t_9) < 0) __PYX_ERR(0, 4913, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_standard_cauchy, __pyx_t_9) < 0) __PYX_ERR(0, 4933, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; 
- /* "mtrand.pyx":4914 + /* "mtrand.pyx":4934 * noncentral_chisquare = _rand.noncentral_chisquare * standard_cauchy = _rand.standard_cauchy * standard_t = _rand.standard_t # <<<<<<<<<<<<<< * vonmises = _rand.vonmises * pareto = _rand.pareto */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4914, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4934, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_standard_t); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4914, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_standard_t); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4934, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_standard_t, __pyx_t_7) < 0) __PYX_ERR(0, 4914, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_standard_t, __pyx_t_7) < 0) __PYX_ERR(0, 4934, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4915 + /* "mtrand.pyx":4935 * standard_cauchy = _rand.standard_cauchy * standard_t = _rand.standard_t * vonmises = _rand.vonmises # <<<<<<<<<<<<<< * pareto = _rand.pareto * weibull = _rand.weibull */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4915, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4935, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_vonmises); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4915, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_vonmises); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4935, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_vonmises, __pyx_t_9) < 0) __PYX_ERR(0, 4915, 
__pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_vonmises, __pyx_t_9) < 0) __PYX_ERR(0, 4935, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4916 + /* "mtrand.pyx":4936 * standard_t = _rand.standard_t * vonmises = _rand.vonmises * pareto = _rand.pareto # <<<<<<<<<<<<<< * weibull = _rand.weibull * power = _rand.power */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4916, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4936, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_pareto); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4916, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_pareto); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4936, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_pareto, __pyx_t_7) < 0) __PYX_ERR(0, 4916, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_pareto, __pyx_t_7) < 0) __PYX_ERR(0, 4936, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4917 + /* "mtrand.pyx":4937 * vonmises = _rand.vonmises * pareto = _rand.pareto * weibull = _rand.weibull # <<<<<<<<<<<<<< * power = _rand.power * laplace = _rand.laplace */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4917, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4937, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_weibull); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4917, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_weibull); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4937, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if 
(PyDict_SetItem(__pyx_d, __pyx_n_s_weibull, __pyx_t_9) < 0) __PYX_ERR(0, 4917, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_weibull, __pyx_t_9) < 0) __PYX_ERR(0, 4937, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4918 + /* "mtrand.pyx":4938 * pareto = _rand.pareto * weibull = _rand.weibull * power = _rand.power # <<<<<<<<<<<<<< * laplace = _rand.laplace * gumbel = _rand.gumbel */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4918, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4938, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_power); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4918, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_power); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4938, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_power, __pyx_t_7) < 0) __PYX_ERR(0, 4918, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_power, __pyx_t_7) < 0) __PYX_ERR(0, 4938, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4919 + /* "mtrand.pyx":4939 * weibull = _rand.weibull * power = _rand.power * laplace = _rand.laplace # <<<<<<<<<<<<<< * gumbel = _rand.gumbel * logistic = _rand.logistic */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4919, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4939, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_laplace); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4919, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_laplace); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4939, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); 
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_laplace, __pyx_t_9) < 0) __PYX_ERR(0, 4919, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_laplace, __pyx_t_9) < 0) __PYX_ERR(0, 4939, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4920 + /* "mtrand.pyx":4940 * power = _rand.power * laplace = _rand.laplace * gumbel = _rand.gumbel # <<<<<<<<<<<<<< * logistic = _rand.logistic * lognormal = _rand.lognormal */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4920, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4940, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_gumbel); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4920, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_gumbel); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4940, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_gumbel, __pyx_t_7) < 0) __PYX_ERR(0, 4920, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_gumbel, __pyx_t_7) < 0) __PYX_ERR(0, 4940, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4921 + /* "mtrand.pyx":4941 * laplace = _rand.laplace * gumbel = _rand.gumbel * logistic = _rand.logistic # <<<<<<<<<<<<<< * lognormal = _rand.lognormal * rayleigh = _rand.rayleigh */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4921, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4941, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_logistic); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4921, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_logistic); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(0, 4941, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_logistic, __pyx_t_9) < 0) __PYX_ERR(0, 4921, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_logistic, __pyx_t_9) < 0) __PYX_ERR(0, 4941, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4922 + /* "mtrand.pyx":4942 * gumbel = _rand.gumbel * logistic = _rand.logistic * lognormal = _rand.lognormal # <<<<<<<<<<<<<< * rayleigh = _rand.rayleigh * wald = _rand.wald */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4922, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_lognormal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4922, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_lognormal); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_lognormal, __pyx_t_7) < 0) __PYX_ERR(0, 4922, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_lognormal, __pyx_t_7) < 0) __PYX_ERR(0, 4942, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4923 + /* "mtrand.pyx":4943 * logistic = _rand.logistic * lognormal = _rand.lognormal * rayleigh = _rand.rayleigh # <<<<<<<<<<<<<< * wald = _rand.wald * triangular = _rand.triangular */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4923, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4943, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_rayleigh); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4923, 
__pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_rayleigh); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4943, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_rayleigh, __pyx_t_9) < 0) __PYX_ERR(0, 4923, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_rayleigh, __pyx_t_9) < 0) __PYX_ERR(0, 4943, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4924 + /* "mtrand.pyx":4944 * lognormal = _rand.lognormal * rayleigh = _rand.rayleigh * wald = _rand.wald # <<<<<<<<<<<<<< * triangular = _rand.triangular * */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4924, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4944, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_wald); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4924, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_wald); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4944, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_wald, __pyx_t_7) < 0) __PYX_ERR(0, 4924, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_wald, __pyx_t_7) < 0) __PYX_ERR(0, 4944, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4925 + /* "mtrand.pyx":4945 * rayleigh = _rand.rayleigh * wald = _rand.wald * triangular = _rand.triangular # <<<<<<<<<<<<<< * * binomial = _rand.binomial */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4925, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_triangular); if (unlikely(!__pyx_t_9)) 
__PYX_ERR(0, 4925, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_triangular); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_triangular, __pyx_t_9) < 0) __PYX_ERR(0, 4925, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_triangular, __pyx_t_9) < 0) __PYX_ERR(0, 4945, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4927 + /* "mtrand.pyx":4947 * triangular = _rand.triangular * * binomial = _rand.binomial # <<<<<<<<<<<<<< * negative_binomial = _rand.negative_binomial * poisson = _rand.poisson */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4927, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4947, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_binomial); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4927, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_binomial); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4947, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_binomial, __pyx_t_7) < 0) __PYX_ERR(0, 4927, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_binomial, __pyx_t_7) < 0) __PYX_ERR(0, 4947, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4928 + /* "mtrand.pyx":4948 * * binomial = _rand.binomial * negative_binomial = _rand.negative_binomial # <<<<<<<<<<<<<< * poisson = _rand.poisson * zipf = _rand.zipf */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4928, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4948, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_negative_binomial); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4928, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_negative_binomial); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4948, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_negative_binomial, __pyx_t_9) < 0) __PYX_ERR(0, 4928, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_negative_binomial, __pyx_t_9) < 0) __PYX_ERR(0, 4948, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4929 + /* "mtrand.pyx":4949 * binomial = _rand.binomial * negative_binomial = _rand.negative_binomial * poisson = _rand.poisson # <<<<<<<<<<<<<< * zipf = _rand.zipf * geometric = _rand.geometric */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4929, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4949, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_poisson); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4929, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_poisson); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4949, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_poisson, __pyx_t_7) < 0) __PYX_ERR(0, 4929, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_poisson, __pyx_t_7) < 0) __PYX_ERR(0, 4949, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4930 + /* "mtrand.pyx":4950 * negative_binomial = _rand.negative_binomial * poisson = _rand.poisson * zipf = _rand.zipf # <<<<<<<<<<<<<< * geometric = _rand.geometric * hypergeometric = _rand.hypergeometric */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4930, __pyx_L1_error) + 
__pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4950, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_zipf); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4930, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_zipf); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4950, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_zipf, __pyx_t_9) < 0) __PYX_ERR(0, 4930, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_zipf, __pyx_t_9) < 0) __PYX_ERR(0, 4950, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4931 + /* "mtrand.pyx":4951 * poisson = _rand.poisson * zipf = _rand.zipf * geometric = _rand.geometric # <<<<<<<<<<<<<< * hypergeometric = _rand.hypergeometric * logseries = _rand.logseries */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4931, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4951, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_geometric); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4931, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_geometric); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4951, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_geometric, __pyx_t_7) < 0) __PYX_ERR(0, 4931, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_geometric, __pyx_t_7) < 0) __PYX_ERR(0, 4951, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4932 + /* "mtrand.pyx":4952 * zipf = _rand.zipf * geometric = _rand.geometric * hypergeometric = _rand.hypergeometric # <<<<<<<<<<<<<< * logseries = _rand.logseries * */ - __pyx_t_7 = 
__Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4932, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4952, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_hypergeometric); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4932, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_hypergeometric); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4952, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_hypergeometric, __pyx_t_9) < 0) __PYX_ERR(0, 4932, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_hypergeometric, __pyx_t_9) < 0) __PYX_ERR(0, 4952, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4933 + /* "mtrand.pyx":4953 * geometric = _rand.geometric * hypergeometric = _rand.hypergeometric * logseries = _rand.logseries # <<<<<<<<<<<<<< * * multivariate_normal = _rand.multivariate_normal */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4933, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4953, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_logseries); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4933, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_logseries); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4953, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_logseries, __pyx_t_7) < 0) __PYX_ERR(0, 4933, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_logseries, __pyx_t_7) < 0) __PYX_ERR(0, 4953, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4935 + /* "mtrand.pyx":4955 * logseries = _rand.logseries * * 
multivariate_normal = _rand.multivariate_normal # <<<<<<<<<<<<<< * multinomial = _rand.multinomial * dirichlet = _rand.dirichlet */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4935, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4955, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_multivariate_normal); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4935, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_multivariate_normal); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4955, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_multivariate_normal, __pyx_t_9) < 0) __PYX_ERR(0, 4935, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_multivariate_normal, __pyx_t_9) < 0) __PYX_ERR(0, 4955, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4936 + /* "mtrand.pyx":4956 * * multivariate_normal = _rand.multivariate_normal * multinomial = _rand.multinomial # <<<<<<<<<<<<<< * dirichlet = _rand.dirichlet * */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4936, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4956, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_multinomial); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4936, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_multinomial); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4956, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_multinomial, __pyx_t_7) < 0) __PYX_ERR(0, 4936, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_multinomial, __pyx_t_7) < 0) __PYX_ERR(0, 4956, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4937 + /* "mtrand.pyx":4957 * multivariate_normal = _rand.multivariate_normal * multinomial = _rand.multinomial * dirichlet = _rand.dirichlet # <<<<<<<<<<<<<< * * shuffle = _rand.shuffle */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4937, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4957, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_dirichlet); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4937, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_dirichlet); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4957, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_dirichlet, __pyx_t_9) < 0) __PYX_ERR(0, 4937, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_dirichlet, __pyx_t_9) < 0) __PYX_ERR(0, 4957, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - /* "mtrand.pyx":4939 + /* "mtrand.pyx":4959 * dirichlet = _rand.dirichlet * * shuffle = _rand.shuffle # <<<<<<<<<<<<<< * permutation = _rand.permutation */ - __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4939, __pyx_L1_error) + __pyx_t_9 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4959, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_shuffle); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4939, __pyx_L1_error) + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_shuffle); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4959, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_shuffle, __pyx_t_7) < 0) __PYX_ERR(0, 4939, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, 
__pyx_n_s_shuffle, __pyx_t_7) < 0) __PYX_ERR(0, 4959, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - /* "mtrand.pyx":4940 + /* "mtrand.pyx":4960 * * shuffle = _rand.shuffle * permutation = _rand.permutation # <<<<<<<<<<<<<< */ - __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4940, __pyx_L1_error) + __pyx_t_7 = __Pyx_GetModuleGlobalName(__pyx_n_s_rand_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 4960, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_permutation); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4940, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_permutation); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 4960, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_permutation, __pyx_t_9) < 0) __PYX_ERR(0, 4940, __pyx_L1_error) + if (PyDict_SetItem(__pyx_d, __pyx_n_s_permutation, __pyx_t_9) < 0) __PYX_ERR(0, 4960, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "mtrand.pyx":1 @@ -44712,51 +45175,51 @@ * # * # Copyright 2005 Robert Kern (robert.kern@gmail.com) */ - __pyx_t_9 = PyDict_New(); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_t_9 = __Pyx_PyDict_NewPresized(43); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_random_sample_line_8, __pyx_kp_u_random_sample_size_None_Return) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_tomaxint_line_858, __pyx_kp_u_tomaxint_size_None_Random_integ) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_randint_line_905, __pyx_kp_u_randint_low_high_None_size_None) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_bytes_line_999, __pyx_kp_u_bytes_length_Return_random_byte) < 0) 
__PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_choice_line_1028, __pyx_kp_u_choice_a_size_None_replace_True) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_uniform_line_1210, __pyx_kp_u_uniform_low_0_0_high_1_0_size_N) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_rand_line_1316, __pyx_kp_u_rand_d0_d1_dn_Random_values_in) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_randn_line_1360, __pyx_kp_u_randn_d0_d1_dn_Return_a_sample) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_tomaxint_line_863, __pyx_kp_u_tomaxint_size_None_Random_integ) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_randint_line_910, __pyx_kp_u_randint_low_high_None_size_None) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_bytes_line_1004, __pyx_kp_u_bytes_length_Return_random_byte) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_choice_line_1033, __pyx_kp_u_choice_a_size_None_replace_True) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_uniform_line_1215, __pyx_kp_u_uniform_low_0_0_high_1_0_size_N) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_rand_line_1321, __pyx_kp_u_rand_d0_d1_dn_Random_values_in) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_randn_line_1365, __pyx_kp_u_randn_d0_d1_dn_Return_a_sample) < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_random_integers_line, __pyx_kp_u_random_integers_low_high_None_s) < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_standard_normal_line, __pyx_kp_u_standard_normal_size_None_Draw) < 0) 
__PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_normal_line_1547, __pyx_kp_u_normal_loc_0_0_scale_1_0_size_N) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_normal_line_1552, __pyx_kp_u_normal_loc_0_0_scale_1_0_size_N) < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_standard_exponential, __pyx_kp_u_standard_exponential_size_None) < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_standard_gamma_line, __pyx_kp_u_standard_gamma_shape_size_None) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_gamma_line_1896, __pyx_kp_u_gamma_shape_scale_1_0_size_None) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_f_line_1992, __pyx_kp_u_f_dfnum_dfden_size_None_Draw_sa) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_noncentral_f_line_20, __pyx_kp_u_noncentral_f_dfnum_dfden_nonc_s) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_chisquare_line_2196, __pyx_kp_u_chisquare_df_size_None_Draw_sam) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_gamma_line_1901, __pyx_kp_u_gamma_shape_scale_1_0_size_None) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_f_line_1997, __pyx_kp_u_f_dfnum_dfden_size_None_Draw_sa) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_noncentral_f_line_21, __pyx_kp_u_noncentral_f_dfnum_dfden_nonc_s) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_chisquare_line_2205, __pyx_kp_u_chisquare_df_size_None_Draw_sam) < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_noncentral_chisquare, 
__pyx_kp_u_noncentral_chisquare_df_nonc_si) < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_standard_cauchy_line, __pyx_kp_u_standard_cauchy_size_None_Draw) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_standard_t_line_2445, __pyx_kp_u_standard_t_df_size_None_Draw_sa) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_vonmises_line_2551, __pyx_kp_u_vonmises_mu_kappa_size_None_Dra) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_pareto_line_2649, __pyx_kp_u_pareto_a_size_None_Draw_samples) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_weibull_line_2759, __pyx_kp_u_weibull_a_size_None_Draw_sample) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_power_line_2869, __pyx_kp_u_power_a_size_None_Draws_samples) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_laplace_line_2980, __pyx_kp_u_laplace_loc_0_0_scale_1_0_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_gumbel_line_3078, __pyx_kp_u_gumbel_loc_0_0_scale_1_0_size_N) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_logistic_line_3209, __pyx_kp_u_logistic_loc_0_0_scale_1_0_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_lognormal_line_3302, __pyx_kp_u_lognormal_mean_0_0_sigma_1_0_si) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_rayleigh_line_3426, __pyx_kp_u_rayleigh_scale_1_0_size_None_Dr) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_wald_line_3505, __pyx_kp_u_wald_mean_scale_size_None_Draw) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, 
__pyx_kp_u_RandomState_triangular_line_3592, __pyx_kp_u_triangular_left_mode_right_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_binomial_line_3686, __pyx_kp_u_binomial_n_p_size_None_Draw_sam) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_standard_t_line_2456, __pyx_kp_u_standard_t_df_size_None_Draw_sa) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_vonmises_line_2562, __pyx_kp_u_vonmises_mu_kappa_size_None_Dra) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_pareto_line_2660, __pyx_kp_u_pareto_a_size_None_Draw_samples) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_weibull_line_2770, __pyx_kp_u_weibull_a_size_None_Draw_sample) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_power_line_2880, __pyx_kp_u_power_a_size_None_Draws_samples) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_laplace_line_2991, __pyx_kp_u_laplace_loc_0_0_scale_1_0_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_gumbel_line_3089, __pyx_kp_u_gumbel_loc_0_0_scale_1_0_size_N) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_logistic_line_3220, __pyx_kp_u_logistic_loc_0_0_scale_1_0_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_lognormal_line_3313, __pyx_kp_u_lognormal_mean_0_0_sigma_1_0_si) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_rayleigh_line_3437, __pyx_kp_u_rayleigh_scale_1_0_size_None_Dr) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_wald_line_3516, __pyx_kp_u_wald_mean_scale_size_None_Draw) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if 
(PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_triangular_line_3603, __pyx_kp_u_triangular_left_mode_right_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_binomial_line_3697, __pyx_kp_u_binomial_n_p_size_None_Draw_sam) < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_negative_binomial_li, __pyx_kp_u_negative_binomial_n_p_size_None) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_poisson_line_3903, __pyx_kp_u_poisson_lam_1_0_size_None_Draw) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_zipf_line_3991, __pyx_kp_u_zipf_a_size_None_Draw_samples_f) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_geometric_line_4082, __pyx_kp_u_geometric_p_size_None_Draw_samp) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_poisson_line_3914, __pyx_kp_u_poisson_lam_1_0_size_None_Draw) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_zipf_line_4002, __pyx_kp_u_zipf_a_size_None_Draw_samples_f) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_geometric_line_4095, __pyx_kp_u_geometric_p_size_None_Draw_samp) < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_hypergeometric_line, __pyx_kp_u_hypergeometric_ngood_nbad_nsamp) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_logseries_line_4272, __pyx_kp_u_logseries_p_size_None_Draw_samp) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_logseries_line_4285, __pyx_kp_u_logseries_p_size_None_Draw_samp) < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_multivariate_normal, __pyx_kp_u_multivariate_normal_mean_cov_si) < 0) __PYX_ERR(0, 1, 
__pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_multinomial_line_453, __pyx_kp_u_multinomial_n_pvals_size_None_D) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_dirichlet_line_4643, __pyx_kp_u_dirichlet_alpha_size_None_Draw) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_shuffle_line_4759, __pyx_kp_u_shuffle_x_Modify_a_sequence_in) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_permutation_line_484, __pyx_kp_u_permutation_x_Randomly_permute) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_multinomial_line_454, __pyx_kp_u_multinomial_n_pvals_size_None_D) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_dirichlet_line_4656, __pyx_kp_u_dirichlet_alpha_size_None_Draw) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_shuffle_line_4779, __pyx_kp_u_shuffle_x_Modify_a_sequence_in) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + if (PyDict_SetItem(__pyx_t_9, __pyx_kp_u_RandomState_permutation_line_486, __pyx_kp_u_permutation_x_Randomly_permute) < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_9) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; @@ -44780,10 +45243,12 @@ } __pyx_L0:; __Pyx_RefNannyFinishContext(); - #if PY_MAJOR_VERSION < 3 - return; - #else + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 
0 : -1; + #elif PY_MAJOR_VERSION >= 3 return __pyx_m; + #else + return; #endif } @@ -44805,6 +45270,20 @@ } #endif +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); @@ -44965,10 +45444,19 @@ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + if (likely(result)) { + Py_INCREF(result); + } else if (unlikely(PyErr_Occurred())) { + result = NULL; + } else { +#else result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { +#endif #else result = PyObject_GetItem(__pyx_d, name); if (!result) { @@ -44980,7 +45468,7 @@ } /* PyCFunctionFastCall */ - #if CYTHON_FAST_PYCCALL + #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); @@ -45003,12 +45491,12 @@ #endif /* PyFunctionFastCall */ - #if CYTHON_FAST_PYCALL + #if CYTHON_FAST_PYCALL #include "frameobject.h" static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; - PyThreadState *tstate = PyThreadState_GET(); + PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; 
Py_ssize_t i; PyObject *result; @@ -45123,7 +45611,7 @@ #endif /* PyObjectCall */ - #if CYTHON_COMPILING_IN_CPYTHON + #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; @@ -45143,7 +45631,7 @@ #endif /* PyObjectCallMethO */ - #if CYTHON_COMPILING_IN_CPYTHON + #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; @@ -45163,7 +45651,7 @@ #endif /* PyObjectCallOneArg */ - #if CYTHON_COMPILING_IN_CPYTHON + #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); @@ -45203,23 +45691,38 @@ #endif /* SaveResetException */ - #if CYTHON_FAST_THREAD_STATE + #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if PY_VERSION_HEX >= 0x030700A2 + *type = tstate->exc_state.exc_type; + *value = tstate->exc_state.exc_value; + *tb = tstate->exc_state.exc_traceback; + #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; + #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; + #if PY_VERSION_HEX >= 0x030700A2 + tmp_type = tstate->exc_state.exc_type; + tmp_value = tstate->exc_state.exc_value; + tmp_tb = tstate->exc_state.exc_traceback; + tstate->exc_state.exc_type = type; + tstate->exc_state.exc_value = value; + tstate->exc_state.exc_traceback = tb; + #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; + #endif 
Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); @@ -45227,17 +45730,32 @@ #endif /* PyErrExceptionMatches */ - #if CYTHON_FAST_THREAD_STATE + #if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; icurexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; - return PyErr_GivenExceptionMatches(exc_type, err); + if (unlikely(PyTuple_Check(err))) + return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ - #if CYTHON_FAST_THREAD_STATE + #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { @@ -45274,12 +45792,21 @@ *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE + #if PY_VERSION_HEX >= 0x030700A2 + tmp_type = tstate->exc_state.exc_type; + tmp_value = tstate->exc_state.exc_value; + tmp_tb = tstate->exc_state.exc_traceback; + tstate->exc_state.exc_type = local_type; + tstate->exc_state.exc_value = local_value; + tstate->exc_state.exc_traceback = local_tb; + #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; + #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); @@ -45298,7 +45825,7 @@ } /* PyErrFetchRestore */ - #if CYTHON_FAST_THREAD_STATE + #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; @@ -45322,7 +45849,7 @@ #endif /* RaiseException */ - #if PY_MAJOR_VERSION < 3 + #if 
PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare @@ -45437,11 +45964,7 @@ "raise: exception class must be a subclass of BaseException"); goto bad; } -#if PY_VERSION_HEX >= 0x03030000 if (cause) { -#else - if (cause && cause != Py_None) { -#endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; @@ -45469,7 +45992,7 @@ PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else - PyThreadState *tstate = PyThreadState_GET(); + PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); @@ -45493,7 +46016,7 @@ } #endif #ifdef __Pyx_CyFunction_USED - if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { + if (likely(PyCFunction_Check(func) || __Pyx_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif @@ -45511,15 +46034,106 @@ PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } - if (likely(PyObject_TypeCheck(obj, type))) + if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } +/* PyIntBinop */ + #if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { + if (op1 == op2) { + Py_RETURN_TRUE; + } + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(op1))) { + const long b = intval; + long a = PyInt_AS_LONG(op1); + if (a == b) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } + } + #endif + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(op1))) { + const long b = intval; + long a; + const digit* digits = ((PyLongObject*)op1)->ob_digit; + const Py_ssize_t size = Py_SIZE(op1); + if (likely(__Pyx_sst_abs(size) <= 1)) { + a = likely(size) ? 
digits[0] : 0; + if (size == -1) a = -a; + } else { + switch (size) { + case -2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + } + CYTHON_FALLTHROUGH; + case 2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + } + CYTHON_FALLTHROUGH; + case -3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + } + CYTHON_FALLTHROUGH; + case 3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + } + CYTHON_FALLTHROUGH; + case -4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + } + CYTHON_FALLTHROUGH; + case 4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; + } + CYTHON_FALLTHROUGH; + #if PyLong_SHIFT < 30 && PyLong_SHIFT != 15 + default: return PyLong_Type.tp_richcompare(op1, op2, Py_EQ); + #else + default: Py_RETURN_FALSE; + #endif + } + } + if (a == b) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } + } + #endif + if (PyFloat_CheckExact(op1)) { + const long b = intval; + double a = PyFloat_AS_DOUBLE(op1); + if ((double)a == (double)b) { + Py_RETURN_TRUE; + } else { + Py_RETURN_FALSE; + } + } + return PyObject_RichCompare(op1, op2, Py_EQ); +} +#endif + /* GetItemInt */ - static CYTHON_INLINE PyObject 
*__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); @@ -45864,10 +46478,10 @@ /* IterFinish */ static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_FAST_THREAD_STATE - PyThreadState *tstate = PyThreadState_GET(); + PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { - if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; @@ -45908,6 +46522,35 @@ return 0; } +/* ObjectGetItem */ + #if CYTHON_USE_TYPE_SLOTS +static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { + PyObject *runerr; + Py_ssize_t key_value; + PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; + if (unlikely(!(m && m->sq_item))) { + PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); + return NULL; + } + key_value = __Pyx_PyIndex_AsSsize_t(index); + if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { + return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); + } + if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { + PyErr_Clear(); + PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); + } + return NULL; +} +static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { + PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; + if (likely(m && m->mp_subscript)) { + return m->mp_subscript(obj, key); + } + return __Pyx_PyObject_GetIndex(obj, key); +} +#endif + /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int 
inplace) { @@ -45947,6 +46590,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); @@ -45957,6 +46601,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); @@ -45967,6 +46612,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); @@ -45977,6 +46623,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); @@ -45987,6 +46634,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); @@ -45997,6 +46645,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } } @@ -46024,91 +46673,6 @@ } #endif -/* PyIntBinop */ - #if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { - if (op1 == op2) { - Py_RETURN_TRUE; - } - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long a = PyInt_AS_LONG(op1); - if (a == b) { - Py_RETURN_TRUE; - } else { - Py_RETURN_FALSE; - } - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if 
(likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a; - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - #if PyLong_SHIFT < 30 && PyLong_SHIFT != 15 - default: return PyLong_Type.tp_richcompare(op1, op2, Py_EQ); - #else - default: Py_RETURN_FALSE; - #endif - } - } - if (a == b) { - Py_RETURN_TRUE; - } else { - Py_RETURN_FALSE; - } - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; - double a = PyFloat_AS_DOUBLE(op1); - if ((double)a == (double)b) { - Py_RETURN_TRUE; - } else { - Py_RETURN_FALSE; - } - } - return 
PyObject_RichCompare(op1, op2, Py_EQ); -} -#endif - /* SliceObject */ static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value, Py_ssize_t cstart, Py_ssize_t cstop, @@ -46207,8 +46771,22 @@ return -1; } +/* PyObjectSetAttrStr */ + #if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_setattro)) + return tp->tp_setattro(obj, attr_name, value); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_setattr)) + return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); +#endif + return PyObject_SetAttr(obj, attr_name, value); +} +#endif + /* KeywordStringCheck */ - static CYTHON_INLINE int __Pyx_CheckKeywordStrings( + static int __Pyx_CheckKeywordStrings( PyObject *kwdict, const char* function_name, int kw_allowed) @@ -46222,7 +46800,7 @@ #else while (PyDict_Next(kwdict, &pos, &key, 0)) { #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) + if (unlikely(!PyString_Check(key))) #endif if (unlikely(!PyUnicode_Check(key))) goto invalid_keyword_type; @@ -46286,6 +46864,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); @@ -46296,6 +46875,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); @@ -46306,6 +46886,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); @@ -46316,6 +46897,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; case -4: if (8 * 
sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); @@ -46326,6 +46908,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); @@ -46336,6 +46919,7 @@ goto long_long; #endif } + CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } @@ -46370,7 +46954,7 @@ PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; - #if PY_VERSION_HEX < 0x03030000 + #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) @@ -46394,17 +46978,8 @@ #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { - #if PY_VERSION_HEX < 0x03030000 - PyObject *py_level = PyInt_FromLong(1); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, NULL); - Py_DECREF(py_level); - #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); - #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; @@ -46415,7 +46990,7 @@ } #endif if (!module) { - #if PY_VERSION_HEX < 0x03030000 + #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; @@ -46429,7 +47004,7 @@ } } bad: - #if PY_VERSION_HEX < 0x03030000 + #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); @@ -46452,7 +47027,7 @@ } /* SetItemInt */ - static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { + static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) 
return -1; r = PyObject_SetItem(o, j, v); @@ -46499,6 +47074,56 @@ return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } +/* PyObject_GenericGetAttrNoDict */ + #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'%.50s' object has no attribute '%U'", + tp->tp_name, attr_name); +#else + "'%.50s' object has no attribute '%.400s'", + tp->tp_name, PyString_AS_STRING(attr_name)); +#endif + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { + PyObject *descr; + PyTypeObject *tp = Py_TYPE(obj); + if (unlikely(!PyString_Check(attr_name))) { + return PyObject_GenericGetAttr(obj, attr_name); + } + assert(!tp->tp_dictoffset); + descr = _PyType_Lookup(tp, attr_name); + if (unlikely(!descr)) { + return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); + } + Py_INCREF(descr); + #if PY_MAJOR_VERSION < 3 + if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) + #endif + { + descrgetfunc f = Py_TYPE(descr)->tp_descr_get; + if (unlikely(f)) { + PyObject *res = f(descr, obj, (PyObject *)tp); + Py_DECREF(descr); + return res; + } + } + return descr; +} +#endif + +/* PyObject_GenericGetAttr */ + #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { + if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { + return PyObject_GenericGetAttr(obj, attr_name); + } + return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); +} +#endif + /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 @@ -46518,29 +47143,29 @@ } /* CLineInTraceback */ - static int __Pyx_CLineForTraceback(int c_line) { -#ifdef CYTHON_CLINE_IN_TRACEBACK - return 
((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0; -#else + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { - use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); + use_cline = __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); } else #endif { - PyObject *ptype, *pvalue, *ptraceback; - PyObject *use_cline_obj; - PyErr_Fetch(&ptype, &pvalue, &ptraceback); - use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { + PyErr_Clear(); use_cline = NULL; } - PyErr_Restore(ptype, pvalue, ptraceback); } if (!use_cline) { c_line = 0; @@ -46549,9 +47174,10 @@ else if (PyObject_Not(use_cline) != 0) { c_line = 0; } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; -#endif } +#endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { @@ -46693,8 +47319,9 @@ int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { - c_line = __Pyx_CLineForTraceback(c_line); + c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? 
-c_line : py_line); if (!py_code) { @@ -46704,10 +47331,10 @@ __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( - PyThreadState_GET(), /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); @@ -49757,6 +50384,78 @@ return (long) -1; } +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(err == exc_type)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; @@ -49883,7 +50582,7 @@ if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) - PyErr_Clear(); + return -1; ++t; } return 0; @@ -49896,46 +50595,53 @@ Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { -#if PY_VERSION_HEX < 0x03030000 - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t 
*length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - PyUnicode_AsASCIIString(o); - return NULL; - } + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; } } + } #endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} #else - if (__Pyx_PyUnicode_READY(o) == -1) return NULL; +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (PyUnicode_IS_ASCII(o)) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } #else - return PyUnicode_AsUTF8AndSize(o, length); + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif #endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && #endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) @@ -49959,6 +50665,26 @@ if (is_true | (x == Py_False) | (x == Py_None)) 
return is_true; else return PyObject_IsTrue(x); } +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; @@ -49966,9 +50692,9 @@ const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 - if (PyInt_Check(x) || PyLong_Check(x)) + if (likely(PyInt_Check(x) || PyLong_Check(x))) #else - if (PyLong_Check(x)) + if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS @@ -49976,32 +50702,30 @@ #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; - res = PyNumber_Int(x); + res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; - res = PyNumber_Long(x); + res = m->nb_long(x); } #else - if (m && m->nb_int) { + if (likely(m && m->nb_int)) { name = "int"; - res = PyNumber_Long(x); + res = m->nb_int(x); } #endif #else - res = PyNumber_Int(x); + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } #endif - if (res) { + if (likely(res)) { #if PY_MAJOR_VERSION < 3 - if (!PyInt_Check(res) && !PyLong_Check(res)) { + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else - if (!PyLong_Check(res)) { + if (unlikely(!PyLong_CheckExact(res))) { #endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type %.200s)", - name, name, Py_TYPE(res)->tp_name); - 
Py_DECREF(res); - return NULL; + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { diff -Nru python-numpy-1.13.3/numpy/random/mtrand/mtrand.pyx python-numpy-1.14.5/numpy/random/mtrand/mtrand.pyx --- python-numpy-1.13.3/numpy/random/mtrand/mtrand.pyx 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/random/mtrand/mtrand.pyx 2018-06-12 18:28:52.000000000 +0000 @@ -211,7 +211,7 @@ itera = PyArray_IterNew(oa) with lock, nogil: for i from 0 <= i < length: - array_data[i] = func(state, ((itera.dataptr))[0]) + array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) PyArray_ITER_NEXT(itera) else: array = np.empty(size, np.float64) @@ -536,7 +536,7 @@ itera = PyArray_IterNew(oa) with lock, nogil: for i from 0 <= i < length: - array_data[i] = func(state, ((itera.dataptr))[0]) + array_data[i] = func(state, (PyArray_ITER_DATA(itera))[0]) PyArray_ITER_NEXT(itera) else: array = np.empty(size, int) @@ -659,7 +659,7 @@ Parameters ---------- - seed : int or array_like, optional + seed : int or 1-d array_like, optional Seed for `RandomState`. Must be convertible to 32 bit unsigned integers. 
@@ -676,14 +676,19 @@ errcode = rk_randomseed(self.internal_state) else: idx = operator.index(seed) - if idx > int(2**32 - 1) or idx < 0: + if (idx >= 2**32) or (idx < 0): raise ValueError("Seed must be between 0 and 2**32 - 1") with self.lock: rk_seed(idx, self.internal_state) except TypeError: - obj = np.asarray(seed).astype(np.int64, casting='safe') - if ((obj > int(2**32 - 1)) | (obj < 0)).any(): - raise ValueError("Seed must be between 0 and 2**32 - 1") + obj = np.asarray(seed) + if obj.size == 0: + raise ValueError("Seed must be non-empty") + obj = obj.astype(np.int64, casting='safe') + if obj.ndim != 1: + raise ValueError("Seed array must be 1-d") + if ((obj >= 2**32) | (obj < 0)).any(): + raise ValueError("Seed values must be between 0 and 2**32 - 1") obj = obj.astype('L', casting='unsafe') with self.lock: init_by_array(self.internal_state, PyArray_DATA(obj), @@ -897,7 +902,7 @@ array([[[ True, True], [ True, True]], [[ True, True], - [ True, True]]], dtype=bool) + [ True, True]]]) """ return disc0_array(self.internal_state, rk_long, size, self.lock) @@ -2007,10 +2012,10 @@ Parameters ---------- - dfnum : int or array_like of ints - Degrees of freedom in numerator. Should be greater than zero. - dfden : int or array_like of ints - Degrees of freedom in denominator. Should be greater than zero. + dfnum : float or array_like of floats + Degrees of freedom in numerator, should be > 0. + dfden : float or array_like of float + Degrees of freedom in denominator, should be > 0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -2109,12 +2114,16 @@ Parameters ---------- - dfnum : int or array_like of ints - Parameter, should be > 1. - dfden : int or array_like of ints - Parameter, should be > 1. + dfnum : float or array_like of floats + Numerator degrees of freedom, should be > 0. + + .. 
versionchanged:: 1.14.0 + Earlier NumPy versions required dfnum > 1. + dfden : float or array_like of floats + Denominator degrees of freedom, should be > 0. nonc : float or array_like of floats - Parameter, should be >= 0. + Non-centrality parameter, the sum of the squares of the numerator + means, should be >= 0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -2175,8 +2184,8 @@ fdfden = PyFloat_AsDouble(dfden) fnonc = PyFloat_AsDouble(nonc) - if fdfnum <= 1: - raise ValueError("dfnum <= 1") + if fdfnum <= 0: + raise ValueError("dfnum <= 0") if fdfden <= 0: raise ValueError("dfden <= 0") if fnonc < 0: @@ -2184,8 +2193,8 @@ return cont3_array_sc(self.internal_state, rk_noncentral_f, size, fdfnum, fdfden, fnonc, self.lock) - if np.any(np.less_equal(odfnum, 1.0)): - raise ValueError("dfnum <= 1") + if np.any(np.less_equal(odfnum, 0.0)): + raise ValueError("dfnum <= 0") if np.any(np.less_equal(odfden, 0.0)): raise ValueError("dfden <= 0") if np.any(np.less(ononc, 0.0)): @@ -2206,8 +2215,8 @@ Parameters ---------- - df : int or array_like of ints - Number of degrees of freedom. + df : float or array_like of floats + Number of degrees of freedom, should be > 0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -2285,9 +2294,11 @@ Parameters ---------- - df : int or array_like of ints - Degrees of freedom, should be > 0 as of NumPy 1.10.0, - should be > 1 for earlier versions. + df : float or array_like of floats + Degrees of freedom, should be > 0. + + .. versionchanged:: 1.10.0 + Earlier NumPy versions required dfnum > 1. nonc : float or array_like of floats Non-centrality, should be non-negative. 
size : int or tuple of ints, optional @@ -2455,7 +2466,7 @@ Parameters ---------- - df : int or array_like of ints + df : float or array_like of floats Degrees of freedom, should be > 0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then @@ -4070,13 +4081,15 @@ if oa.shape == (): fa = PyFloat_AsDouble(a) - if fa <= 1.0: - raise ValueError("a <= 1.0") + # use logic that ensures NaN is rejected. + if not fa > 1.0: + raise ValueError("'a' must be a valid float > 1.0") return discd_array_sc(self.internal_state, rk_zipf, size, fa, self.lock) - if np.any(np.less_equal(oa, 1.0)): - raise ValueError("a <= 1.0") + # use logic that ensures NaN is rejected. + if not np.all(np.greater(oa, 1.0)): + raise ValueError("'a' must contain valid floats > 1.0") return discd_array(self.internal_state, rk_zipf, size, oa, self.lock) def geometric(self, p, size=None): @@ -4666,6 +4679,11 @@ samples : ndarray, The drawn samples, of shape (size, alpha.ndim). + Raises + ------- + ValueError + If any value in alpha is less than or equal to zero + Notes ----- .. 
math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i} @@ -4731,6 +4749,8 @@ k = len(alpha) alpha_arr = PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1) + if np.any(np.less_equal(alpha_arr, 0)): + raise ValueError('alpha <= 0') alpha_data = PyArray_DATA(alpha_arr) shape = _shape_from_size(size, k) diff -Nru python-numpy-1.13.3/numpy/random/mtrand/numpy.pxd python-numpy-1.14.5/numpy/random/mtrand/numpy.pxd --- python-numpy-1.13.3/numpy/random/mtrand/numpy.pxd 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/random/mtrand/numpy.pxd 2018-06-12 17:31:56.000000000 +0000 @@ -41,7 +41,7 @@ NPY_ARRAY_ALIGNED NPY_ARRAY_NOTSWAPPED NPY_ARRAY_WRITEABLE - NPY_ARRAY_UPDATEIFCOPY + NPY_ARRAY_WRITEBACKIFCOPY NPY_ARR_HAS_DESCR NPY_ARRAY_BEHAVED @@ -131,6 +131,7 @@ object PyArray_IterNew(object arr) void PyArray_ITER_NEXT(flatiter it) nogil + void* PyArray_ITER_DATA(flatiter it) nogil dtype PyArray_DescrFromType(int) diff -Nru python-numpy-1.13.3/numpy/random/mtrand/randomkit.c python-numpy-1.14.5/numpy/random/mtrand/randomkit.c --- python-numpy-1.13.3/numpy/random/mtrand/randomkit.c 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/random/mtrand/randomkit.c 2018-06-12 17:31:56.000000000 +0000 @@ -64,13 +64,6 @@ /* static char const rcsid[] = "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */ -#include -#include -#include -#include -#include -#include -#include #ifdef _WIN32 /* @@ -109,18 +102,27 @@ #include #endif +/* + * Do not move this include. randomkit.h must be included + * after windows timeb.h is included. + */ +#include "randomkit.h" + #else /* Unix */ +#include "randomkit.h" #include #include #include #endif -/* - * Do not move this include. randomkit.h must be included - * after windows timeb.h is included. 
- */ -#include "randomkit.h" +#include +#include +#include +#include +#include +#include +#include #ifndef RK_DEV_URANDOM #define RK_DEV_URANDOM "/dev/urandom" diff -Nru python-numpy-1.13.3/numpy/random/tests/test_random.py python-numpy-1.14.5/numpy/random/tests/test_random.py --- python-numpy-1.13.3/numpy/random/tests/test_random.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/random/tests/test_random.py 2018-06-12 18:28:52.000000000 +0000 @@ -3,15 +3,16 @@ import numpy as np from numpy.testing import ( - TestCase, run_module_suite, assert_, assert_raises, assert_equal, - assert_warns, assert_no_warnings, assert_array_equal, - assert_array_almost_equal, suppress_warnings) + run_module_suite, assert_, assert_raises, assert_equal, assert_warns, + assert_no_warnings, assert_array_equal, assert_array_almost_equal, + suppress_warnings + ) from numpy import random import sys import warnings -class TestSeed(TestCase): +class TestSeed(object): def test_scalar(self): s = np.random.RandomState(0) assert_equal(s.randint(1000), 684) @@ -41,8 +42,15 @@ assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296]) assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296]) + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, np.random.RandomState, np.array([], dtype=np.int64)) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3], + [4, 5, 6]]) -class TestBinomial(TestCase): + +class TestBinomial(object): def test_n_zero(self): # Tests the corner case of n == 0 for the binomial distribution. # binomial(0, p) should be zero for any p in [0, 1]. 
@@ -57,7 +65,7 @@ assert_raises(ValueError, random.binomial, 1, np.nan) -class TestMultinomial(TestCase): +class TestMultinomial(object): def test_basic(self): random.multinomial(100, [0.2, 0.8]) @@ -82,11 +90,11 @@ (2, 2, 2)) assert_raises(TypeError, np.random.multinomial, 1, p, - np.float(1)) + float(1)) -class TestSetState(TestCase): - def setUp(self): +class TestSetState(object): + def setup(self): self.seed = 1234567890 self.prng = random.RandomState(self.seed) self.state = self.prng.get_state() @@ -133,7 +141,7 @@ self.prng.negative_binomial(0.5, 0.5) -class TestRandint(TestCase): +class TestRandint(object): rfunc = np.random.randint @@ -142,7 +150,7 @@ np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self): - assert_raises(TypeError, self.rfunc, 1, dtype=np.float) + assert_raises(TypeError, self.rfunc, 1, dtype=float) def test_bounds_checking(self): for dt in self.itype: @@ -199,7 +207,7 @@ def test_repeatability(self): import hashlib # We use a md5 hash of generated sequences of 1000 samples - # in the range [0, 6) for all but np.bool, where the range + # in the range [0, 6) for all but bool, where the range # is [0, 2). Hashes are for little endian numbers. 
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0', 'int16': '1b7741b80964bb190c50d541dca1cac1', @@ -225,9 +233,9 @@ # bools do not depend on endianess np.random.seed(1234) - val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8) + val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) res = hashlib.md5(val).hexdigest() - assert_(tgt[np.dtype(np.bool).name] == res) + assert_(tgt[np.dtype(bool).name] == res) def test_int64_uint64_corner_case(self): # When stored in Numpy arrays, `lbnd` is casted @@ -259,23 +267,23 @@ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 sample = self.rfunc(lbnd, ubnd, dtype=dt) - self.assertEqual(sample.dtype, np.dtype(dt)) + assert_equal(sample.dtype, np.dtype(dt)) - for dt in (np.bool, np.int, np.long): - lbnd = 0 if dt is np.bool else np.iinfo(dt).min - ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + for dt in (bool, int, np.long): + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 # gh-7284: Ensure that we get Python data types sample = self.rfunc(lbnd, ubnd, dtype=dt) - self.assertFalse(hasattr(sample, 'dtype')) - self.assertEqual(type(sample), dt) + assert_(not hasattr(sample, 'dtype')) + assert_equal(type(sample), dt) -class TestRandomDist(TestCase): +class TestRandomDist(object): # Make sure the random distribution returns the correct value for a # given seed - def setUp(self): + def setup(self): self.seed = 1234567890 def test_rand(self): @@ -522,7 +530,12 @@ assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) - assert_raises(TypeError, np.random.dirichlet, p, np.float(1)) + assert_raises(TypeError, np.random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, np.random.mtrand.dirichlet, alpha) def test_exponential(self): np.random.seed(self.seed) @@ -929,10 +942,10 @@ 
assert_array_equal(actual, desired) -class TestBroadcast(TestCase): +class TestBroadcast(object): # tests that functions that broadcast behave # correctly when presented with non-scalar arguments - def setUp(self): + def setup(self): self.seed = 123456789 def setSeed(self): @@ -1101,6 +1114,12 @@ assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + def test_noncentral_f_small_df(self): + self.setSeed() + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + def test_chisquare(self): df = [1] bad_df = [-1] @@ -1422,6 +1441,10 @@ actual = zipf(a * 3) assert_array_equal(actual, desired) assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + def test_geometric(self): p = [0.5] @@ -1484,9 +1507,9 @@ assert_raises(ValueError, logseries, bad_p_one * 3) assert_raises(ValueError, logseries, bad_p_two * 3) -class TestThread(TestCase): +class TestThread(object): # make sure each state produces the same sequence even in threads - def setUp(self): + def setup(self): self.seeds = range(4) def check_function(self, function, sz): @@ -1527,8 +1550,8 @@ self.check_function(gen_random, sz=(10000, 6)) # See Issue #4263 -class TestSingleEltArrayInput(TestCase): - def setUp(self): +class TestSingleEltArrayInput(object): + def setup(self): self.argOne = np.array([2]) self.argTwo = np.array([3]) self.argThree = np.array([4]) @@ -1551,7 +1574,7 @@ else: out = func(self.argOne) - self.assertEqual(out.shape, self.tgtShape) + assert_equal(out.shape, self.tgtShape) def test_two_arg_funcs(self): funcs = (np.random.uniform, np.random.normal, @@ -1572,17 +1595,17 @@ argTwo = self.argTwo out = func(self.argOne, argTwo) - self.assertEqual(out.shape, self.tgtShape) + assert_equal(out.shape, 
self.tgtShape) out = func(self.argOne[0], argTwo) - self.assertEqual(out.shape, self.tgtShape) + assert_equal(out.shape, self.tgtShape) out = func(self.argOne, argTwo[0]) - self.assertEqual(out.shape, self.tgtShape) + assert_equal(out.shape, self.tgtShape) # TODO: Uncomment once randint can broadcast arguments # def test_randint(self): -# itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, +# itype = [bool, np.int8, np.uint8, np.int16, np.uint16, # np.int32, np.uint32, np.int64, np.uint64] # func = np.random.randint # high = np.array([1]) @@ -1604,13 +1627,13 @@ for func in funcs: out = func(self.argOne, self.argTwo, self.argThree) - self.assertEqual(out.shape, self.tgtShape) + assert_equal(out.shape, self.tgtShape) out = func(self.argOne[0], self.argTwo, self.argThree) - self.assertEqual(out.shape, self.tgtShape) + assert_equal(out.shape, self.tgtShape) out = func(self.argOne, self.argTwo[0], self.argThree) - self.assertEqual(out.shape, self.tgtShape) + assert_equal(out.shape, self.tgtShape) if __name__ == "__main__": run_module_suite() diff -Nru python-numpy-1.13.3/numpy/random/tests/test_regression.py python-numpy-1.14.5/numpy/random/tests/test_regression.py --- python-numpy-1.13.3/numpy/random/tests/test_regression.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/random/tests/test_regression.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,14 +1,15 @@ from __future__ import division, absolute_import, print_function import sys -from numpy.testing import (TestCase, run_module_suite, assert_, - assert_array_equal, assert_raises) +from numpy.testing import ( + run_module_suite, assert_, assert_array_equal, assert_raises, + ) from numpy import random from numpy.compat import long import numpy as np -class TestRegression(TestCase): +class TestRegression(object): def test_VonMises_range(self): # Make sure generated random variables are in [-pi, pi]. 
diff -Nru python-numpy-1.13.3/numpy/testing/decorators.py python-numpy-1.14.5/numpy/testing/decorators.py --- python-numpy-1.13.3/numpy/testing/decorators.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/decorators.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,265 +1,6 @@ """ -Decorators for labeling and modifying behavior of test objects. - -Decorators that merely return a modified version of the original -function object are straightforward. Decorators that return a new -function object need to use -:: - - nose.tools.make_decorator(original_function)(decorator) - -in returning the decorator, in order to preserve meta-data such as -function name, setup and teardown functions and so on - see -``nose.tools`` for more information. +Back compatibility decorators module. It will import the appropriate +set of tools """ -from __future__ import division, absolute_import, print_function - -import collections - -from .utils import SkipTest, assert_warns - - -def slow(t): - """ - Label a test as 'slow'. - - The exact definition of a slow test is obviously both subjective and - hardware-dependent, but in general any individual test that requires more - than a second or two should be labeled as slow (the whole suite consits of - thousands of tests, so even a second is significant). - - Parameters - ---------- - t : callable - The test to label as slow. - - Returns - ------- - t : callable - The decorated test `t`. - - Examples - -------- - The `numpy.testing` module includes ``import decorators as dec``. - A test can be decorated as slow like this:: - - from numpy.testing import * - - @dec.slow - def test_big(self): - print('Big, slow test') - - """ - - t.slow = True - return t - -def setastest(tf=True): - """ - Signals to nose that this function is or is not a test. - - Parameters - ---------- - tf : bool - If True, specifies that the decorated callable is a test. - If False, specifies that the decorated callable is not a test. - Default is True. 
- - Notes - ----- - This decorator can't use the nose namespace, because it can be - called from a non-test module. See also ``istest`` and ``nottest`` in - ``nose.tools``. - - Examples - -------- - `setastest` can be used in the following way:: - - from numpy.testing.decorators import setastest - - @setastest(False) - def func_with_test_in_name(arg1, arg2): - pass - - """ - def set_test(t): - t.__test__ = tf - return t - return set_test - -def skipif(skip_condition, msg=None): - """ - Make function raise SkipTest exception if a given condition is true. - - If the condition is a callable, it is used at runtime to dynamically - make the decision. This is useful for tests that may require costly - imports, to delay the cost until the test suite is actually executed. - - Parameters - ---------- - skip_condition : bool or callable - Flag to determine whether to skip the decorated test. - msg : str, optional - Message to give on raising a SkipTest exception. Default is None. - - Returns - ------- - decorator : function - Decorator which, when applied to a function, causes SkipTest - to be raised when `skip_condition` is True, and the function - to be called normally otherwise. - - Notes - ----- - The decorator itself is decorated with the ``nose.tools.make_decorator`` - function in order to transmit function name, and various other metadata. - - """ - - def skip_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - - # Allow for both boolean or callable skip conditions. 
- if isinstance(skip_condition, collections.Callable): - skip_val = lambda: skip_condition() - else: - skip_val = lambda: skip_condition - - def get_msg(func,msg=None): - """Skip message with information about function being skipped.""" - if msg is None: - out = 'Test skipped due to test condition' - else: - out = msg - - return "Skipping test: %s: %s" % (func.__name__, out) - - # We need to define *two* skippers because Python doesn't allow both - # return with value and yield inside the same function. - def skipper_func(*args, **kwargs): - """Skipper for normal test functions.""" - if skip_val(): - raise SkipTest(get_msg(f, msg)) - else: - return f(*args, **kwargs) - - def skipper_gen(*args, **kwargs): - """Skipper for test generators.""" - if skip_val(): - raise SkipTest(get_msg(f, msg)) - else: - for x in f(*args, **kwargs): - yield x - - # Choose the right skipper to use when building the actual decorator. - if nose.util.isgenerator(f): - skipper = skipper_gen - else: - skipper = skipper_func - - return nose.tools.make_decorator(f)(skipper) - - return skip_decorator - - -def knownfailureif(fail_condition, msg=None): - """ - Make function raise KnownFailureException exception if given condition is true. - - If the condition is a callable, it is used at runtime to dynamically - make the decision. This is useful for tests that may require costly - imports, to delay the cost until the test suite is actually executed. - - Parameters - ---------- - fail_condition : bool or callable - Flag to determine whether to mark the decorated test as a known - failure (if True) or not (if False). - msg : str, optional - Message to give on raising a KnownFailureException exception. - Default is None. - - Returns - ------- - decorator : function - Decorator, which, when applied to a function, causes - KnownFailureException to be raised when `fail_condition` is True, - and the function to be called normally otherwise. 
- - Notes - ----- - The decorator itself is decorated with the ``nose.tools.make_decorator`` - function in order to transmit function name, and various other metadata. - - """ - if msg is None: - msg = 'Test skipped due to known failure' - - # Allow for both boolean or callable known failure conditions. - if isinstance(fail_condition, collections.Callable): - fail_val = lambda: fail_condition() - else: - fail_val = lambda: fail_condition - - def knownfail_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. - import nose - from .noseclasses import KnownFailureException - - def knownfailer(*args, **kwargs): - if fail_val(): - raise KnownFailureException(msg) - else: - return f(*args, **kwargs) - return nose.tools.make_decorator(f)(knownfailer) - - return knownfail_decorator - -def deprecated(conditional=True): - """ - Filter deprecation warnings while running the test suite. - - This decorator can be used to filter DeprecationWarning's, to avoid - printing them during the test suite run, while checking that the test - actually raises a DeprecationWarning. - - Parameters - ---------- - conditional : bool or callable, optional - Flag to determine whether to mark test as deprecated or not. If the - condition is a callable, it is used at runtime to dynamically make the - decision. Default is True. - - Returns - ------- - decorator : function - The `deprecated` decorator itself. - - Notes - ----- - .. versionadded:: 1.4.0 - - """ - def deprecate_decorator(f): - # Local import to avoid a hard nose dependency and only incur the - # import time overhead at actual test-time. 
- import nose - - def _deprecated_imp(*args, **kwargs): - # Poor man's replacement for the with statement - with assert_warns(DeprecationWarning): - f(*args, **kwargs) - - if isinstance(conditional, collections.Callable): - cond = conditional() - else: - cond = conditional - if cond: - return nose.tools.make_decorator(f)(_deprecated_imp) - else: - return f - return deprecate_decorator +from .nose_tools.decorators import * diff -Nru python-numpy-1.13.3/numpy/testing/__init__.py python-numpy-1.14.5/numpy/testing/__init__.py --- python-numpy-1.13.3/numpy/testing/__init__.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/__init__.py 2018-06-12 18:28:52.000000000 +0000 @@ -10,6 +10,6 @@ from unittest import TestCase from . import decorators as dec -from .nosetester import run_module_suite, NoseTester as Tester +from .nosetester import run_module_suite, NoseTester as Tester, _numpy_tester from .utils import * -test = nosetester._numpy_tester().test +test = _numpy_tester().test diff -Nru python-numpy-1.13.3/numpy/testing/noseclasses.py python-numpy-1.14.5/numpy/testing/noseclasses.py --- python-numpy-1.13.3/numpy/testing/noseclasses.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/noseclasses.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,340 +1,6 @@ -# These classes implement a doctest runner plugin for nose, a "known failure" -# error class, and a customized TestProgram for NumPy. +""" +Back compatibility noseclasses module. It will import the appropriate +set of tools -# Because this module imports nose directly, it should not -# be used except by nosetester.py to avoid a general NumPy -# dependency on nose. 
-from __future__ import division, absolute_import, print_function - -import os -import doctest -import inspect - -import nose -from nose.plugins import doctests as npd -from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin -from nose.plugins.base import Plugin -from nose.util import src -import numpy -from .nosetester import get_package_name -from .utils import KnownFailureException, KnownFailureTest - - -# Some of the classes in this module begin with 'Numpy' to clearly distinguish -# them from the plethora of very similar names from nose/unittest/doctest - -#----------------------------------------------------------------------------- -# Modified version of the one in the stdlib, that fixes a python bug (doctests -# not found in extension modules, http://bugs.python.org/issue3158) -class NumpyDocTestFinder(doctest.DocTestFinder): - - def _from_module(self, module, object): - """ - Return true if the given object is defined in the given - module. - """ - if module is None: - return True - elif inspect.isfunction(object): - return module.__dict__ is object.__globals__ - elif inspect.isbuiltin(object): - return module.__name__ == object.__module__ - elif inspect.isclass(object): - return module.__name__ == object.__module__ - elif inspect.ismethod(object): - # This one may be a bug in cython that fails to correctly set the - # __module__ attribute of methods, but since the same error is easy - # to make by extension code writers, having this safety in place - # isn't such a bad idea - return module.__name__ == object.__self__.__class__.__module__ - elif inspect.getmodule(object) is not None: - return module is inspect.getmodule(object) - elif hasattr(object, '__module__'): - return module.__name__ == object.__module__ - elif isinstance(object, property): - return True # [XX] no way not be sure. 
- else: - raise ValueError("object must be a class or function") - - def _find(self, tests, obj, name, module, source_lines, globs, seen): - """ - Find tests for the given object and any contained objects, and - add them to `tests`. - """ - - doctest.DocTestFinder._find(self, tests, obj, name, module, - source_lines, globs, seen) - - # Below we re-run pieces of the above method with manual modifications, - # because the original code is buggy and fails to correctly identify - # doctests in extension modules. - - # Local shorthands - from inspect import ( - isroutine, isclass, ismodule, isfunction, ismethod - ) - - # Look for tests in a module's contained objects. - if ismodule(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - valname1 = '%s.%s' % (name, valname) - if ( (isroutine(val) or isclass(val)) - and self._from_module(module, val)): - - self._find(tests, val, valname1, module, source_lines, - globs, seen) - - # Look for tests in a class's contained objects. - if isclass(obj) and self._recurse: - for valname, val in obj.__dict__.items(): - # Special handling for staticmethod/classmethod. - if isinstance(val, staticmethod): - val = getattr(obj, valname) - if isinstance(val, classmethod): - val = getattr(obj, valname).__func__ - - # Recurse to methods, properties, and nested classes. 
- if ((isfunction(val) or isclass(val) or - ismethod(val) or isinstance(val, property)) and - self._from_module(module, val)): - valname = '%s.%s' % (name, valname) - self._find(tests, val, valname, module, source_lines, - globs, seen) - - -# second-chance checker; if the default comparison doesn't -# pass, then see if the expected output string contains flags that -# tell us to ignore the output -class NumpyOutputChecker(doctest.OutputChecker): - def check_output(self, want, got, optionflags): - ret = doctest.OutputChecker.check_output(self, want, got, - optionflags) - if not ret: - if "#random" in want: - return True - - # it would be useful to normalize endianness so that - # bigendian machines don't fail all the tests (and there are - # actually some bigendian examples in the doctests). Let's try - # making them all little endian - got = got.replace("'>", "'<") - want = want.replace("'>", "'<") - - # try to normalize out 32 and 64 bit default int sizes - for sz in [4, 8]: - got = got.replace("'>> np.testing.nosetester.get_package_name('nonsense') - 'numpy' - - """ - - fullpath = filepath[:] - pkg_name = [] - while 'site-packages' in filepath or 'dist-packages' in filepath: - filepath, p2 = os.path.split(filepath) - if p2 in ('site-packages', 'dist-packages'): - break - pkg_name.append(p2) - - # if package name determination failed, just default to numpy/scipy - if not pkg_name: - if 'scipy' in fullpath: - return 'scipy' - else: - return 'numpy' - - # otherwise, reverse to get correct order and return - pkg_name.reverse() - - # don't include the outer egg directory - if pkg_name[0].endswith('.egg'): - pkg_name.pop(0) - - return '.'.join(pkg_name) - - -def run_module_suite(file_to_run=None, argv=None): - """ - Run a test module. - - Equivalent to calling ``$ nosetests `` from - the command line - - Parameters - ---------- - file_to_run : str, optional - Path to test module, or None. - By default, run the module from which this function is called. 
- argv : list of strings - Arguments to be passed to the nose test runner. ``argv[0]`` is - ignored. All command line arguments accepted by ``nosetests`` - will work. If it is the default value None, sys.argv is used. - - .. versionadded:: 1.9.0 - - Examples - -------- - Adding the following:: - - if __name__ == "__main__" : - run_module_suite(argv=sys.argv) - - at the end of a test module will run the tests when that module is - called in the python interpreter. - - Alternatively, calling:: - - >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") - - from an interpreter will run all the test routine in 'test_matlib.py'. - """ - if file_to_run is None: - f = sys._getframe(1) - file_to_run = f.f_locals.get('__file__', None) - if file_to_run is None: - raise AssertionError - - if argv is None: - argv = sys.argv + [file_to_run] - else: - argv = argv + [file_to_run] - - nose = import_nose() - from .noseclasses import KnownFailurePlugin - nose.run(argv=argv, addplugins=[KnownFailurePlugin()]) - - -class NoseTester(object): - """ - Nose test runner. - - This class is made available as numpy.testing.Tester, and a test function - is typically added to a package's __init__.py like so:: - - from numpy.testing import Tester - test = Tester().test - - Calling this test function finds and runs all tests associated with the - package and all its sub-packages. - - Attributes - ---------- - package_path : str - Full path to the package to test. - package_name : str - Name of the package to test. - - Parameters - ---------- - package : module, str or None, optional - The package to test. If a string, this should be the full path to - the package. If None (default), `package` is set to the module from - which `NoseTester` is initialized. - raise_warnings : None, str or sequence of warnings, optional - This specifies which warnings to configure as 'raise' instead - of being shown once during the test execution. 
Valid strings are: - - - "develop" : equals ``(Warning,)`` - - "release" : equals ``()``, don't raise on any warnings. - - Default is "release". - depth : int, optional - If `package` is None, then this can be used to initialize from the - module of the caller of (the caller of (...)) the code that - initializes `NoseTester`. Default of 0 means the module of the - immediate caller; higher values are useful for utility routines that - want to initialize `NoseTester` objects on behalf of other code. - - """ - def __init__(self, package=None, raise_warnings="release", depth=0): - # Back-compat: 'None' used to mean either "release" or "develop" - # depending on whether this was a release or develop version of - # numpy. Those semantics were fine for testing numpy, but not so - # helpful for downstream projects like scipy that use - # numpy.testing. (They want to set this based on whether *they* are a - # release or develop version, not whether numpy is.) So we continue to - # accept 'None' for back-compat, but it's now just an alias for the - # default "release". - if raise_warnings is None: - raise_warnings = "release" - - package_name = None - if package is None: - f = sys._getframe(1 + depth) - package_path = f.f_locals.get('__file__', None) - if package_path is None: - raise AssertionError - package_path = os.path.dirname(package_path) - package_name = f.f_locals.get('__name__', None) - elif isinstance(package, type(os)): - package_path = os.path.dirname(package.__file__) - package_name = getattr(package, '__name__', None) - else: - package_path = str(package) - - self.package_path = package_path - - # Find the package name under test; this name is used to limit coverage - # reporting (if enabled). - if package_name is None: - package_name = get_package_name(package_path) - self.package_name = package_name - - # Set to "release" in constructor in maintenance branches. 
- self.raise_warnings = raise_warnings - - def _test_argv(self, label, verbose, extra_argv): - ''' Generate argv for nosetest command - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - see ``test`` docstring - verbose : int, optional - Verbosity value for test outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. - - Returns - ------- - argv : list - command line arguments that will be passed to nose - ''' - argv = [__file__, self.package_path, '-s'] - if label and label != 'full': - if not isinstance(label, basestring): - raise TypeError('Selection label should be a string') - if label == 'fast': - label = 'not slow' - argv += ['-A', label] - argv += ['--verbosity', str(verbose)] - - # When installing with setuptools, and also in some other cases, the - # test_*.py files end up marked +x executable. Nose, by default, does - # not run files marked with +x as they might be scripts. However, in - # our case nose only looks for test_*.py files under the package - # directory, which should be safe. 
- argv += ['--exe'] - - if extra_argv: - argv += extra_argv - return argv - - def _show_system_info(self): - nose = import_nose() - - import numpy - print("NumPy version %s" % numpy.__version__) - relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous - print("NumPy relaxed strides checking option:", relaxed_strides) - npdir = os.path.dirname(numpy.__file__) - print("NumPy is installed in %s" % npdir) - - if 'scipy' in self.package_name: - import scipy - print("SciPy version %s" % scipy.__version__) - spdir = os.path.dirname(scipy.__file__) - print("SciPy is installed in %s" % spdir) - - pyversion = sys.version.replace('\n', '') - print("Python version %s" % pyversion) - print("nose version %d.%d.%d" % nose.__versioninfo__) - - def _get_custom_doctester(self): - """ Return instantiated plugin for doctests - - Allows subclassing of this class to override doctester - - A return value of None means use the nose builtin doctest plugin - """ - from .noseclasses import NumpyDoctest - return NumpyDoctest() - - def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, - doctests=False, coverage=False): - """ - Run tests for module using nose. - - This method does the heavy lifting for the `test` method. It takes all - the same arguments, for details see `test`. 
- - See Also - -------- - test - - """ - # fail with nice error message if nose is not present - import_nose() - # compile argv - argv = self._test_argv(label, verbose, extra_argv) - # our way of doing coverage - if coverage: - argv += ['--cover-package=%s' % self.package_name, '--with-coverage', - '--cover-tests', '--cover-erase'] - # construct list of plugins - import nose.plugins.builtin - from .noseclasses import KnownFailurePlugin, Unplugger - plugins = [KnownFailurePlugin()] - plugins += [p() for p in nose.plugins.builtin.plugins] - # add doctesting if required - doctest_argv = '--with-doctest' in argv - if doctests == False and doctest_argv: - doctests = True - plug = self._get_custom_doctester() - if plug is None: - # use standard doctesting - if doctests and not doctest_argv: - argv += ['--with-doctest'] - else: # custom doctesting - if doctest_argv: # in fact the unplugger would take care of this - argv.remove('--with-doctest') - plugins += [Unplugger('doctest'), plug] - if doctests: - argv += ['--with-' + plug.name] - return argv, plugins - - def test(self, label='fast', verbose=1, extra_argv=None, - doctests=False, coverage=False, raise_warnings=None): - """ - Run tests for module using nose. - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - Identifies the tests to run. This can be a string to pass to - the nosetests executable with the '-A' option, or one of several - special values. Special values are: - * 'fast' - the default - which corresponds to the ``nosetests -A`` - option of 'not slow'. - * 'full' - fast (as above) and slow tests as in the - 'no -A' option to nosetests - this is the same as ''. - * None or '' - run all tests. - attribute_identifier - string passed directly to nosetests as '-A'. - verbose : int, optional - Verbosity value for test outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. 
- doctests : bool, optional - If True, run doctests in module. Default is False. - coverage : bool, optional - If True, report coverage of NumPy code. Default is False. - (This requires the `coverage module: - `_). - raise_warnings : None, str or sequence of warnings, optional - This specifies which warnings to configure as 'raise' instead - of being shown once during the test execution. Valid strings are: - - - "develop" : equals ``(Warning,)`` - - "release" : equals ``()``, don't raise on any warnings. - - The default is to use the class initialization value. - - Returns - ------- - result : object - Returns the result of running the tests as a - ``nose.result.TextTestResult`` object. - - Notes - ----- - Each NumPy module exposes `test` in its namespace to run all tests for it. - For example, to run all tests for numpy.lib: - - >>> np.lib.test() #doctest: +SKIP - - Examples - -------- - >>> result = np.lib.test() #doctest: +SKIP - Running unit tests for numpy.lib - ... - Ran 976 tests in 3.933s - - OK - - >>> result.errors #doctest: +SKIP - [] - >>> result.knownfail #doctest: +SKIP - [] - """ - - # cap verbosity at 3 because nose becomes *very* verbose beyond that - verbose = min(verbose, 3) - - from . import utils - utils.verbose = verbose - - if doctests: - print("Running unit tests and doctests for %s" % self.package_name) - else: - print("Running unit tests for %s" % self.package_name) - - self._show_system_info() - - # reset doctest state on every run - import doctest - doctest.master = None - - if raise_warnings is None: - raise_warnings = self.raise_warnings - - _warn_opts = dict(develop=(Warning,), - release=()) - if isinstance(raise_warnings, basestring): - raise_warnings = _warn_opts[raise_warnings] - - with suppress_warnings("location") as sup: - # Reset the warning filters to the default state, - # so that running the tests is more repeatable. 
- warnings.resetwarnings() - # Set all warnings to 'warn', this is because the default 'once' - # has the bad property of possibly shadowing later warnings. - warnings.filterwarnings('always') - # Force the requested warnings to raise - for warningtype in raise_warnings: - warnings.filterwarnings('error', category=warningtype) - # Filter out annoying import messages. - sup.filter(message='Not importing directory') - sup.filter(message="numpy.dtype size changed") - sup.filter(message="numpy.ufunc size changed") - sup.filter(category=np.ModuleDeprecationWarning) - # Filter out boolean '-' deprecation messages. This allows - # older versions of scipy to test without a flood of messages. - sup.filter(message=".*boolean negative.*") - sup.filter(message=".*boolean subtract.*") - # Filter out distutils cpu warnings (could be localized to - # distutils tests). ASV has problems with top level import, - # so fetch module for suppression here. - with warnings.catch_warnings(): - warnings.simplefilter("always") - from ..distutils import cpuinfo - sup.filter(category=UserWarning, module=cpuinfo) - # See #7949: Filter out deprecation warnings due to the -3 flag to - # python 2 - if sys.version_info.major == 2 and sys.py3kwarning: - # This is very specific, so using the fragile module filter - # is fine - import threading - sup.filter(DeprecationWarning, - r"sys\.exc_clear\(\) not supported in 3\.x", - module=threading) - sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__") - sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__") - sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x") - sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x") - sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x") - # Filter out some deprecation warnings inside nose 1.3.7 when run - # on python 3.5b2. 
See - # https://github.com/nose-devs/nose/issues/929 - # Note: it is hard to filter based on module for sup (lineno could - # be implemented). - warnings.filterwarnings("ignore", message=".*getargspec.*", - category=DeprecationWarning, - module=r"nose\.") - - from .noseclasses import NumpyTestProgram - - argv, plugins = self.prepare_test_args( - label, verbose, extra_argv, doctests, coverage) - - t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) - - return t.result - - def bench(self, label='fast', verbose=1, extra_argv=None): - """ - Run benchmarks for module using nose. - - Parameters - ---------- - label : {'fast', 'full', '', attribute identifier}, optional - Identifies the benchmarks to run. This can be a string to pass to - the nosetests executable with the '-A' option, or one of several - special values. Special values are: - * 'fast' - the default - which corresponds to the ``nosetests -A`` - option of 'not slow'. - * 'full' - fast (as above) and slow benchmarks as in the - 'no -A' option to nosetests - this is the same as ''. - * None or '' - run all tests. - attribute_identifier - string passed directly to nosetests as '-A'. - verbose : int, optional - Verbosity value for benchmark outputs, in the range 1-10. Default is 1. - extra_argv : list, optional - List with any extra arguments to pass to nosetests. - - Returns - ------- - success : bool - Returns True if running the benchmarks works, False if an error - occurred. - - Notes - ----- - Benchmarks are like tests, but have names starting with "bench" instead - of "test", and can be found under the "benchmarks" sub-directory of the - module. - - Each NumPy module exposes `bench` in its namespace to run all benchmarks - for it. - - Examples - -------- - >>> success = np.lib.bench() #doctest: +SKIP - Running benchmarks for numpy.lib - ... - using 562341 items: - unique: - 0.11 - unique1d: - 0.11 - ratio: 1.0 - nUnique: 56230 == 56230 - ... 
- OK - - >>> success #doctest: +SKIP - True - - """ - - print("Running benchmarks for %s" % self.package_name) - self._show_system_info() - - argv = self._test_argv(label, verbose, extra_argv) - argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] - - # import nose or make informative error - nose = import_nose() - - # get plugin to disable doctests - from .noseclasses import Unplugger - add_plugins = [Unplugger('doctest')] - - return nose.run(argv=argv, addplugins=add_plugins) - +from .nose_tools.nosetester import * -def _numpy_tester(): - if hasattr(np, "__version__") and ".dev0" in np.__version__: - mode = "develop" - else: - mode = "release" - return NoseTester(raise_warnings=mode, depth=1) +__all__ = ['get_package_name', 'run_module_suite', 'NoseTester', + '_numpy_tester', 'get_package_name', 'import_nose', + 'suppress_warnings'] diff -Nru python-numpy-1.13.3/numpy/testing/nose_tools/decorators.py python-numpy-1.14.5/numpy/testing/nose_tools/decorators.py --- python-numpy-1.13.3/numpy/testing/nose_tools/decorators.py 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/nose_tools/decorators.py 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,282 @@ +""" +Decorators for labeling and modifying behavior of test objects. + +Decorators that merely return a modified version of the original +function object are straightforward. Decorators that return a new +function object need to use +:: + + nose.tools.make_decorator(original_function)(decorator) + +in returning the decorator, in order to preserve meta-data such as +function name, setup and teardown functions and so on - see +``nose.tools`` for more information. + +""" +from __future__ import division, absolute_import, print_function + +import collections + +from .utils import SkipTest, assert_warns + + +def slow(t): + """ + Label a test as 'slow'. 
+ + The exact definition of a slow test is obviously both subjective and + hardware-dependent, but in general any individual test that requires more + than a second or two should be labeled as slow (the whole suite consits of + thousands of tests, so even a second is significant). + + Parameters + ---------- + t : callable + The test to label as slow. + + Returns + ------- + t : callable + The decorated test `t`. + + Examples + -------- + The `numpy.testing` module includes ``import decorators as dec``. + A test can be decorated as slow like this:: + + from numpy.testing import * + + @dec.slow + def test_big(self): + print('Big, slow test') + + """ + + t.slow = True + return t + +def setastest(tf=True): + """ + Signals to nose that this function is or is not a test. + + Parameters + ---------- + tf : bool + If True, specifies that the decorated callable is a test. + If False, specifies that the decorated callable is not a test. + Default is True. + + Notes + ----- + This decorator can't use the nose namespace, because it can be + called from a non-test module. See also ``istest`` and ``nottest`` in + ``nose.tools``. + + Examples + -------- + `setastest` can be used in the following way:: + + from numpy.testing import dec + + @dec.setastest(False) + def func_with_test_in_name(arg1, arg2): + pass + + """ + def set_test(t): + t.__test__ = tf + return t + return set_test + +def skipif(skip_condition, msg=None): + """ + Make function raise SkipTest exception if a given condition is true. + + If the condition is a callable, it is used at runtime to dynamically + make the decision. This is useful for tests that may require costly + imports, to delay the cost until the test suite is actually executed. + + Parameters + ---------- + skip_condition : bool or callable + Flag to determine whether to skip the decorated test. + msg : str, optional + Message to give on raising a SkipTest exception. Default is None. 
+ + Returns + ------- + decorator : function + Decorator which, when applied to a function, causes SkipTest + to be raised when `skip_condition` is True, and the function + to be called normally otherwise. + + Notes + ----- + The decorator itself is decorated with the ``nose.tools.make_decorator`` + function in order to transmit function name, and various other metadata. + + """ + + def skip_decorator(f): + # Local import to avoid a hard nose dependency and only incur the + # import time overhead at actual test-time. + import nose + + # Allow for both boolean or callable skip conditions. + if isinstance(skip_condition, collections.Callable): + skip_val = lambda: skip_condition() + else: + skip_val = lambda: skip_condition + + def get_msg(func,msg=None): + """Skip message with information about function being skipped.""" + if msg is None: + out = 'Test skipped due to test condition' + else: + out = msg + + return "Skipping test: %s: %s" % (func.__name__, out) + + # We need to define *two* skippers because Python doesn't allow both + # return with value and yield inside the same function. + def skipper_func(*args, **kwargs): + """Skipper for normal test functions.""" + if skip_val(): + raise SkipTest(get_msg(f, msg)) + else: + return f(*args, **kwargs) + + def skipper_gen(*args, **kwargs): + """Skipper for test generators.""" + if skip_val(): + raise SkipTest(get_msg(f, msg)) + else: + for x in f(*args, **kwargs): + yield x + + # Choose the right skipper to use when building the actual decorator. + if nose.util.isgenerator(f): + skipper = skipper_gen + else: + skipper = skipper_func + + return nose.tools.make_decorator(f)(skipper) + + return skip_decorator + + +def knownfailureif(fail_condition, msg=None): + """ + Make function raise KnownFailureException exception if given condition is true. + + If the condition is a callable, it is used at runtime to dynamically + make the decision. 
This is useful for tests that may require costly + imports, to delay the cost until the test suite is actually executed. + + Parameters + ---------- + fail_condition : bool or callable + Flag to determine whether to mark the decorated test as a known + failure (if True) or not (if False). + msg : str, optional + Message to give on raising a KnownFailureException exception. + Default is None. + + Returns + ------- + decorator : function + Decorator, which, when applied to a function, causes + KnownFailureException to be raised when `fail_condition` is True, + and the function to be called normally otherwise. + + Notes + ----- + The decorator itself is decorated with the ``nose.tools.make_decorator`` + function in order to transmit function name, and various other metadata. + + """ + if msg is None: + msg = 'Test skipped due to known failure' + + # Allow for both boolean or callable known failure conditions. + if isinstance(fail_condition, collections.Callable): + fail_val = lambda: fail_condition() + else: + fail_val = lambda: fail_condition + + def knownfail_decorator(f): + # Local import to avoid a hard nose dependency and only incur the + # import time overhead at actual test-time. + import nose + from .noseclasses import KnownFailureException + + def knownfailer(*args, **kwargs): + if fail_val(): + raise KnownFailureException(msg) + else: + return f(*args, **kwargs) + return nose.tools.make_decorator(f)(knownfailer) + + return knownfail_decorator + +def deprecated(conditional=True): + """ + Filter deprecation warnings while running the test suite. + + This decorator can be used to filter DeprecationWarning's, to avoid + printing them during the test suite run, while checking that the test + actually raises a DeprecationWarning. + + Parameters + ---------- + conditional : bool or callable, optional + Flag to determine whether to mark test as deprecated or not. If the + condition is a callable, it is used at runtime to dynamically make the + decision. 
Default is True. + + Returns + ------- + decorator : function + The `deprecated` decorator itself. + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + def deprecate_decorator(f): + # Local import to avoid a hard nose dependency and only incur the + # import time overhead at actual test-time. + import nose + + def _deprecated_imp(*args, **kwargs): + # Poor man's replacement for the with statement + with assert_warns(DeprecationWarning): + f(*args, **kwargs) + + if isinstance(conditional, collections.Callable): + cond = conditional() + else: + cond = conditional + if cond: + return nose.tools.make_decorator(f)(_deprecated_imp) + else: + return f + return deprecate_decorator + + +def parametrize(vars, input): + """ + Pytest compatibility class. This implements the simplest level of + pytest.mark.parametrize for use in nose as an aid in making the transition + to pytest. It achieves that by adding a dummy var parameter and ignoring + the doc_func parameter of the base class. It does not support variable + substitution by name, nor does it support nesting or classes. See the + pytest documentation for usage. + + .. versionadded:: 1.14.0 + + """ + from .parameterized import parameterized + + return parameterized(input) diff -Nru python-numpy-1.13.3/numpy/testing/nose_tools/noseclasses.py python-numpy-1.14.5/numpy/testing/nose_tools/noseclasses.py --- python-numpy-1.13.3/numpy/testing/nose_tools/noseclasses.py 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/nose_tools/noseclasses.py 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,366 @@ +# These classes implement a doctest runner plugin for nose, a "known failure" +# error class, and a customized TestProgram for NumPy. + +# Because this module imports nose directly, it should not +# be used except by nosetester.py to avoid a general NumPy +# dependency on nose. 
+from __future__ import division, absolute_import, print_function + +import os +import sys +import doctest +import inspect + +import numpy +import nose +from nose.plugins import doctests as npd +from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin +from nose.plugins.base import Plugin +from nose.util import src +from .nosetester import get_package_name +from .utils import KnownFailureException, KnownFailureTest + + +# Some of the classes in this module begin with 'Numpy' to clearly distinguish +# them from the plethora of very similar names from nose/unittest/doctest + +#----------------------------------------------------------------------------- +# Modified version of the one in the stdlib, that fixes a python bug (doctests +# not found in extension modules, http://bugs.python.org/issue3158) +class NumpyDocTestFinder(doctest.DocTestFinder): + + def _from_module(self, module, object): + """ + Return true if the given object is defined in the given + module. + """ + if module is None: + return True + elif inspect.isfunction(object): + return module.__dict__ is object.__globals__ + elif inspect.isbuiltin(object): + return module.__name__ == object.__module__ + elif inspect.isclass(object): + return module.__name__ == object.__module__ + elif inspect.ismethod(object): + # This one may be a bug in cython that fails to correctly set the + # __module__ attribute of methods, but since the same error is easy + # to make by extension code writers, having this safety in place + # isn't such a bad idea + return module.__name__ == object.__self__.__class__.__module__ + elif inspect.getmodule(object) is not None: + return module is inspect.getmodule(object) + elif hasattr(object, '__module__'): + return module.__name__ == object.__module__ + elif isinstance(object, property): + return True # [XX] no way not be sure. 
+ else: + raise ValueError("object must be a class or function") + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + """ + Find tests for the given object and any contained objects, and + add them to `tests`. + """ + + doctest.DocTestFinder._find(self, tests, obj, name, module, + source_lines, globs, seen) + + # Below we re-run pieces of the above method with manual modifications, + # because the original code is buggy and fails to correctly identify + # doctests in extension modules. + + # Local shorthands + from inspect import ( + isroutine, isclass, ismodule, isfunction, ismethod + ) + + # Look for tests in a module's contained objects. + if ismodule(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + valname1 = '%s.%s' % (name, valname) + if ( (isroutine(val) or isclass(val)) + and self._from_module(module, val)): + + self._find(tests, val, valname1, module, source_lines, + globs, seen) + + # Look for tests in a class's contained objects. + if isclass(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + # Special handling for staticmethod/classmethod. + if isinstance(val, staticmethod): + val = getattr(obj, valname) + if isinstance(val, classmethod): + val = getattr(obj, valname).__func__ + + # Recurse to methods, properties, and nested classes. 
+ if ((isfunction(val) or isclass(val) or + ismethod(val) or isinstance(val, property)) and + self._from_module(module, val)): + valname = '%s.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + +# second-chance checker; if the default comparison doesn't +# pass, then see if the expected output string contains flags that +# tell us to ignore the output +class NumpyOutputChecker(doctest.OutputChecker): + def check_output(self, want, got, optionflags): + ret = doctest.OutputChecker.check_output(self, want, got, + optionflags) + if not ret: + if "#random" in want: + return True + + # it would be useful to normalize endianness so that + # bigendian machines don't fail all the tests (and there are + # actually some bigendian examples in the doctests). Let's try + # making them all little endian + got = got.replace("'>", "'<") + want = want.replace("'>", "'<") + + # try to normalize out 32 and 64 bit default int sizes + for sz in [4, 8]: + got = got.replace("'>> np.testing.nosetester.get_package_name('nonsense') + 'numpy' + + """ + + fullpath = filepath[:] + pkg_name = [] + while 'site-packages' in filepath or 'dist-packages' in filepath: + filepath, p2 = os.path.split(filepath) + if p2 in ('site-packages', 'dist-packages'): + break + pkg_name.append(p2) + + # if package name determination failed, just default to numpy/scipy + if not pkg_name: + if 'scipy' in fullpath: + return 'scipy' + else: + return 'numpy' + + # otherwise, reverse to get correct order and return + pkg_name.reverse() + + # don't include the outer egg directory + if pkg_name[0].endswith('.egg'): + pkg_name.pop(0) + + return '.'.join(pkg_name) + + +def run_module_suite(file_to_run=None, argv=None): + """ + Run a test module. + + Equivalent to calling ``$ nosetests `` from + the command line + + Parameters + ---------- + file_to_run : str, optional + Path to test module, or None. + By default, run the module from which this function is called. 
+ argv : list of strings + Arguments to be passed to the nose test runner. ``argv[0]`` is + ignored. All command line arguments accepted by ``nosetests`` + will work. If it is the default value None, sys.argv is used. + + .. versionadded:: 1.9.0 + + Examples + -------- + Adding the following:: + + if __name__ == "__main__" : + run_module_suite(argv=sys.argv) + + at the end of a test module will run the tests when that module is + called in the python interpreter. + + Alternatively, calling:: + + >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") + + from an interpreter will run all the test routine in 'test_matlib.py'. + """ + if file_to_run is None: + f = sys._getframe(1) + file_to_run = f.f_locals.get('__file__', None) + if file_to_run is None: + raise AssertionError + + if argv is None: + argv = sys.argv + [file_to_run] + else: + argv = argv + [file_to_run] + + nose = import_nose() + from .noseclasses import KnownFailurePlugin + nose.run(argv=argv, addplugins=[KnownFailurePlugin()]) + + +class NoseTester(object): + """ + Nose test runner. + + This class is made available as numpy.testing.Tester, and a test function + is typically added to a package's __init__.py like so:: + + from numpy.testing import Tester + test = Tester().test + + Calling this test function finds and runs all tests associated with the + package and all its sub-packages. + + Attributes + ---------- + package_path : str + Full path to the package to test. + package_name : str + Name of the package to test. + + Parameters + ---------- + package : module, str or None, optional + The package to test. If a string, this should be the full path to + the package. If None (default), `package` is set to the module from + which `NoseTester` is initialized. + raise_warnings : None, str or sequence of warnings, optional + This specifies which warnings to configure as 'raise' instead + of being shown once during the test execution. 
Valid strings are: + + - "develop" : equals ``(Warning,)`` + - "release" : equals ``()``, don't raise on any warnings. + + Default is "release". + depth : int, optional + If `package` is None, then this can be used to initialize from the + module of the caller of (the caller of (...)) the code that + initializes `NoseTester`. Default of 0 means the module of the + immediate caller; higher values are useful for utility routines that + want to initialize `NoseTester` objects on behalf of other code. + + """ + def __init__(self, package=None, raise_warnings="release", depth=0, + check_fpu_mode=False): + # Back-compat: 'None' used to mean either "release" or "develop" + # depending on whether this was a release or develop version of + # numpy. Those semantics were fine for testing numpy, but not so + # helpful for downstream projects like scipy that use + # numpy.testing. (They want to set this based on whether *they* are a + # release or develop version, not whether numpy is.) So we continue to + # accept 'None' for back-compat, but it's now just an alias for the + # default "release". + if raise_warnings is None: + raise_warnings = "release" + + package_name = None + if package is None: + f = sys._getframe(1 + depth) + package_path = f.f_locals.get('__file__', None) + if package_path is None: + raise AssertionError + package_path = os.path.dirname(package_path) + package_name = f.f_locals.get('__name__', None) + elif isinstance(package, type(os)): + package_path = os.path.dirname(package.__file__) + package_name = getattr(package, '__name__', None) + else: + package_path = str(package) + + self.package_path = package_path + + # Find the package name under test; this name is used to limit coverage + # reporting (if enabled). + if package_name is None: + package_name = get_package_name(package_path) + self.package_name = package_name + + # Set to "release" in constructor in maintenance branches. 
+ self.raise_warnings = raise_warnings + + # Whether to check for FPU mode changes + self.check_fpu_mode = check_fpu_mode + + def _test_argv(self, label, verbose, extra_argv): + ''' Generate argv for nosetest command + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + see ``test`` docstring + verbose : int, optional + Verbosity value for test outputs, in the range 1-10. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + + Returns + ------- + argv : list + command line arguments that will be passed to nose + ''' + argv = [__file__, self.package_path, '-s'] + if label and label != 'full': + if not isinstance(label, basestring): + raise TypeError('Selection label should be a string') + if label == 'fast': + label = 'not slow' + argv += ['-A', label] + argv += ['--verbosity', str(verbose)] + + # When installing with setuptools, and also in some other cases, the + # test_*.py files end up marked +x executable. Nose, by default, does + # not run files marked with +x as they might be scripts. However, in + # our case nose only looks for test_*.py files under the package + # directory, which should be safe. 
+ argv += ['--exe'] + + if extra_argv: + argv += extra_argv + return argv + + def _show_system_info(self): + nose = import_nose() + + import numpy + print("NumPy version %s" % numpy.__version__) + relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous + print("NumPy relaxed strides checking option:", relaxed_strides) + npdir = os.path.dirname(numpy.__file__) + print("NumPy is installed in %s" % npdir) + + if 'scipy' in self.package_name: + import scipy + print("SciPy version %s" % scipy.__version__) + spdir = os.path.dirname(scipy.__file__) + print("SciPy is installed in %s" % spdir) + + pyversion = sys.version.replace('\n', '') + print("Python version %s" % pyversion) + print("nose version %d.%d.%d" % nose.__versioninfo__) + + def _get_custom_doctester(self): + """ Return instantiated plugin for doctests + + Allows subclassing of this class to override doctester + + A return value of None means use the nose builtin doctest plugin + """ + from .noseclasses import NumpyDoctest + return NumpyDoctest() + + def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, timer=False): + """ + Run tests for module using nose. + + This method does the heavy lifting for the `test` method. It takes all + the same arguments, for details see `test`. 
+ + See Also + -------- + test + + """ + # fail with nice error message if nose is not present + import_nose() + # compile argv + argv = self._test_argv(label, verbose, extra_argv) + # our way of doing coverage + if coverage: + argv += ['--cover-package=%s' % self.package_name, '--with-coverage', + '--cover-tests', '--cover-erase'] + + if timer: + if timer is True: + argv += ['--with-timer'] + elif isinstance(timer, int): + argv += ['--with-timer', '--timer-top-n', str(timer)] + + # construct list of plugins + import nose.plugins.builtin + from nose.plugins import EntryPointPluginManager + from .noseclasses import (KnownFailurePlugin, Unplugger, + FPUModeCheckPlugin) + plugins = [KnownFailurePlugin()] + plugins += [p() for p in nose.plugins.builtin.plugins] + if self.check_fpu_mode: + plugins += [FPUModeCheckPlugin()] + argv += ["--with-fpumodecheckplugin"] + try: + # External plugins (like nose-timer) + entrypoint_manager = EntryPointPluginManager() + entrypoint_manager.loadPlugins() + plugins += [p for p in entrypoint_manager.plugins] + except ImportError: + # Relies on pkg_resources, not a hard dependency + pass + + # add doctesting if required + doctest_argv = '--with-doctest' in argv + if doctests == False and doctest_argv: + doctests = True + plug = self._get_custom_doctester() + if plug is None: + # use standard doctesting + if doctests and not doctest_argv: + argv += ['--with-doctest'] + else: # custom doctesting + if doctest_argv: # in fact the unplugger would take care of this + argv.remove('--with-doctest') + plugins += [Unplugger('doctest'), plug] + if doctests: + argv += ['--with-' + plug.name] + return argv, plugins + + def test(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, raise_warnings=None, + timer=False): + """ + Run tests for module using nose. + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + Identifies the tests to run. 
This can be a string to pass to + the nosetests executable with the '-A' option, or one of several + special values. Special values are: + * 'fast' - the default - which corresponds to the ``nosetests -A`` + option of 'not slow'. + * 'full' - fast (as above) and slow tests as in the + 'no -A' option to nosetests - this is the same as ''. + * None or '' - run all tests. + attribute_identifier - string passed directly to nosetests as '-A'. + verbose : int, optional + Verbosity value for test outputs, in the range 1-10. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + doctests : bool, optional + If True, run doctests in module. Default is False. + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + (This requires the `coverage module: + `_). + raise_warnings : None, str or sequence of warnings, optional + This specifies which warnings to configure as 'raise' instead + of being shown once during the test execution. Valid strings are: + + - "develop" : equals ``(Warning,)`` + - "release" : equals ``()``, don't raise on any warnings. + + The default is to use the class initialization value. + timer : bool or int, optional + Timing of individual tests with ``nose-timer`` (which needs to be + installed). If True, time tests and report on all of them. + If an integer (say ``N``), report timing results for ``N`` slowest + tests. + + Returns + ------- + result : object + Returns the result of running the tests as a + ``nose.result.TextTestResult`` object. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for it. + For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + Running unit tests for numpy.lib + ... 
+ Ran 976 tests in 3.933s + + OK + + >>> result.errors #doctest: +SKIP + [] + >>> result.knownfail #doctest: +SKIP + [] + """ + + # cap verbosity at 3 because nose becomes *very* verbose beyond that + verbose = min(verbose, 3) + + from . import utils + utils.verbose = verbose + + argv, plugins = self.prepare_test_args( + label, verbose, extra_argv, doctests, coverage, timer) + + if doctests: + print("Running unit tests and doctests for %s" % self.package_name) + else: + print("Running unit tests for %s" % self.package_name) + + self._show_system_info() + + # reset doctest state on every run + import doctest + doctest.master = None + + if raise_warnings is None: + raise_warnings = self.raise_warnings + + _warn_opts = dict(develop=(Warning,), + release=()) + if isinstance(raise_warnings, basestring): + raise_warnings = _warn_opts[raise_warnings] + + with suppress_warnings("location") as sup: + # Reset the warning filters to the default state, + # so that running the tests is more repeatable. + warnings.resetwarnings() + # Set all warnings to 'warn', this is because the default 'once' + # has the bad property of possibly shadowing later warnings. + warnings.filterwarnings('always') + # Force the requested warnings to raise + for warningtype in raise_warnings: + warnings.filterwarnings('error', category=warningtype) + # Filter out annoying import messages. + sup.filter(message='Not importing directory') + sup.filter(message="numpy.dtype size changed") + sup.filter(message="numpy.ufunc size changed") + sup.filter(category=np.ModuleDeprecationWarning) + # Filter out boolean '-' deprecation messages. This allows + # older versions of scipy to test without a flood of messages. + sup.filter(message=".*boolean negative.*") + sup.filter(message=".*boolean subtract.*") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. 
+ with warnings.catch_warnings(): + warnings.simplefilter("always") + from ...distutils import cpuinfo + sup.filter(category=UserWarning, module=cpuinfo) + # See #7949: Filter out deprecation warnings due to the -3 flag to + # python 2 + if sys.version_info.major == 2 and sys.py3kwarning: + # This is very specific, so using the fragile module filter + # is fine + import threading + sup.filter(DeprecationWarning, + r"sys\.exc_clear\(\) not supported in 3\.x", + module=threading) + sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__") + sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__") + sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x") + sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x") + sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x") + # Filter out some deprecation warnings inside nose 1.3.7 when run + # on python 3.5b2. See + # https://github.com/nose-devs/nose/issues/929 + # Note: it is hard to filter based on module for sup (lineno could + # be implemented). + warnings.filterwarnings("ignore", message=".*getargspec.*", + category=DeprecationWarning, + module=r"nose\.") + + from .noseclasses import NumpyTestProgram + + t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) + + return t.result + + def bench(self, label='fast', verbose=1, extra_argv=None): + """ + Run benchmarks for module using nose. + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + Identifies the benchmarks to run. This can be a string to pass to + the nosetests executable with the '-A' option, or one of several + special values. Special values are: + * 'fast' - the default - which corresponds to the ``nosetests -A`` + option of 'not slow'. + * 'full' - fast (as above) and slow benchmarks as in the + 'no -A' option to nosetests - this is the same as ''. + * None or '' - run all tests. 
+ attribute_identifier - string passed directly to nosetests as '-A'. + verbose : int, optional + Verbosity value for benchmark outputs, in the range 1-10. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + + Returns + ------- + success : bool + Returns True if running the benchmarks works, False if an error + occurred. + + Notes + ----- + Benchmarks are like tests, but have names starting with "bench" instead + of "test", and can be found under the "benchmarks" sub-directory of the + module. + + Each NumPy module exposes `bench` in its namespace to run all benchmarks + for it. + + Examples + -------- + >>> success = np.lib.bench() #doctest: +SKIP + Running benchmarks for numpy.lib + ... + using 562341 items: + unique: + 0.11 + unique1d: + 0.11 + ratio: 1.0 + nUnique: 56230 == 56230 + ... + OK + + >>> success #doctest: +SKIP + True + + """ + + print("Running benchmarks for %s" % self.package_name) + self._show_system_info() + + argv = self._test_argv(label, verbose, extra_argv) + argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] + + # import nose or make informative error + nose = import_nose() + + # get plugin to disable doctests + from .noseclasses import Unplugger + add_plugins = [Unplugger('doctest')] + + return nose.run(argv=argv, addplugins=add_plugins) + + +def _numpy_tester(): + if hasattr(np, "__version__") and ".dev0" in np.__version__: + mode = "develop" + else: + mode = "release" + return NoseTester(raise_warnings=mode, depth=1, + check_fpu_mode=True) diff -Nru python-numpy-1.13.3/numpy/testing/nose_tools/parameterized.py python-numpy-1.14.5/numpy/testing/nose_tools/parameterized.py --- python-numpy-1.13.3/numpy/testing/nose_tools/parameterized.py 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/nose_tools/parameterized.py 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,489 @@ +""" +tl;dr: all code code is licensed under simplified BSD, unless stated otherwise. 
+ +Unless stated otherwise in the source files, all code is copyright 2010 David +Wolever . All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those +of the authors and should not be interpreted as representing official policies, +either expressed or implied, of David Wolever. + +""" +import re +import sys +import inspect +import warnings +from functools import wraps +from types import MethodType as MethodType +from collections import namedtuple + +try: + from collections import OrderedDict as MaybeOrderedDict +except ImportError: + MaybeOrderedDict = dict + +from unittest import TestCase + +PY3 = sys.version_info[0] == 3 +PY2 = sys.version_info[0] == 2 + + +if PY3: + # Python 3 doesn't have an InstanceType, so just use a dummy type. 
+ class InstanceType(): + pass + lzip = lambda *a: list(zip(*a)) + text_type = str + string_types = str, + bytes_type = bytes + def make_method(func, instance, type): + if instance is None: + return func + return MethodType(func, instance) +else: + from types import InstanceType + lzip = zip + text_type = unicode + bytes_type = str + string_types = basestring, + def make_method(func, instance, type): + return MethodType(func, instance, type) + +_param = namedtuple("param", "args kwargs") + +class param(_param): + """ Represents a single parameter to a test case. + + For example:: + + >>> p = param("foo", bar=16) + >>> p + param("foo", bar=16) + >>> p.args + ('foo', ) + >>> p.kwargs + {'bar': 16} + + Intended to be used as an argument to ``@parameterized``:: + + @parameterized([ + param("foo", bar=16), + ]) + def test_stuff(foo, bar=16): + pass + """ + + def __new__(cls, *args , **kwargs): + return _param.__new__(cls, args, kwargs) + + @classmethod + def explicit(cls, args=None, kwargs=None): + """ Creates a ``param`` by explicitly specifying ``args`` and + ``kwargs``:: + + >>> param.explicit([1,2,3]) + param(*(1, 2, 3)) + >>> param.explicit(kwargs={"foo": 42}) + param(*(), **{"foo": "42"}) + """ + args = args or () + kwargs = kwargs or {} + return cls(*args, **kwargs) + + @classmethod + def from_decorator(cls, args): + """ Returns an instance of ``param()`` for ``@parameterized`` argument + ``args``:: + + >>> param.from_decorator((42, )) + param(args=(42, ), kwargs={}) + >>> param.from_decorator("foo") + param(args=("foo", ), kwargs={}) + """ + if isinstance(args, param): + return args + elif isinstance(args, string_types): + args = (args, ) + try: + return cls(*args) + except TypeError as e: + if "after * must be" not in str(e): + raise + raise TypeError( + "Parameters must be tuples, but %r is not (hint: use '(%r, )')" + %(args, args), + ) + + def __repr__(self): + return "param(*%r, **%r)" %self + + +class QuietOrderedDict(MaybeOrderedDict): + """ When 
OrderedDict is available, use it to make sure that the kwargs in + doc strings are consistently ordered. """ + __str__ = dict.__str__ + __repr__ = dict.__repr__ + + +def parameterized_argument_value_pairs(func, p): + """Return tuples of parameterized arguments and their values. + + This is useful if you are writing your own doc_func + function and need to know the values for each parameter name:: + + >>> def func(a, foo=None, bar=42, **kwargs): pass + >>> p = param(1, foo=7, extra=99) + >>> parameterized_argument_value_pairs(func, p) + [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})] + + If the function's first argument is named ``self`` then it will be + ignored:: + + >>> def func(self, a): pass + >>> p = param(1) + >>> parameterized_argument_value_pairs(func, p) + [("a", 1)] + + Additionally, empty ``*args`` or ``**kwargs`` will be ignored:: + + >>> def func(foo, *args): pass + >>> p = param(1) + >>> parameterized_argument_value_pairs(func, p) + [("foo", 1)] + >>> p = param(1, 16) + >>> parameterized_argument_value_pairs(func, p) + [("foo", 1), ("*args", (16, ))] + """ + argspec = inspect.getargspec(func) + arg_offset = 1 if argspec.args[:1] == ["self"] else 0 + + named_args = argspec.args[arg_offset:] + + result = lzip(named_args, p.args) + named_args = argspec.args[len(result) + arg_offset:] + varargs = p.args[len(result):] + + result.extend([ + (name, p.kwargs.get(name, default)) + for (name, default) + in zip(named_args, argspec.defaults or []) + ]) + + seen_arg_names = set([ n for (n, _) in result ]) + keywords = QuietOrderedDict(sorted([ + (name, p.kwargs[name]) + for name in p.kwargs + if name not in seen_arg_names + ])) + + if varargs: + result.append(("*%s" %(argspec.varargs, ), tuple(varargs))) + + if keywords: + result.append(("**%s" %(argspec.keywords, ), keywords)) + + return result + +def short_repr(x, n=64): + """ A shortened repr of ``x`` which is guaranteed to be ``unicode``:: + + >>> short_repr("foo") + u"foo" + >>> 
short_repr("123456789", n=4) + u"12...89" + """ + + x_repr = repr(x) + if isinstance(x_repr, bytes_type): + try: + x_repr = text_type(x_repr, "utf-8") + except UnicodeDecodeError: + x_repr = text_type(x_repr, "latin1") + if len(x_repr) > n: + x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:] + return x_repr + +def default_doc_func(func, num, p): + if func.__doc__ is None: + return None + + all_args_with_values = parameterized_argument_value_pairs(func, p) + + # Assumes that the function passed is a bound method. + descs = ["%s=%s" %(n, short_repr(v)) for n, v in all_args_with_values] + + # The documentation might be a multiline string, so split it + # and just work with the first string, ignoring the period + # at the end if there is one. + first, nl, rest = func.__doc__.lstrip().partition("\n") + suffix = "" + if first.endswith("."): + suffix = "." + first = first[:-1] + args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs)) + return "".join([first.rstrip(), args, suffix, nl, rest]) + +def default_name_func(func, num, p): + base_name = func.__name__ + name_suffix = "_%s" %(num, ) + if len(p.args) > 0 and isinstance(p.args[0], string_types): + name_suffix += "_" + parameterized.to_safe_name(p.args[0]) + return base_name + name_suffix + + +_test_runner_override = None +_test_runner_guess = False +_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"]) +_test_runner_aliases = { + "_pytest": "pytest", +} + +def set_test_runner(name): + global _test_runner_override + if name not in _test_runners: + raise TypeError( + "Invalid test runner: %r (must be one of: %s)" + %(name, ", ".join(_test_runners)), + ) + _test_runner_override = name + +def detect_runner(): + """ Guess which test runner we're using by traversing the stack and looking + for the first matching module. This *should* be reasonably safe, as + it's done during test disocvery where the test runner should be the + stack frame immediately outside. 
""" + if _test_runner_override is not None: + return _test_runner_override + global _test_runner_guess + if _test_runner_guess is False: + stack = inspect.stack() + for record in reversed(stack): + frame = record[0] + module = frame.f_globals.get("__name__").partition(".")[0] + if module in _test_runner_aliases: + module = _test_runner_aliases[module] + if module in _test_runners: + _test_runner_guess = module + break + if record[1].endswith("python2.6/unittest.py"): + _test_runner_guess = "unittest" + break + else: + _test_runner_guess = None + return _test_runner_guess + +class parameterized(object): + """ Parameterize a test case:: + + class TestInt(object): + @parameterized([ + ("A", 10), + ("F", 15), + param("10", 42, base=42) + ]) + def test_int(self, input, expected, base=16): + actual = int(input, base=base) + assert_equal(actual, expected) + + @parameterized([ + (2, 3, 5) + (3, 5, 8), + ]) + def test_add(a, b, expected): + assert_equal(a + b, expected) + """ + + def __init__(self, input, doc_func=None): + self.get_input = self.input_as_callable(input) + self.doc_func = doc_func or default_doc_func + + def __call__(self, test_func): + self.assert_not_in_testcase_subclass() + + @wraps(test_func) + def wrapper(test_self=None): + test_cls = test_self and type(test_self) + if test_self is not None: + if issubclass(test_cls, InstanceType): + raise TypeError(( + "@parameterized can't be used with old-style classes, but " + "%r has an old-style class. Consider using a new-style " + "class, or '@parameterized.expand' " + "(see http://stackoverflow.com/q/54867/71522 for more " + "information on old-style classes)." 
+ ) %(test_self, )) + + original_doc = wrapper.__doc__ + for num, args in enumerate(wrapper.parameterized_input): + p = param.from_decorator(args) + unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p) + try: + wrapper.__doc__ = nose_tuple[0].__doc__ + # Nose uses `getattr(instance, test_func.__name__)` to get + # a method bound to the test instance (as opposed to a + # method bound to the instance of the class created when + # tests were being enumerated). Set a value here to make + # sure nose can get the correct test method. + if test_self is not None: + setattr(test_cls, test_func.__name__, unbound_func) + yield nose_tuple + finally: + if test_self is not None: + delattr(test_cls, test_func.__name__) + wrapper.__doc__ = original_doc + wrapper.parameterized_input = self.get_input() + wrapper.parameterized_func = test_func + test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, ) + return wrapper + + def param_as_nose_tuple(self, test_self, func, num, p): + nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1])) + nose_func.__doc__ = self.doc_func(func, num, p) + # Track the unbound function because we need to setattr the unbound + # function onto the class for nose to work (see comments above), and + # Python 3 doesn't let us pull the function out of a bound method. + unbound_func = nose_func + if test_self is not None: + # Under nose on Py2 we need to return an unbound method to make + # sure that the `self` in the method is properly shared with the + # `self` used in `setUp` and `tearDown`. But only there. Everyone + # else needs a bound method. 
+ func_self = ( + None if PY2 and detect_runner() == "nose" else + test_self + ) + nose_func = make_method(nose_func, func_self, type(test_self)) + return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, ) + + def assert_not_in_testcase_subclass(self): + parent_classes = self._terrible_magic_get_defining_classes() + if any(issubclass(cls, TestCase) for cls in parent_classes): + raise Exception("Warning: '@parameterized' tests won't work " + "inside subclasses of 'TestCase' - use " + "'@parameterized.expand' instead.") + + def _terrible_magic_get_defining_classes(self): + """ Returns the set of parent classes of the class currently being defined. + Will likely only work if called from the ``parameterized`` decorator. + This function is entirely @brandon_rhodes's fault, as he suggested + the implementation: http://stackoverflow.com/a/8793684/71522 + """ + stack = inspect.stack() + if len(stack) <= 4: + return [] + frame = stack[4] + code_context = frame[4] and frame[4][0].strip() + if not (code_context and code_context.startswith("class ")): + return [] + _, _, parents = code_context.partition("(") + parents, _, _ = parents.partition(")") + return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals) + + @classmethod + def input_as_callable(cls, input): + if callable(input): + return lambda: cls.check_input_values(input()) + input_values = cls.check_input_values(input) + return lambda: input_values + + @classmethod + def check_input_values(cls, input_values): + # Explicitly convery non-list inputs to a list so that: + # 1. A helpful exception will be raised if they aren't iterable, and + # 2. 
Generators are unwrapped exactly once (otherwise `nosetests + # --processes=n` has issues; see: + # https://github.com/wolever/nose-parameterized/pull/31) + if not isinstance(input_values, list): + input_values = list(input_values) + return [ param.from_decorator(p) for p in input_values ] + + @classmethod + def expand(cls, input, name_func=None, doc_func=None, **legacy): + """ A "brute force" method of parameterizing test cases. Creates new + test cases and injects them into the namespace that the wrapped + function is being defined in. Useful for parameterizing tests in + subclasses of 'UnitTest', where Nose test generators don't work. + + >>> @parameterized.expand([("foo", 1, 2)]) + ... def test_add1(name, input, expected): + ... actual = add1(input) + ... assert_equal(actual, expected) + ... + >>> locals() + ... 'test_add1_foo_0': ... + >>> + """ + + if "testcase_func_name" in legacy: + warnings.warn("testcase_func_name= is deprecated; use name_func=", + DeprecationWarning, stacklevel=2) + if not name_func: + name_func = legacy["testcase_func_name"] + + if "testcase_func_doc" in legacy: + warnings.warn("testcase_func_doc= is deprecated; use doc_func=", + DeprecationWarning, stacklevel=2) + if not doc_func: + doc_func = legacy["testcase_func_doc"] + + doc_func = doc_func or default_doc_func + name_func = name_func or default_name_func + + def parameterized_expand_wrapper(f, instance=None): + stack = inspect.stack() + frame = stack[1] + frame_locals = frame[0].f_locals + + paramters = cls.input_as_callable(input)() + for num, p in enumerate(paramters): + name = name_func(f, num, p) + frame_locals[name] = cls.param_as_standalone_func(p, f, name) + frame_locals[name].__doc__ = doc_func(f, num, p) + + f.__test__ = False + return parameterized_expand_wrapper + + @classmethod + def param_as_standalone_func(cls, p, func, name): + @wraps(func) + def standalone_func(*a): + return func(*(a + p.args), **p.kwargs) + standalone_func.__name__ = name + + # place_as is used by 
py.test to determine what source file should be + # used for this test. + standalone_func.place_as = func + + # Remove __wrapped__ because py.test will try to look at __wrapped__ + # to determine which parameters should be used with this test case, + # and obviously we don't need it to do any parameterization. + try: + del standalone_func.__wrapped__ + except AttributeError: + pass + return standalone_func + + @classmethod + def to_safe_name(cls, s): + return str(re.sub("[^a-zA-Z0-9_]+", "_", s)) diff -Nru python-numpy-1.13.3/numpy/testing/nose_tools/utils.py python-numpy-1.14.5/numpy/testing/nose_tools/utils.py --- python-numpy-1.13.3/numpy/testing/nose_tools/utils.py 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/nose_tools/utils.py 2018-06-12 18:28:52.000000000 +0000 @@ -0,0 +1,2229 @@ +""" +Utility function to facilitate testing. + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import re +import operator +import warnings +from functools import partial, wraps +import shutil +import contextlib +from tempfile import mkdtemp, mkstemp +from unittest.case import SkipTest + +from numpy.core import( + float32, empty, arange, array_repr, ndarray, isnat, array) +from numpy.lib.utils import deprecate + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +__all__ = [ + 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 
'IS_PYPY', + 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare', + '_assert_valid_refcount', '_gen_alignment_data', + ] + + +class KnownFailureException(Exception): + '''Raise this exception to mark a test as a known failing test.''' + pass + + +KnownFailureTest = KnownFailureException # backwards compat +verbose = 0 + +IS_PYPY = '__pypy__' in sys.modules +HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None + + +def import_nose(): + """ Import nose only when needed. + """ + nose_is_good = True + minimum_nose_version = (1, 0, 0) + try: + import nose + except ImportError: + nose_is_good = False + else: + if nose.__versioninfo__ < minimum_nose_version: + nose_is_good = False + + if not nose_is_good: + msg = ('Need nose >= %d.%d.%d for tests - see ' + 'http://nose.readthedocs.io' % + minimum_nose_version) + raise ImportError(msg) + + return nose + + +def assert_(val, msg=''): + """ + Assert that works in release mode. + Accepts callable msg to allow deferring evaluation until failure. + + The Python built-in ``assert`` does not work when executing code in + optimized mode (the ``-O`` flag) - no byte-code is generated for it. + + For documentation on usage, refer to the Python documentation. + + """ + __tracebackhide__ = True # Hide traceback for py.test + if not val: + try: + smsg = msg() + except TypeError: + smsg = msg + raise AssertionError(smsg) + + +def gisnan(x): + """like isnan, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isnan and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. 
+ + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isnan + st = isnan(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isnan not supported for this type") + return st + + +def gisfinite(x): + """like isfinite, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isfinite and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isfinite, errstate + with errstate(invalid='ignore'): + st = isfinite(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isfinite not supported for this type") + return st + + +def gisinf(x): + """like isinf, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isinf and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isinf, errstate + with errstate(invalid='ignore'): + st = isinf(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isinf not supported for this type") + return st + + +@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. " + "Use numpy.random.rand instead.") +def rand(*args): + """Returns an array of random numbers with the given shape. + + This only uses the standard library, so it is useful for testing purposes. 
+ """ + import random + from numpy.core import zeros, float64 + results = zeros(args, float64) + f = results.flat + for i in range(len(f)): + f[i] = random.random() + return results + + +if os.name == 'nt': + # Code "stolen" from enthought/debug/memusage.py + def GetPerformanceAttributes(object, counter, instance=None, + inum=-1, format=None, machine=None): + # NOTE: Many counters require 2 samples to give accurate results, + # including "% Processor Time" (as by definition, at any instant, a + # thread's CPU usage is either 0 or 100). To read counters like this, + # you should copy this function, but keep the counter open, and call + # CollectQueryData() each time you need to know. + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp + # My older explanation for this was that the "AddCounter" process forced + # the CPU to 100%, but the above makes more sense :) + import win32pdh + if format is None: + format = win32pdh.PDH_FMT_LONG + path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter)) + hq = win32pdh.OpenQuery() + try: + hc = win32pdh.AddCounter(hq, path) + try: + win32pdh.CollectQueryData(hq) + type, val = win32pdh.GetFormattedCounterValue(hc, format) + return val + finally: + win32pdh.RemoveCounter(hc) + finally: + win32pdh.CloseQuery(hq) + + def memusage(processName="python", instance=0): + # from win32pdhutil, part of the win32all package + import win32pdh + return GetPerformanceAttributes("Process", "Virtual Bytes", + processName, instance, + win32pdh.PDH_FMT_LONG, None) +elif sys.platform[:5] == 'linux': + + def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())): + """ + Return virtual memory size in bytes of the running python. + + """ + try: + f = open(_proc_pid_stat, 'r') + l = f.readline().split(' ') + f.close() + return int(l[22]) + except Exception: + return +else: + def memusage(): + """ + Return memory usage of running python. 
[Not implemented] + + """ + raise NotImplementedError + + +if sys.platform[:5] == 'linux': + def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()), + _load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. + + """ + import time + if not _load_time: + _load_time.append(time.time()) + try: + f = open(_proc_pid_stat, 'r') + l = f.readline().split(' ') + f.close() + return int(l[13]) + except Exception: + return int(100*(time.time()-_load_time[0])) +else: + # os.getpid is not in all platforms available. + # Using time is safe but inaccurate, especially when process + # was suspended or sleeping. + def jiffies(_load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. + + """ + import time + if not _load_time: + _load_time.append(time.time()) + return int(100*(time.time()-_load_time[0])) + + +def build_err_msg(arrays, err_msg, header='Items are not equal:', + verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): + msg = ['\n' + header] + if err_msg: + if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): + msg = [msg[0] + ' ' + err_msg] + else: + msg.append(err_msg) + if verbose: + for i, a in enumerate(arrays): + + if isinstance(a, ndarray): + # precision argument is only needed if the objects are ndarrays + r_func = partial(array_repr, precision=precision) + else: + r_func = repr + + try: + r = r_func(a) + except Exception as exc: + r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc) + if r.count('\n') > 3: + r = '\n'.join(r.splitlines()[:3]) + r += '...' + msg.append(' %s: %s' % (names[i], r)) + return '\n'.join(msg) + + +def assert_equal(actual, desired, err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal. 
+ + Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), + check that all elements of these objects are equal. An exception is raised + at the first conflicting values. + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal. + + Examples + -------- + >>> np.testing.assert_equal([4,5], [4,6]) + ... + : + Items are not equal: + item=1 + ACTUAL: 5 + DESIRED: 6 + + """ + __tracebackhide__ = True # Hide traceback for py.test + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg, verbose) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose) + return + from numpy.core import ndarray, isscalar, signbit + from numpy.lib import iscomplexobj, real, imag + if isinstance(actual, ndarray) or isinstance(desired, ndarray): + return assert_array_equal(actual, desired, err_msg, verbose) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = 
real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError(msg) + + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + # Inf/nan/negative zero handling + try: + isdesnan = gisnan(desired) + isactnan = gisnan(actual) + if isdesnan and isactnan: + return # both nan, so equal + + # handle signed zero specially for floats + if desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + try: + isdesnat = isnat(desired) + isactnat = isnat(actual) + dtypes_match = array(desired).dtype.type == array(actual).dtype.type + if isdesnat and isactnat: + # If both are NaT (and have the same dtype -- datetime or + # timedelta) they are considered equal. + if dtypes_match: + return + else: + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + try: + # Explicitly use __eq__ for comparison, gh-2552 + if not (desired == actual): + raise AssertionError(msg) + + except (DeprecationWarning, FutureWarning) as e: + # this handles the case when the two types are not even comparable + if 'elementwise == comparison' in e.args[0]: + raise AssertionError(msg) + else: + raise + + +def print_assert_equal(test_string, actual, desired): + """ + Test if two objects are equal, and print an error message if test fails. + + The test is performed with ``actual == desired``. + + Parameters + ---------- + test_string : str + The message supplied to AssertionError. + actual : object + The object to test for equality against `desired`. + desired : object + The expected result. 
+ + Examples + -------- + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) + Traceback (most recent call last): + ... + AssertionError: Test XYZ of func xyz failed + ACTUAL: + [0, 1] + DESIRED: + [0, 2] + + """ + __tracebackhide__ = True # Hide traceback for py.test + import pprint + + if not (actual == desired): + msg = StringIO() + msg.write(test_string) + msg.write(' failed\nACTUAL: \n') + pprint.pprint(actual, msg) + msg.write('DESIRED: \n') + pprint.pprint(desired, msg) + raise AssertionError(msg.getvalue()) + + +def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): + """ + Raises an AssertionError if two items are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies that the elements of ``actual`` and ``desired`` satisfy. + + ``abs(desired-actual) < 1.5 * 10**(-decimal)`` + + That is a looser test than originally documented, but agrees with what the + actual implementation in `assert_array_almost_equal` did up to rounding + vagaries. An exception is raised at conflicting values. For ndarrays this + delegates to assert_array_almost_equal + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. 
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> import numpy.testing as npt + >>> npt.assert_almost_equal(2.3333333333333, 2.33333334) + >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + ... + : + Items are not equal: + ACTUAL: 2.3333333333333002 + DESIRED: 2.3333333399999998 + + >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]), + ... np.array([1.0,2.33333334]), decimal=9) + ... + : + Arrays are not almost equal + + (mismatch 50.0%) + x: array([ 1. , 2.33333333]) + y: array([ 1. , 2.33333334]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import ndarray + from numpy.lib import iscomplexobj, real, imag + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + def _build_err_msg(): + header = ('Arrays are not almost equal to %d decimals' % decimal) + return build_err_msg([actual, desired], err_msg, verbose=verbose, + header=header) + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except AssertionError: + raise AssertionError(_build_err_msg()) + + if isinstance(actual, (ndarray, tuple, list)) \ + or isinstance(desired, (ndarray, tuple, list)): + return assert_array_almost_equal(actual, desired, decimal, err_msg) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not 
(gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(_build_err_msg()) + else: + if not desired == actual: + raise AssertionError(_build_err_msg()) + return + except (NotImplementedError, TypeError): + pass + if abs(desired - actual) >= 1.5 * 10.0**(-decimal): + raise AssertionError(_build_err_msg()) + + +def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): + """ + Raises an AssertionError if two items are not equal up to significant + digits. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + Given two numbers, check that they are approximately equal. + Approximately equal is defined as the number of significant digits + that agree. + + Parameters + ---------- + actual : scalar + The object to check. + desired : scalar + The expected object. + significant : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, + significant=8) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, + significant=8) + ... 
+ : + Items are not equal to 8 significant digits: + ACTUAL: 1.234567e-021 + DESIRED: 1.2345672000000001e-021 + + the evaluated condition that raises the exception is + + >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) + True + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + (actual, desired) = map(float, (actual, desired)) + if desired == actual: + return + # Normalized the numbers to be in range (-10.0,10.0) + # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) + with np.errstate(invalid='ignore'): + scale = 0.5*(np.abs(desired) + np.abs(actual)) + scale = np.power(10, np.floor(np.log10(scale))) + try: + sc_desired = desired/scale + except ZeroDivisionError: + sc_desired = 0.0 + try: + sc_actual = actual/scale + except ZeroDivisionError: + sc_actual = 0.0 + msg = build_err_msg([actual, desired], err_msg, + header='Items are not equal to %d significant digits:' % + significant, + verbose=verbose) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(msg) + else: + if not desired == actual: + raise AssertionError(msg) + return + except (TypeError, NotImplementedError): + pass + if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)): + raise AssertionError(msg) + + +def assert_array_compare(comparison, x, y, err_msg='', verbose=True, + header='', precision=6, equal_nan=True, + equal_inf=True): + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import array, isnan, isinf, any, inf + x = array(x, copy=False, subok=True) + y = array(y, copy=False, subok=True) + + def isnumber(x): + return x.dtype.char in '?bhilqpBHILQPefdgFDG' + + def istime(x): + return x.dtype.char in "Mm" + + def 
chk_same_position(x_id, y_id, hasval='nan'): + """Handling nan/inf: check that x and y have the nan/inf at the same + locations.""" + try: + assert_array_equal(x_id, y_id) + except AssertionError: + msg = build_err_msg([x, y], + err_msg + '\nx and y %s location mismatch:' + % (hasval), verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + + try: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + msg = build_err_msg([x, y], + err_msg + + '\n(shapes %s, %s mismatch)' % (x.shape, + y.shape), + verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + + if isnumber(x) and isnumber(y): + has_nan = has_inf = False + if equal_nan: + x_isnan, y_isnan = isnan(x), isnan(y) + # Validate that NaNs are in the same place + has_nan = any(x_isnan) or any(y_isnan) + if has_nan: + chk_same_position(x_isnan, y_isnan, hasval='nan') + + if equal_inf: + x_isinf, y_isinf = isinf(x), isinf(y) + # Validate that infinite values are in the same place + has_inf = any(x_isinf) or any(y_isinf) + if has_inf: + # Check +inf and -inf separately, since they are different + chk_same_position(x == +inf, y == +inf, hasval='+inf') + chk_same_position(x == -inf, y == -inf, hasval='-inf') + + if has_nan and has_inf: + x = x[~(x_isnan | x_isinf)] + y = y[~(y_isnan | y_isinf)] + elif has_nan: + x = x[~x_isnan] + y = y[~y_isnan] + elif has_inf: + x = x[~x_isinf] + y = y[~y_isinf] + + # Only do the comparison if actual values are left + if x.size == 0: + return + + elif istime(x) and istime(y): + # If one is datetime64 and the other timedelta64 there is no point + if equal_nan and x.dtype.type == y.dtype.type: + x_isnat, y_isnat = isnat(x), isnat(y) + + if any(x_isnat) or any(y_isnat): + chk_same_position(x_isnat, y_isnat, hasval="NaT") + + if any(x_isnat) or any(y_isnat): + x = x[~x_isnat] + y = y[~y_isnat] + + val = comparison(x, y) + + if isinstance(val, bool): + cond = val + reduced 
= [0] + else: + reduced = val.ravel() + cond = reduced.all() + reduced = reduced.tolist() + if not cond: + match = 100-100.0*reduced.count(1)/len(reduced) + msg = build_err_msg([x, y], + err_msg + + '\n(mismatch %s%%)' % (match,), + verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + except ValueError: + import traceback + efmt = traceback.format_exc() + header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header) + + msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise ValueError(msg) + + +def assert_array_equal(x, y, err_msg='', verbose=True): + """ + Raises an AssertionError if two array_like objects are not equal. + + Given two array_like objects, check that the shape is equal and all + elements of these objects are equal. An exception is raised at + shape mismatch or conflicting values. In contrast to the standard usage + in numpy, NaNs are compared like numbers, no assertion is raised if + both objects have NaNs in the same positions. + + The usual caution for verifying equality with floating point numbers is + advised. + + Parameters + ---------- + x : array_like + The actual object to check. + y : array_like + The desired, expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + The first assert does not raise an exception: + + >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], + ... 
[np.exp(0),2.33333, np.nan]) + + Assert fails with numerical inprecision with floats: + + >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan]) + ... + : + AssertionError: + Arrays are not equal + + (mismatch 50.0%) + x: array([ 1. , 3.14159265, NaN]) + y: array([ 1. , 3.14159265, NaN]) + + Use `assert_allclose` or one of the nulp (number of floating point values) + functions for these cases instead: + + >>> np.testing.assert_allclose([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan], + ... rtol=1e-10, atol=0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, + verbose=verbose, header='Arrays are not equal') + + +def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies identical shapes and that the elements of ``actual`` and + ``desired`` satisfy. + + ``abs(desired-actual) < 1.5 * 10**(-decimal)`` + + That is a looser test than originally documented, but agrees with what the + actual implementation did up to rounding vagaries. An exception is raised + at shape mismatch or conflicting values. In contrast to the standard usage + in numpy, NaNs are compared like numbers, no assertion is raised if both + objects have NaNs in the same positions. + + Parameters + ---------- + x : array_like + The actual object to check. + y : array_like + The desired, expected object. + decimal : int, optional + Desired precision, default is 6. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. 
+ + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + the first assert does not raise an exception + + >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], + [1.0,2.333,np.nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33339,np.nan], decimal=5) + ... + : + AssertionError: + Arrays are not almost equal + + (mismatch 50.0%) + x: array([ 1. , 2.33333, NaN]) + y: array([ 1. , 2.33339, NaN]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33333, 5], decimal=5) + : + ValueError: + Arrays are not almost equal + x: array([ 1. , 2.33333, NaN]) + y: array([ 1. , 2.33333, 5. ]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import around, number, float_, result_type, array + from numpy.core.numerictypes import issubdtype + from numpy.core.fromnumeric import any as npany + + def compare(x, y): + try: + if npany(gisinf(x)) or npany( gisinf(y)): + xinfid = gisinf(x) + yinfid = gisinf(y) + if not (xinfid == yinfid).all(): + return False + # if one item, x and y is +- inf + if x.size == y.size == 1: + return x == y + x = x[~xinfid] + y = y[~yinfid] + except (TypeError, NotImplementedError): + pass + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = result_type(y, 1.) 
+ y = array(y, dtype=dtype, copy=False, subok=True) + z = abs(x - y) + + if not issubdtype(z.dtype, number): + z = z.astype(float_) # handle object arrays + + return z < 1.5 * 10.0**(-decimal) + + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header=('Arrays are not almost equal to %d decimals' % decimal), + precision=decimal) + + +def assert_array_less(x, y, err_msg='', verbose=True): + """ + Raises an AssertionError if two array_like objects are not ordered by less + than. + + Given two array_like objects, check that the shape is equal and all + elements of the first object are strictly smaller than those of the + second object. An exception is raised at shape mismatch or incorrectly + ordered values. Shape mismatch does not raise if an object has zero + dimension. In contrast to the standard usage in numpy, NaNs are + compared, no assertion is raised if both objects have NaNs in the same + positions. + + + + Parameters + ---------- + x : array_like + The smaller object to check. + y : array_like + The larger object to compare. + err_msg : string + The error message to be printed in case of failure. + verbose : bool + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_array_equal: tests objects for equality + assert_array_almost_equal: test objects for equality up to precision + + + + Examples + -------- + >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan]) + >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan]) + ... + : + Arrays are not less-ordered + (mismatch 50.0%) + x: array([ 1., 1., NaN]) + y: array([ 1., 2., NaN]) + + >>> np.testing.assert_array_less([1.0, 4.0], 3) + ... + : + Arrays are not less-ordered + (mismatch 50.0%) + x: array([ 1., 4.]) + y: array(3) + + >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4]) + ... 
+ : + Arrays are not less-ordered + (shapes (3,), (1,) mismatch) + x: array([ 1., 2., 3.]) + y: array([4]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, + verbose=verbose, + header='Arrays are not less-ordered', + equal_inf=False) + + +def runstring(astr, dict): + exec(astr, dict) + + +def assert_string_equal(actual, desired): + """ + Test if two strings are equal. + + If the given strings are equal, `assert_string_equal` does nothing. + If they are not equal, an AssertionError is raised, and the diff + between the strings is shown. + + Parameters + ---------- + actual : str + The string to test for equality against the expected string. + desired : str + The expected string. + + Examples + -------- + >>> np.testing.assert_string_equal('abc', 'abc') + >>> np.testing.assert_string_equal('abc', 'abcd') + Traceback (most recent call last): + File "", line 1, in + ... + AssertionError: Differences in strings: + - abc+ abcd? + + + """ + # delay import of difflib to reduce startup time + __tracebackhide__ = True # Hide traceback for py.test + import difflib + + if not isinstance(actual, str): + raise AssertionError(repr(type(actual))) + if not isinstance(desired, str): + raise AssertionError(repr(type(desired))) + if re.match(r'\A'+desired+r'\Z', actual, re.M): + return + + diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1))) + diff_list = [] + while diff: + d1 = diff.pop(0) + if d1.startswith(' '): + continue + if d1.startswith('- '): + l = [d1] + d2 = diff.pop(0) + if d2.startswith('? '): + l.append(d2) + d2 = diff.pop(0) + if not d2.startswith('+ '): + raise AssertionError(repr(d2)) + l.append(d2) + if diff: + d3 = diff.pop(0) + if d3.startswith('? 
'): + l.append(d3) + else: + diff.insert(0, d3) + if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]): + continue + diff_list.extend(l) + continue + raise AssertionError(repr(d1)) + if not diff_list: + return + msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip() + if actual != desired: + raise AssertionError(msg) + + +def rundocs(filename=None, raise_on_error=True): + """ + Run doctests found in the given file. + + By default `rundocs` raises an AssertionError on failure. + + Parameters + ---------- + filename : str + The path to the file for which the doctests are run. + raise_on_error : bool + Whether to raise an AssertionError when a doctest fails. Default is + True. + + Notes + ----- + The doctests can be run by the user/developer by adding the ``doctests`` + argument to the ``test()`` call. For example, to run all tests (including + doctests) for `numpy.lib`: + + >>> np.lib.test(doctests=True) #doctest: +SKIP + """ + from numpy.compat import npy_load_module + import doctest + if filename is None: + f = sys._getframe(1) + filename = f.f_globals['__file__'] + name = os.path.splitext(os.path.basename(filename))[0] + m = npy_load_module(name, filename) + + tests = doctest.DocTestFinder().find(m) + runner = doctest.DocTestRunner(verbose=False) + + msg = [] + if raise_on_error: + out = lambda s: msg.append(s) + else: + out = None + + for test in tests: + runner.run(test, out=out) + + if runner.failures > 0 and raise_on_error: + raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) + + +def raises(*args,**kwargs): + nose = import_nose() + return nose.tools.raises(*args,**kwargs) + + +def assert_raises(*args, **kwargs): + """ + assert_raises(exception_class, callable, *args, **kwargs) + assert_raises(exception_class) + + Fail unless an exception of class exception_class is thrown + by callable when invoked with arguments args and keyword + arguments kwargs. 
If a different type of exception is + thrown, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + Alternatively, `assert_raises` can be used as a context manager: + + >>> from numpy.testing import assert_raises + >>> with assert_raises(ZeroDivisionError): + ... 1 / 0 + + is equivalent to + + >>> def div(x, y): + ... return x / y + >>> assert_raises(ZeroDivisionError, div, 1, 0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + nose = import_nose() + return nose.tools.assert_raises(*args,**kwargs) + + +def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): + """ + assert_raises_regex(exception_class, expected_regexp, callable, *args, + **kwargs) + assert_raises_regex(exception_class, expected_regexp) + + Fail unless an exception of class exception_class and with message that + matches expected_regexp is thrown by callable when invoked with arguments + args and keyword arguments kwargs. + + Alternatively, can be used as a context manager like `assert_raises`. + + Name of this function adheres to Python 3.2+ reference, but should work in + all versions down to 2.6. + + Notes + ----- + .. versionadded:: 1.9.0 + + """ + __tracebackhide__ = True # Hide traceback for py.test + nose = import_nose() + + if sys.version_info.major >= 3: + funcname = nose.tools.assert_raises_regex + else: + # Only present in Python 2.7, missing from unittest in 2.6 + funcname = nose.tools.assert_raises_regexp + + return funcname(exception_class, expected_regexp, *args, **kwargs) + + +def decorate_methods(cls, decorator, testmatch=None): + """ + Apply a decorator to all methods in a class matching a regular expression. + + The given decorator is applied to all public methods of `cls` that are + matched by the regular expression `testmatch` + (``testmatch.search(methodname)``). Methods that are private, i.e. start + with an underscore, are ignored. 
+ + Parameters + ---------- + cls : class + Class whose methods to decorate. + decorator : function + Decorator to apply to methods + testmatch : compiled regexp or str, optional + The regular expression. Default value is None, in which case the + nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) + is used. + If `testmatch` is a string, it is compiled to a regular expression + first. + + """ + if testmatch is None: + testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + else: + testmatch = re.compile(testmatch) + cls_attr = cls.__dict__ + + # delayed import to reduce startup time + from inspect import isfunction + + methods = [_m for _m in cls_attr.values() if isfunction(_m)] + for function in methods: + try: + if hasattr(function, 'compat_func_name'): + funcname = function.compat_func_name + else: + funcname = function.__name__ + except AttributeError: + # not a function + continue + if testmatch.search(funcname) and not funcname.startswith('_'): + setattr(cls, funcname, decorator(function)) + return + + +def measure(code_str,times=1,label=None): + """ + Return elapsed time for executing code in the namespace of the caller. + + The supplied code string is compiled with the Python builtin ``compile``. + The precision of the timing is 10 milli-seconds. If the code will execute + fast on this timescale, it can be executed many times to get reasonable + timing accuracy. + + Parameters + ---------- + code_str : str + The code to be timed. + times : int, optional + The number of times the code is executed. Default is 1. The code is + only compiled once. + label : str, optional + A label to identify `code_str` with. This is passed into ``compile`` + as the second argument (for run-time error messages). + + Returns + ------- + elapsed : float + Total elapsed time in seconds for executing `code_str` `times` times. + + Examples + -------- + >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', + ... 
times=times) + >>> print("Time for a single execution : ", etime / times, "s") + Time for a single execution : 0.005 s + + """ + frame = sys._getframe(1) + locs, globs = frame.f_locals, frame.f_globals + + code = compile(code_str, + 'Test name: %s ' % label, + 'exec') + i = 0 + elapsed = jiffies() + while i < times: + i += 1 + exec(code, globs, locs) + elapsed = jiffies() - elapsed + return 0.01*elapsed + + +def _assert_valid_refcount(op): + """ + Check that ufuncs don't mishandle refcount of object `1`. + Used in a few regression tests. + """ + if not HAS_REFCOUNT: + return True + import numpy as np + + b = np.arange(100*100).reshape(100, 100) + c = b + i = 1 + + rc = sys.getrefcount(i) + for j in range(15): + d = op(b, c) + assert_(sys.getrefcount(i) >= rc) + del d # for pyflakes + + +def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, + err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + tolerance. + + The test is equivalent to ``allclose(actual, desired, rtol, atol)``. + It compares the difference between `actual` and `desired` to + ``atol + rtol * abs(desired)``. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + actual : array_like + Array obtained. + desired : array_like + Array desired. + rtol : float, optional + Relative tolerance. + atol : float, optional + Absolute tolerance. + equal_nan : bool, optional. + If True, NaNs will compare equal. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. 
+ + See Also + -------- + assert_array_almost_equal_nulp, assert_array_max_ulp + + Examples + -------- + >>> x = [1e-5, 1e-3, 1e-1] + >>> y = np.arccos(np.cos(x)) + >>> assert_allclose(x, y, rtol=1e-5, atol=0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + def compare(x, y): + return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, + equal_nan=equal_nan) + + actual, desired = np.asanyarray(actual), np.asanyarray(desired) + header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol) + assert_array_compare(compare, actual, desired, err_msg=str(err_msg), + verbose=verbose, header=header, equal_nan=equal_nan) + + +def assert_array_almost_equal_nulp(x, y, nulp=1): + """ + Compare two arrays relatively to their spacing. + + This is a relatively robust method to compare two arrays whose amplitude + is variable. + + Parameters + ---------- + x, y : array_like + Input arrays. + nulp : int, optional + The maximum number of unit in the last place for tolerance (see Notes). + Default is 1. + + Returns + ------- + None + + Raises + ------ + AssertionError + If the spacing between `x` and `y` for one or more elements is larger + than `nulp`. + + See Also + -------- + assert_array_max_ulp : Check that all items of arrays differ in at most + N Units in the Last Place. + spacing : Return the distance between x and the nearest adjacent number. + + Notes + ----- + An assertion is raised if the following condition is not met:: + + abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y))) + + Examples + -------- + >>> x = np.array([1., 1e-10, 1e-20]) + >>> eps = np.finfo(x.dtype).eps + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) + + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) + Traceback (most recent call last): + ... 
+ AssertionError: X and Y are not equal to 1 ULP (max is 2) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ax = np.abs(x) + ay = np.abs(y) + ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) + if not np.all(np.abs(x-y) <= ref): + if np.iscomplexobj(x) or np.iscomplexobj(y): + msg = "X and Y are not equal to %d ULP" % nulp + else: + max_nulp = np.max(nulp_diff(x, y)) + msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) + raise AssertionError(msg) + + +def assert_array_max_ulp(a, b, maxulp=1, dtype=None): + """ + Check that all items of arrays differ in at most N Units in the Last Place. + + Parameters + ---------- + a, b : array_like + Input arrays to be compared. + maxulp : int, optional + The maximum number of units in the last place that elements of `a` and + `b` can differ. Default is 1. + dtype : dtype, optional + Data-type to convert `a` and `b` to if given. Default is None. + + Returns + ------- + ret : ndarray + Array containing number of representable floating point numbers between + items in `a` and `b`. + + Raises + ------ + AssertionError + If one or more elements differ by more than `maxulp`. + + See Also + -------- + assert_array_almost_equal_nulp : Compare two arrays relatively to their + spacing. + + Examples + -------- + >>> a = np.linspace(0., 1., 100) + >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ret = nulp_diff(a, b, dtype) + if not np.all(ret <= maxulp): + raise AssertionError("Arrays are not almost equal up to %g ULP" % + maxulp) + return ret + + +def nulp_diff(x, y, dtype=None): + """For each item in x and y, return the number of representable floating + points between them. + + Parameters + ---------- + x : array_like + first input array + y : array_like + second input array + dtype : dtype, optional + Data-type to convert `x` and `y` to if given. Default is None. 
+ + Returns + ------- + nulp : array_like + number of representable floating point numbers between each item in x + and y. + + Examples + -------- + # By definition, epsilon is the smallest number such as 1 + eps != 1, so + # there should be exactly one ULP between 1 and 1 + eps + >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) + 1.0 + """ + import numpy as np + if dtype: + x = np.array(x, dtype=dtype) + y = np.array(y, dtype=dtype) + else: + x = np.array(x) + y = np.array(y) + + t = np.common_type(x, y) + if np.iscomplexobj(x) or np.iscomplexobj(y): + raise NotImplementedError("_nulp not implemented for complex array") + + x = np.array(x, dtype=t) + y = np.array(y, dtype=t) + + if not x.shape == y.shape: + raise ValueError("x and y do not have the same shape: %s - %s" % + (x.shape, y.shape)) + + def _diff(rx, ry, vdt): + diff = np.array(rx-ry, dtype=vdt) + return np.abs(diff) + + rx = integer_repr(x) + ry = integer_repr(y) + return _diff(rx, ry, t) + + +def _integer_repr(x, vdt, comp): + # Reinterpret binary representation of the float as sign-magnitude: + # take into account two-complement representation + # See also + # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm + rx = x.view(vdt) + if not (rx.size == 1): + rx[rx < 0] = comp - rx[rx < 0] + else: + if rx < 0: + rx = comp - rx + + return rx + + +def integer_repr(x): + """Return the signed-magnitude interpretation of the binary representation of + x.""" + import numpy as np + if x.dtype == np.float32: + return _integer_repr(x, np.int32, np.int32(-2**31)) + elif x.dtype == np.float64: + return _integer_repr(x, np.int64, np.int64(-2**63)) + else: + raise ValueError("Unsupported dtype %s" % x.dtype) + + +# The following two classes are copied from python 2.6 warnings module (context +# manager) +class WarningMessage(object): + + """ + Holds the result of a single showwarning() call. 
+ + Deprecated in 1.8.0 + + Notes + ----- + `WarningMessage` is copied from the Python 2.6 warnings module, + so it can be used in NumPy with older Python versions. + + """ + + _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", + "line") + + def __init__(self, message, category, filename, lineno, file=None, + line=None): + local_values = locals() + for attr in self._WARNING_DETAILS: + setattr(self, attr, local_values[attr]) + if category: + self._category_name = category.__name__ + else: + self._category_name = None + + def __str__(self): + return ("{message : %r, category : %r, filename : %r, lineno : %s, " + "line : %r}" % (self.message, self._category_name, + self.filename, self.lineno, self.line)) + + +class WarningManager(object): + """ + A context manager that copies and restores the warnings filter upon + exiting the context. + + The 'record' argument specifies whether warnings should be captured by a + custom implementation of ``warnings.showwarning()`` and be appended to a + list returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + + The 'module' argument is to specify an alternative module to the module + named 'warnings' and imported under that name. This argument is only useful + when testing the warnings module itself. + + Deprecated in 1.8.0 + + Notes + ----- + `WarningManager` is a copy of the ``catch_warnings`` context manager + from the Python 2.6 warnings module, with slight modifications. + It is copied so it can be used in NumPy with older Python versions. 
+ + """ + + def __init__(self, record=False, module=None): + self._record = record + if module is None: + self._module = sys.modules['warnings'] + else: + self._module = module + self._entered = False + + def __enter__(self): + if self._entered: + raise RuntimeError("Cannot enter %r twice" % self) + self._entered = True + self._filters = self._module.filters + self._module.filters = self._filters[:] + self._showwarning = self._module.showwarning + if self._record: + log = [] + + def showwarning(*args, **kwargs): + log.append(WarningMessage(*args, **kwargs)) + self._module.showwarning = showwarning + return log + else: + return None + + def __exit__(self): + if not self._entered: + raise RuntimeError("Cannot exit %r without entering first" % self) + self._module.filters = self._filters + self._module.showwarning = self._showwarning + + +@contextlib.contextmanager +def _assert_warns_context(warning_class, name=None): + __tracebackhide__ = True # Hide traceback for py.test + with suppress_warnings() as sup: + l = sup.record(warning_class) + yield + if not len(l) > 0: + name_str = " when calling %s" % name if name is not None else "" + raise AssertionError("No warning raised" + name_str) + + +def assert_warns(warning_class, *args, **kwargs): + """ + Fail unless the given callable throws the specified warning. + + A warning of class warning_class should be thrown by the callable when + invoked with arguments args and keyword arguments kwargs. + If a different type of warning is thrown, it will not be caught. + + If called with all arguments other than the warning class omitted, may be + used as a context manager: + + with assert_warns(SomeWarning): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + warning_class : class + The class defining the warning that `func` is expected to throw. + func : callable + The callable to test. 
+ \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. + + """ + if not args: + return _assert_warns_context(warning_class) + + func = args[0] + args = args[1:] + with _assert_warns_context(warning_class, name=func.__name__): + return func(*args, **kwargs) + + +@contextlib.contextmanager +def _assert_no_warnings_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + yield + if len(l) > 0: + name_str = " when calling %s" % name if name is not None else "" + raise AssertionError("Got warnings%s: %s" % (name_str, l)) + + +def assert_no_warnings(*args, **kwargs): + """ + Fail if the given callable produces any warnings. + + If called with all arguments omitted, may be used as a context manager: + + with assert_no_warnings(): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. 
+ + """ + if not args: + return _assert_no_warnings_context() + + func = args[0] + args = args[1:] + with _assert_no_warnings_context(name=func.__name__): + return func(*args, **kwargs) + + +def _gen_alignment_data(dtype=float32, type='binary', max_size=24): + """ + generator producing data with different alignment and offsets + to test simd vectorization + + Parameters + ---------- + dtype : dtype + data type to produce + type : string + 'unary': create data for unary operations, creates one input + and output array + 'binary': create data for unary operations, creates two input + and output array + max_size : integer + maximum size of data to produce + + Returns + ------- + if type is 'unary' yields one output, one input array and a message + containing information on the data + if type is 'binary' yields one output array, two input array and a message + containing information on the data + + """ + ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' + bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' + for o in range(3): + for s in range(o + 2, max(o + 3, max_size)): + if type == 'unary': + inp = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') + d = inp() + yield d, d, ufmt % (o, o, s, dtype, 'in place') + yield out[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'out of place') + yield inp()[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'aliased') + yield inp()[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'aliased') + if type == 'binary': + inp1 = lambda: arange(s, dtype=dtype)[o:] + inp2 = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp1(), inp2(), bfmt % \ + (o, o, o, s, dtype, 'out of place') + d = inp1() + yield d, d, inp2(), bfmt % \ + (o, o, o, s, dtype, 'in place1') + d = inp2() + yield d, inp1(), d, bfmt % \ + (o, o, o, 
s, dtype, 'in place2') + yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'out of place') + yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'aliased') + + +class IgnoreException(Exception): + "Ignoring this exception due to disabled feature" + + +@contextlib.contextmanager +def tempdir(*args, **kwargs): + """Context manager to provide a temporary test folder. + + All arguments are passed as this to the underlying tempfile.mkdtemp + function. + + """ + tmpdir = mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + +@contextlib.contextmanager +def temppath(*args, **kwargs): + """Context manager for temporary files. + + Context manager that returns the path to a closed temporary file. Its + parameters are the same as for tempfile.mkstemp and are passed directly + to that function. The underlying file is removed when the context is + exited, so it should be closed at that time. + + Windows does not allow a temporary file to be opened if it is already + open, so the underlying file must be closed after opening before it + can be opened again. + + """ + fd, path = mkstemp(*args, **kwargs) + os.close(fd) + try: + yield path + finally: + os.remove(path) + + +class clear_and_catch_warnings(warnings.catch_warnings): + """ Context manager that resets warning registry for catching warnings + + Warnings can be slippery, because, whenever a warning is triggered, Python + adds a ``__warningregistry__`` member to the *calling* module. 
This makes + it impossible to retrigger the warning in this module, whatever you put in + the warnings filters. This context manager accepts a sequence of `modules` + as a keyword argument to its constructor and: + + * stores and removes any ``__warningregistry__`` entries in given `modules` + on entry; + * resets ``__warningregistry__`` to its previous state on exit. + + This makes it possible to trigger any warning afresh inside the context + manager without disturbing the state of warnings outside. + + For compatibility with Python 3.0, please consider all arguments to be + keyword-only. + + Parameters + ---------- + record : bool, optional + Specifies whether warnings should be captured by a custom + implementation of ``warnings.showwarning()`` and be appended to a list + returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + modules : sequence, optional + Sequence of modules for which to reset warnings registry on entry and + restore on exit. To work correctly, all 'ignore' filters should + filter by one of these modules. + + Examples + -------- + >>> import warnings + >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]): + ... warnings.simplefilter('always') + ... warnings.filterwarnings('ignore', module='np.core.fromnumeric') + ... # do something that raises a warning but ignore those in + ... 
# np.core.fromnumeric + """ + class_modules = () + + def __init__(self, record=False, modules=()): + self.modules = set(modules).union(self.class_modules) + self._warnreg_copies = {} + super(clear_and_catch_warnings, self).__init__(record=record) + + def __enter__(self): + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod_reg = mod.__warningregistry__ + self._warnreg_copies[mod] = mod_reg.copy() + mod_reg.clear() + return super(clear_and_catch_warnings, self).__enter__() + + def __exit__(self, *exc_info): + super(clear_and_catch_warnings, self).__exit__(*exc_info) + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod.__warningregistry__.clear() + if mod in self._warnreg_copies: + mod.__warningregistry__.update(self._warnreg_copies[mod]) + + +class suppress_warnings(object): + """ + Context manager and decorator doing much the same as + ``warnings.catch_warnings``. + + However, it also provides a filter mechanism to work around + http://bugs.python.org/issue4180. + + This bug causes Python before 3.4 to not reliably show warnings again + after they have been ignored once (even within catch_warnings). It + means that no "ignore" filter can be used easily, since following + tests might need to see the warning. Additionally it allows easier + specificity for testing warnings and can be nested. + + Parameters + ---------- + forwarding_rule : str, optional + One of "always", "once", "module", or "location". Analogous to + the usual warnings module filter mode, it is useful to reduce + noise mostly on the outmost level. Unsuppressed and unrecorded + warnings will be forwarded based on this rule. Defaults to "always". + "location" is equivalent to the warnings "default", match by exact + location the warning warning originated from. + + Notes + ----- + Filters added inside the context manager will be discarded again + when leaving it. Upon entering all filters defined outside a + context will be applied automatically. 
+ + When a recording filter is added, matching warnings are stored in the + ``log`` attribute as well as in the list returned by ``record``. + + If filters are added and the ``module`` keyword is given, the + warning registry of this module will additionally be cleared when + applying it, entering the context, or exiting it. This could cause + warnings to appear a second time after leaving the context if they + were configured to be printed once (default) and were already + printed before the context was entered. + + Nesting this context manager will work as expected when the + forwarding rule is "always" (default). Unfiltered and unrecorded + warnings will be passed out and be matched by the outer level. + On the outmost level they will be printed (or caught by another + warnings context). The forwarding rule argument can modify this + behaviour. + + Like ``catch_warnings`` this context manager is not threadsafe. + + Examples + -------- + >>> with suppress_warnings() as sup: + ... sup.filter(DeprecationWarning, "Some text") + ... sup.filter(module=np.ma.core) + ... log = sup.record(FutureWarning, "Does this occur?") + ... command_giving_warnings() + ... # The FutureWarning was given once, the filtered warnings were + ... # ignored. All other warnings abide outside settings (may be + ... # printed/error) + ... assert_(len(log) == 1) + ... assert_(len(sup.log) == 1) # also stored in log attribute + + Or as a decorator: + + >>> sup = suppress_warnings() + >>> sup.filter(module=np.ma.core) # module must match exact + >>> @sup + >>> def some_function(): + ... # do something which causes a warning in np.ma.core + ... 
pass + """ + def __init__(self, forwarding_rule="always"): + self._entered = False + + # Suppressions are either instance or defined inside one with block: + self._suppressions = [] + + if forwarding_rule not in {"always", "module", "once", "location"}: + raise ValueError("unsupported forwarding rule.") + self._forwarding_rule = forwarding_rule + + def _clear_registries(self): + if hasattr(warnings, "_filters_mutated"): + # clearing the registry should not be necessary on new pythons, + # instead the filters should be mutated. + warnings._filters_mutated() + return + # Simply clear the registry, this should normally be harmless, + # note that on new pythons it would be invalidated anyway. + for module in self._tmp_modules: + if hasattr(module, "__warningregistry__"): + module.__warningregistry__.clear() + + def _filter(self, category=Warning, message="", module=None, record=False): + if record: + record = [] # The log where to store warnings + else: + record = None + if self._entered: + if module is None: + warnings.filterwarnings( + "always", category=category, message=message) + else: + module_regex = module.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=category, message=message, + module=module_regex) + self._tmp_modules.add(module) + self._clear_registries() + + self._tmp_suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + else: + self._suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + + return record + + def filter(self, category=Warning, message="", module=None): + """ + Add a new suppressing filter or apply it if the state is entered. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. 
This may make + it unreliable for external modules. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + self._filter(category=category, message=message, module=module, + record=False) + + def record(self, category=Warning, message="", module=None): + """ + Append a new recording filter or apply it if the state is entered. + + All warnings matching will be appended to the ``log`` attribute. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Returns + ------- + log : list + A list which will be filled with all matched warnings. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. 
+ """ + return self._filter(category=category, message=message, module=module, + record=True) + + def __enter__(self): + if self._entered: + raise RuntimeError("cannot enter suppress_warnings twice.") + + self._orig_show = warnings.showwarning + self._filters = warnings.filters + warnings.filters = self._filters[:] + + self._entered = True + self._tmp_suppressions = [] + self._tmp_modules = set() + self._forwarded = set() + + self.log = [] # reset global log (no need to keep same list) + + for cat, mess, _, mod, log in self._suppressions: + if log is not None: + del log[:] # clear the log + if mod is None: + warnings.filterwarnings( + "always", category=cat, message=mess) + else: + module_regex = mod.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=cat, message=mess, + module=module_regex) + self._tmp_modules.add(mod) + warnings.showwarning = self._showwarning + self._clear_registries() + + return self + + def __exit__(self, *exc_info): + warnings.showwarning = self._orig_show + warnings.filters = self._filters + self._clear_registries() + self._entered = False + del self._orig_show + del self._filters + + def _showwarning(self, message, category, filename, lineno, + *args, **kwargs): + use_warnmsg = kwargs.pop("use_warnmsg", None) + for cat, _, pattern, mod, rec in ( + self._suppressions + self._tmp_suppressions)[::-1]: + if (issubclass(category, cat) and + pattern.match(message.args[0]) is not None): + if mod is None: + # Message and category match, either recorded or ignored + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + # Use startswith, because warnings strips the c or o from + # .pyc/.pyo files. 
+ elif mod.__file__.startswith(filename): + # The message and module (filename) match + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + + # There is no filter in place, so pass to the outside handler + # unless we should only pass it once + if self._forwarding_rule == "always": + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, + *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + return + + if self._forwarding_rule == "once": + signature = (message.args, category) + elif self._forwarding_rule == "module": + signature = (message.args, category, filename) + elif self._forwarding_rule == "location": + signature = (message.args, category, filename, lineno) + + if signature in self._forwarded: + return + self._forwarded.add(signature) + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, + **kwargs) + else: + self._orig_showmsg(use_warnmsg) + + def __call__(self, func): + """ + Function decorator to apply certain suppressions to a whole + function. 
+ """ + @wraps(func) + def new_func(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return new_func diff -Nru python-numpy-1.13.3/numpy/testing/setup.py python-numpy-1.14.5/numpy/testing/setup.py --- python-numpy-1.13.3/numpy/testing/setup.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/setup.py 2018-06-12 18:28:52.000000000 +0000 @@ -6,6 +6,7 @@ from numpy.distutils.misc_util import Configuration config = Configuration('testing', parent_package, top_path) + config.add_subpackage('nose_tools') config.add_data_dir('tests') return config diff -Nru python-numpy-1.13.3/numpy/testing/tests/test_decorators.py python-numpy-1.14.5/numpy/testing/tests/test_decorators.py --- python-numpy-1.13.3/numpy/testing/tests/test_decorators.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/tests/test_decorators.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,3 +1,7 @@ +""" +Test the decorators from ``testing.decorators``. + +""" from __future__ import division, absolute_import, print_function import warnings @@ -13,6 +17,7 @@ assert_(slow_func.slow) + def test_setastest(): @dec.setastest() def f_default(a): @@ -30,6 +35,7 @@ assert_(f_istest.__test__) assert_(not f_isnottest.__test__) + class DidntSkipException(Exception): pass @@ -182,5 +188,13 @@ assert_raises(AssertionError, deprecated_func3) +@dec.parametrize('base, power, expected', + [(1, 1, 1), + (2, 1, 2), + (2, 2, 4)]) +def test_parametrize(base, power, expected): + assert_(base**power == expected) + + if __name__ == '__main__': run_module_suite() diff -Nru python-numpy-1.13.3/numpy/testing/tests/test_utils.py python-numpy-1.14.5/numpy/testing/tests/test_utils.py --- python-numpy-1.13.3/numpy/testing/tests/test_utils.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/tests/test_utils.py 2018-06-12 18:28:52.000000000 +0000 @@ -4,6 +4,7 @@ import sys import os import itertools +import textwrap import numpy as np from numpy.testing 
import ( @@ -61,7 +62,7 @@ def test_objarray(self): """Test object arrays.""" - a = np.array([1, 1], dtype=np.object) + a = np.array([1, 1], dtype=object) self._test_equal(a, 1) def test_array_likes(self): @@ -134,14 +135,14 @@ def test_recarrays(self): """Test record arrays.""" - a = np.empty(2, [('floupi', np.float), ('floupa', np.float)]) + a = np.empty(2, [('floupi', float), ('floupa', float)]) a['floupi'] = [1, 2] a['floupa'] = [1, 2] b = a.copy() self._test_equal(a, b) - c = np.empty(2, [('floupipi', np.float), ('floupa', np.float)]) + c = np.empty(2, [('floupipi', float), ('floupa', float)]) c['floupipi'] = a['floupi'].copy() c['floupa'] = a['floupa'].copy() @@ -159,9 +160,9 @@ err_msg = 'There is a mismatch' a = build_err_msg([x, y], err_msg) - b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([ ' - '1.00001, 2.00002, 3.00003])\n DESIRED: array([ 1.00002, ' - '2.00003, 3.00004])') + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, ' + '2.00003, 3.00004])') self.assertEqual(a, b) def test_build_err_msg_no_verbose(self): @@ -179,8 +180,8 @@ err_msg = 'There is a mismatch' a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) - b = ('\nItems are not equal: There is a mismatch\n FOO: array([ ' - '1.00001, 2.00002, 3.00003])\n BAR: array([ 1.00002, 2.00003, ' + b = ('\nItems are not equal: There is a mismatch\n FOO: array([' + '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, ' '3.00004])') self.assertEqual(a, b) @@ -190,9 +191,9 @@ err_msg = 'There is a mismatch' a = build_err_msg([x, y], err_msg, precision=10) - b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([ ' - '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([ ' - '1.000000002, 2.00003 , 3.00004 ])') + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([' + '1.000000002, 2.00003 , 3.00004 ])') 
self.assertEqual(a, b) @@ -212,6 +213,26 @@ self._assert_func([np.inf], [np.inf]) self._test_not_equal(np.inf, [np.inf]) + def test_datetime(self): + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "s") + ) + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "m") + ) + + # gh-10081 + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "s") + ) + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "m") + ) + def test_nat_items(self): # not a datetime nadt_no_unit = np.datetime64("NaT") @@ -267,14 +288,21 @@ try: self._assert_func(np.array([1, 2]), np.matrix([1, 2])) except AssertionError as e: - self.assertEqual( - str(e), - "\nArrays are not equal\n\n" - "(shapes (2,), (1, 2) mismatch)\n" - " x: array([1, 2])\n" - " y: [repr failed for : The truth value of an array " - "with more than one element is ambiguous. Use a.any() or " - "a.all()]") + msg = str(e) + msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)") + msg_reference = textwrap.dedent("""\ + + Arrays are not equal + + (shapes (2,), (1, 2) mismatch) + x: array([1, 2]) + y: matrix([[1, 2]])""") + try: + self.assertEqual(msg, msg_reference) + except AssertionError: + self.assertEqual(msg2, msg_reference) + else: + raise AssertionError("Did not raise") class TestArrayAlmostEqual(_GenericTest, unittest.TestCase): @@ -433,8 +461,8 @@ # test with a different amount of decimal digits # note that we only check for the formatting of the arrays themselves - b = ('x: array([ 1.00000000001, 2.00000000002, 3.00003 ' - ' ])\n y: array([ 1.00000000002, 2.00000000003, 3.00004 ])') + b = ('x: array([1.00000000001, 2.00000000002, 3.00003 ' + ' ])\n y: array([1.00000000002, 2.00000000003, 3.00004 ])') try: self._assert_func(x, y, decimal=12) except AssertionError as e: @@ -443,8 +471,8 @@ # with the default value of decimal digits, only the 3rd element differs # note that 
we only check for the formatting of the arrays themselves - b = ('x: array([ 1. , 2. , 3.00003])\n y: array([ 1. , ' - '2. , 3.00004])') + b = ('x: array([1. , 2. , 3.00003])\n y: array([1. , ' + '2. , 3.00004])') try: self._assert_func(x, y) except AssertionError as e: diff -Nru python-numpy-1.13.3/numpy/testing/utils.py python-numpy-1.14.5/numpy/testing/utils.py --- python-numpy-1.13.3/numpy/testing/utils.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/testing/utils.py 2018-06-12 18:28:52.000000000 +0000 @@ -1,29 +1,8 @@ """ -Utility function to facilitate testing. +Back compatibility utils module. It will import the appropriate +set of tools """ -from __future__ import division, absolute_import, print_function - -import os -import sys -import re -import operator -import warnings -from functools import partial, wraps -import shutil -import contextlib -from tempfile import mkdtemp, mkstemp -from unittest.case import SkipTest - -from numpy.core import( - float32, empty, arange, array_repr, ndarray, isnat, array) -from numpy.lib.utils import deprecate - -if sys.version_info[0] >= 3: - from io import StringIO -else: - from StringIO import StringIO - __all__ = [ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', 'assert_array_equal', 'assert_array_less', 'assert_string_equal', @@ -34,2195 +13,8 @@ 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', - 'HAS_REFCOUNT', 'suppress_warnings' + 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare', + '_assert_valid_refcount', '_gen_alignment_data', ] - -class KnownFailureException(Exception): - '''Raise this exception to mark a test as a known failing test.''' - pass - - -KnownFailureTest = KnownFailureException # backwards compat -verbose = 0 - -IS_PYPY = '__pypy__' in sys.modules -HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None 
- - -def import_nose(): - """ Import nose only when needed. - """ - nose_is_good = True - minimum_nose_version = (1, 0, 0) - try: - import nose - except ImportError: - nose_is_good = False - else: - if nose.__versioninfo__ < minimum_nose_version: - nose_is_good = False - - if not nose_is_good: - msg = ('Need nose >= %d.%d.%d for tests - see ' - 'http://nose.readthedocs.io' % - minimum_nose_version) - raise ImportError(msg) - - return nose - - -def assert_(val, msg=''): - """ - Assert that works in release mode. - Accepts callable msg to allow deferring evaluation until failure. - - The Python built-in ``assert`` does not work when executing code in - optimized mode (the ``-O`` flag) - no byte-code is generated for it. - - For documentation on usage, refer to the Python documentation. - - """ - __tracebackhide__ = True # Hide traceback for py.test - if not val: - try: - smsg = msg() - except TypeError: - smsg = msg - raise AssertionError(smsg) - - -def gisnan(x): - """like isnan, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isnan and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. - - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isnan - st = isnan(x) - if isinstance(st, type(NotImplemented)): - raise TypeError("isnan not supported for this type") - return st - - -def gisfinite(x): - """like isfinite, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isfinite and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. 
- - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isfinite, errstate - with errstate(invalid='ignore'): - st = isfinite(x) - if isinstance(st, type(NotImplemented)): - raise TypeError("isfinite not supported for this type") - return st - - -def gisinf(x): - """like isinf, but always raise an error if type not supported instead of - returning a TypeError object. - - Notes - ----- - isinf and other ufunc sometimes return a NotImplementedType object instead - of raising any exception. This function is a wrapper to make sure an - exception is always raised. - - This should be removed once this problem is solved at the Ufunc level.""" - from numpy.core import isinf, errstate - with errstate(invalid='ignore'): - st = isinf(x) - if isinstance(st, type(NotImplemented)): - raise TypeError("isinf not supported for this type") - return st - - -@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. " - "Use numpy.random.rand instead.") -def rand(*args): - """Returns an array of random numbers with the given shape. - - This only uses the standard library, so it is useful for testing purposes. - """ - import random - from numpy.core import zeros, float64 - results = zeros(args, float64) - f = results.flat - for i in range(len(f)): - f[i] = random.random() - return results - - -if os.name == 'nt': - # Code "stolen" from enthought/debug/memusage.py - def GetPerformanceAttributes(object, counter, instance=None, - inum=-1, format=None, machine=None): - # NOTE: Many counters require 2 samples to give accurate results, - # including "% Processor Time" (as by definition, at any instant, a - # thread's CPU usage is either 0 or 100). To read counters like this, - # you should copy this function, but keep the counter open, and call - # CollectQueryData() each time you need to know. 
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp - # My older explanation for this was that the "AddCounter" process forced - # the CPU to 100%, but the above makes more sense :) - import win32pdh - if format is None: - format = win32pdh.PDH_FMT_LONG - path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter)) - hq = win32pdh.OpenQuery() - try: - hc = win32pdh.AddCounter(hq, path) - try: - win32pdh.CollectQueryData(hq) - type, val = win32pdh.GetFormattedCounterValue(hc, format) - return val - finally: - win32pdh.RemoveCounter(hc) - finally: - win32pdh.CloseQuery(hq) - - def memusage(processName="python", instance=0): - # from win32pdhutil, part of the win32all package - import win32pdh - return GetPerformanceAttributes("Process", "Virtual Bytes", - processName, instance, - win32pdh.PDH_FMT_LONG, None) -elif sys.platform[:5] == 'linux': - - def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())): - """ - Return virtual memory size in bytes of the running python. - - """ - try: - f = open(_proc_pid_stat, 'r') - l = f.readline().split(' ') - f.close() - return int(l[22]) - except: - return -else: - def memusage(): - """ - Return memory usage of running python. [Not implemented] - - """ - raise NotImplementedError - - -if sys.platform[:5] == 'linux': - def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()), - _load_time=[]): - """ - Return number of jiffies elapsed. - - Return number of jiffies (1/100ths of a second) that this - process has been scheduled in user mode. See man 5 proc. - - """ - import time - if not _load_time: - _load_time.append(time.time()) - try: - f = open(_proc_pid_stat, 'r') - l = f.readline().split(' ') - f.close() - return int(l[13]) - except: - return int(100*(time.time()-_load_time[0])) -else: - # os.getpid is not in all platforms available. - # Using time is safe but inaccurate, especially when process - # was suspended or sleeping. 
- def jiffies(_load_time=[]): - """ - Return number of jiffies elapsed. - - Return number of jiffies (1/100ths of a second) that this - process has been scheduled in user mode. See man 5 proc. - - """ - import time - if not _load_time: - _load_time.append(time.time()) - return int(100*(time.time()-_load_time[0])) - - -def build_err_msg(arrays, err_msg, header='Items are not equal:', - verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): - msg = ['\n' + header] - if err_msg: - if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): - msg = [msg[0] + ' ' + err_msg] - else: - msg.append(err_msg) - if verbose: - for i, a in enumerate(arrays): - - if isinstance(a, ndarray): - # precision argument is only needed if the objects are ndarrays - r_func = partial(array_repr, precision=precision) - else: - r_func = repr - - try: - r = r_func(a) - except Exception as exc: - r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc) - if r.count('\n') > 3: - r = '\n'.join(r.splitlines()[:3]) - r += '...' - msg.append(' %s: %s' % (names[i], r)) - return '\n'.join(msg) - - -def assert_equal(actual, desired, err_msg='', verbose=True): - """ - Raises an AssertionError if two objects are not equal. - - Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), - check that all elements of these objects are equal. An exception is raised - at the first conflicting values. - - Parameters - ---------- - actual : array_like - The object to check. - desired : array_like - The expected object. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal. - - Examples - -------- - >>> np.testing.assert_equal([4,5], [4,6]) - ... 
- : - Items are not equal: - item=1 - ACTUAL: 5 - DESIRED: 6 - - """ - __tracebackhide__ = True # Hide traceback for py.test - if isinstance(desired, dict): - if not isinstance(actual, dict): - raise AssertionError(repr(type(actual))) - assert_equal(len(actual), len(desired), err_msg, verbose) - for k, i in desired.items(): - if k not in actual: - raise AssertionError(repr(k)) - assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose) - return - if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): - assert_equal(len(actual), len(desired), err_msg, verbose) - for k in range(len(desired)): - assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose) - return - from numpy.core import ndarray, isscalar, signbit - from numpy.lib import iscomplexobj, real, imag - if isinstance(actual, ndarray) or isinstance(desired, ndarray): - return assert_array_equal(actual, desired, err_msg, verbose) - msg = build_err_msg([actual, desired], err_msg, verbose=verbose) - - # Handle complex numbers: separate into real/imag to handle - # nan/inf/negative zero correctly - # XXX: catch ValueError for subclasses of ndarray where iscomplex fail - try: - usecomplex = iscomplexobj(actual) or iscomplexobj(desired) - except ValueError: - usecomplex = False - - if usecomplex: - if iscomplexobj(actual): - actualr = real(actual) - actuali = imag(actual) - else: - actualr = actual - actuali = 0 - if iscomplexobj(desired): - desiredr = real(desired) - desiredi = imag(desired) - else: - desiredr = desired - desiredi = 0 - try: - assert_equal(actualr, desiredr) - assert_equal(actuali, desiredi) - except AssertionError: - raise AssertionError(msg) - - # isscalar test to check cases such as [np.nan] != np.nan - if isscalar(desired) != isscalar(actual): - raise AssertionError(msg) - - # Inf/nan/negative zero handling - try: - # If one of desired/actual is not finite, handle it specially here: - # check that both are nan if any is a nan, and test for 
equality - # otherwise - if not (gisfinite(desired) and gisfinite(actual)): - isdesnan = gisnan(desired) - isactnan = gisnan(actual) - if isdesnan or isactnan: - if not (isdesnan and isactnan): - raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) - return - elif desired == 0 and actual == 0: - if not signbit(desired) == signbit(actual): - raise AssertionError(msg) - # If TypeError or ValueError raised while using isnan and co, just handle - # as before - except (TypeError, ValueError, NotImplementedError): - pass - - try: - # If both are NaT (and have the same dtype -- datetime or timedelta) - # they are considered equal. - if (isnat(desired) == isnat(actual) and - array(desired).dtype.type == array(actual).dtype.type): - return - else: - raise AssertionError(msg) - - # If TypeError or ValueError raised while using isnan and co, just handle - # as before - except (TypeError, ValueError, NotImplementedError): - pass - - # Explicitly use __eq__ for comparison, ticket #2552 - if not (desired == actual): - raise AssertionError(msg) - - -def print_assert_equal(test_string, actual, desired): - """ - Test if two objects are equal, and print an error message if test fails. - - The test is performed with ``actual == desired``. - - Parameters - ---------- - test_string : str - The message supplied to AssertionError. - actual : object - The object to test for equality against `desired`. - desired : object - The expected result. - - Examples - -------- - >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) - >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) - Traceback (most recent call last): - ... 
- AssertionError: Test XYZ of func xyz failed - ACTUAL: - [0, 1] - DESIRED: - [0, 2] - - """ - __tracebackhide__ = True # Hide traceback for py.test - import pprint - - if not (actual == desired): - msg = StringIO() - msg.write(test_string) - msg.write(' failed\nACTUAL: \n') - pprint.pprint(actual, msg) - msg.write('DESIRED: \n') - pprint.pprint(desired, msg) - raise AssertionError(msg.getvalue()) - - -def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): - """ - Raises an AssertionError if two items are not equal up to desired - precision. - - .. note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - The test verifies that the elements of ``actual`` and ``desired`` satisfy. - - ``abs(desired-actual) < 1.5 * 10**(-decimal)`` - - That is a looser test than originally documented, but agrees with what the - actual implementation in `assert_array_almost_equal` did up to rounding - vagaries. An exception is raised at conflicting values. For ndarrays this - delegates to assert_array_almost_equal - - Parameters - ---------- - actual : array_like - The object to check. - desired : array_like - The expected object. - decimal : int, optional - Desired precision, default is 7. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. 
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - >>> import numpy.testing as npt - >>> npt.assert_almost_equal(2.3333333333333, 2.33333334) - >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) - ... - : - Items are not equal: - ACTUAL: 2.3333333333333002 - DESIRED: 2.3333333399999998 - - >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]), - ... np.array([1.0,2.33333334]), decimal=9) - ... - : - Arrays are not almost equal - - (mismatch 50.0%) - x: array([ 1. , 2.33333333]) - y: array([ 1. , 2.33333334]) - - """ - __tracebackhide__ = True # Hide traceback for py.test - from numpy.core import ndarray - from numpy.lib import iscomplexobj, real, imag - - # Handle complex numbers: separate into real/imag to handle - # nan/inf/negative zero correctly - # XXX: catch ValueError for subclasses of ndarray where iscomplex fail - try: - usecomplex = iscomplexobj(actual) or iscomplexobj(desired) - except ValueError: - usecomplex = False - - def _build_err_msg(): - header = ('Arrays are not almost equal to %d decimals' % decimal) - return build_err_msg([actual, desired], err_msg, verbose=verbose, - header=header) - - if usecomplex: - if iscomplexobj(actual): - actualr = real(actual) - actuali = imag(actual) - else: - actualr = actual - actuali = 0 - if iscomplexobj(desired): - desiredr = real(desired) - desiredi = imag(desired) - else: - desiredr = desired - desiredi = 0 - try: - assert_almost_equal(actualr, desiredr, decimal=decimal) - assert_almost_equal(actuali, desiredi, decimal=decimal) - except AssertionError: - raise AssertionError(_build_err_msg()) - - if isinstance(actual, (ndarray, tuple, list)) \ - or isinstance(desired, (ndarray, tuple, list)): - return assert_array_almost_equal(actual, desired, decimal, err_msg) - try: - # If one of desired/actual is not finite, handle it specially here: - # check that both are nan if any is a nan, and test for equality - # otherwise - if not 
(gisfinite(desired) and gisfinite(actual)): - if gisnan(desired) or gisnan(actual): - if not (gisnan(desired) and gisnan(actual)): - raise AssertionError(_build_err_msg()) - else: - if not desired == actual: - raise AssertionError(_build_err_msg()) - return - except (NotImplementedError, TypeError): - pass - if abs(desired - actual) >= 1.5 * 10.0**(-decimal): - raise AssertionError(_build_err_msg()) - - -def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): - """ - Raises an AssertionError if two items are not equal up to significant - digits. - - .. note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - Given two numbers, check that they are approximately equal. - Approximately equal is defined as the number of significant digits - that agree. - - Parameters - ---------- - actual : scalar - The object to check. - desired : scalar - The expected object. - significant : int, optional - Desired precision, default is 7. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) - >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, - significant=8) - >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, - significant=8) - ... 
- : - Items are not equal to 8 significant digits: - ACTUAL: 1.234567e-021 - DESIRED: 1.2345672000000001e-021 - - the evaluated condition that raises the exception is - - >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) - True - - """ - __tracebackhide__ = True # Hide traceback for py.test - import numpy as np - - (actual, desired) = map(float, (actual, desired)) - if desired == actual: - return - # Normalized the numbers to be in range (-10.0,10.0) - # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) - with np.errstate(invalid='ignore'): - scale = 0.5*(np.abs(desired) + np.abs(actual)) - scale = np.power(10, np.floor(np.log10(scale))) - try: - sc_desired = desired/scale - except ZeroDivisionError: - sc_desired = 0.0 - try: - sc_actual = actual/scale - except ZeroDivisionError: - sc_actual = 0.0 - msg = build_err_msg([actual, desired], err_msg, - header='Items are not equal to %d significant digits:' % - significant, - verbose=verbose) - try: - # If one of desired/actual is not finite, handle it specially here: - # check that both are nan if any is a nan, and test for equality - # otherwise - if not (gisfinite(desired) and gisfinite(actual)): - if gisnan(desired) or gisnan(actual): - if not (gisnan(desired) and gisnan(actual)): - raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) - return - except (TypeError, NotImplementedError): - pass - if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)): - raise AssertionError(msg) - - -def assert_array_compare(comparison, x, y, err_msg='', verbose=True, - header='', precision=6, equal_nan=True, - equal_inf=True): - __tracebackhide__ = True # Hide traceback for py.test - from numpy.core import array, isnan, isinf, any, inf - x = array(x, copy=False, subok=True) - y = array(y, copy=False, subok=True) - - def isnumber(x): - return x.dtype.char in '?bhilqpBHILQPefdgFDG' - - def istime(x): - return x.dtype.char in "Mm" - - def 
chk_same_position(x_id, y_id, hasval='nan'): - """Handling nan/inf: check that x and y have the nan/inf at the same - locations.""" - try: - assert_array_equal(x_id, y_id) - except AssertionError: - msg = build_err_msg([x, y], - err_msg + '\nx and y %s location mismatch:' - % (hasval), verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - raise AssertionError(msg) - - try: - cond = (x.shape == () or y.shape == ()) or x.shape == y.shape - if not cond: - msg = build_err_msg([x, y], - err_msg - + '\n(shapes %s, %s mismatch)' % (x.shape, - y.shape), - verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - raise AssertionError(msg) - - if isnumber(x) and isnumber(y): - has_nan = has_inf = False - if equal_nan: - x_isnan, y_isnan = isnan(x), isnan(y) - # Validate that NaNs are in the same place - has_nan = any(x_isnan) or any(y_isnan) - if has_nan: - chk_same_position(x_isnan, y_isnan, hasval='nan') - - if equal_inf: - x_isinf, y_isinf = isinf(x), isinf(y) - # Validate that infinite values are in the same place - has_inf = any(x_isinf) or any(y_isinf) - if has_inf: - # Check +inf and -inf separately, since they are different - chk_same_position(x == +inf, y == +inf, hasval='+inf') - chk_same_position(x == -inf, y == -inf, hasval='-inf') - - if has_nan and has_inf: - x = x[~(x_isnan | x_isinf)] - y = y[~(y_isnan | y_isinf)] - elif has_nan: - x = x[~x_isnan] - y = y[~y_isnan] - elif has_inf: - x = x[~x_isinf] - y = y[~y_isinf] - - # Only do the comparison if actual values are left - if x.size == 0: - return - - elif istime(x) and istime(y): - # If one is datetime64 and the other timedelta64 there is no point - if equal_nan and x.dtype.type == y.dtype.type: - x_isnat, y_isnat = isnat(x), isnat(y) - - if any(x_isnat) or any(y_isnat): - chk_same_position(x_isnat, y_isnat, hasval="NaT") - - if any(x_isnat) or any(y_isnat): - x = x[~x_isnat] - y = y[~y_isnat] - - val = comparison(x, y) - - if isinstance(val, bool): - cond = val - reduced 
= [0] - else: - reduced = val.ravel() - cond = reduced.all() - reduced = reduced.tolist() - if not cond: - match = 100-100.0*reduced.count(1)/len(reduced) - msg = build_err_msg([x, y], - err_msg - + '\n(mismatch %s%%)' % (match,), - verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - if not cond: - raise AssertionError(msg) - except ValueError: - import traceback - efmt = traceback.format_exc() - header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header) - - msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, - names=('x', 'y'), precision=precision) - raise ValueError(msg) - - -def assert_array_equal(x, y, err_msg='', verbose=True): - """ - Raises an AssertionError if two array_like objects are not equal. - - Given two array_like objects, check that the shape is equal and all - elements of these objects are equal. An exception is raised at - shape mismatch or conflicting values. In contrast to the standard usage - in numpy, NaNs are compared like numbers, no assertion is raised if - both objects have NaNs in the same positions. - - The usual caution for verifying equality with floating point numbers is - advised. - - Parameters - ---------- - x : array_like - The actual object to check. - y : array_like - The desired, expected object. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired objects are not equal. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - The first assert does not raise an exception: - - >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], - ... 
[np.exp(0),2.33333, np.nan]) - - Assert fails with numerical inprecision with floats: - - >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], - ... [1, np.sqrt(np.pi)**2, np.nan]) - ... - : - AssertionError: - Arrays are not equal - - (mismatch 50.0%) - x: array([ 1. , 3.14159265, NaN]) - y: array([ 1. , 3.14159265, NaN]) - - Use `assert_allclose` or one of the nulp (number of floating point values) - functions for these cases instead: - - >>> np.testing.assert_allclose([1.0,np.pi,np.nan], - ... [1, np.sqrt(np.pi)**2, np.nan], - ... rtol=1e-10, atol=0) - - """ - __tracebackhide__ = True # Hide traceback for py.test - assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, - verbose=verbose, header='Arrays are not equal') - - -def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): - """ - Raises an AssertionError if two objects are not equal up to desired - precision. - - .. note:: It is recommended to use one of `assert_allclose`, - `assert_array_almost_equal_nulp` or `assert_array_max_ulp` - instead of this function for more consistent floating point - comparisons. - - The test verifies identical shapes and that the elements of ``actual`` and - ``desired`` satisfy. - - ``abs(desired-actual) < 1.5 * 10**(-decimal)`` - - That is a looser test than originally documented, but agrees with what the - actual implementation did up to rounding vagaries. An exception is raised - at shape mismatch or conflicting values. In contrast to the standard usage - in numpy, NaNs are compared like numbers, no assertion is raised if both - objects have NaNs in the same positions. - - Parameters - ---------- - x : array_like - The actual object to check. - y : array_like - The desired, expected object. - decimal : int, optional - Desired precision, default is 6. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. 
- - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. - - See Also - -------- - assert_allclose: Compare two array_like objects for equality with desired - relative and/or absolute precision. - assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal - - Examples - -------- - the first assert does not raise an exception - - >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], - [1.0,2.333,np.nan]) - - >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], - ... [1.0,2.33339,np.nan], decimal=5) - ... - : - AssertionError: - Arrays are not almost equal - - (mismatch 50.0%) - x: array([ 1. , 2.33333, NaN]) - y: array([ 1. , 2.33339, NaN]) - - >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], - ... [1.0,2.33333, 5], decimal=5) - : - ValueError: - Arrays are not almost equal - x: array([ 1. , 2.33333, NaN]) - y: array([ 1. , 2.33333, 5. ]) - - """ - __tracebackhide__ = True # Hide traceback for py.test - from numpy.core import around, number, float_, result_type, array - from numpy.core.numerictypes import issubdtype - from numpy.core.fromnumeric import any as npany - - def compare(x, y): - try: - if npany(gisinf(x)) or npany( gisinf(y)): - xinfid = gisinf(x) - yinfid = gisinf(y) - if not (xinfid == yinfid).all(): - return False - # if one item, x and y is +- inf - if x.size == y.size == 1: - return x == y - x = x[~xinfid] - y = y[~yinfid] - except (TypeError, NotImplementedError): - pass - - # make sure y is an inexact type to avoid abs(MIN_INT); will cause - # casting of x later. - dtype = result_type(y, 1.) 
- y = array(y, dtype=dtype, copy=False, subok=True) - z = abs(x - y) - - if not issubdtype(z.dtype, number): - z = z.astype(float_) # handle object arrays - - return z < 1.5 * 10.0**(-decimal) - - assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, - header=('Arrays are not almost equal to %d decimals' % decimal), - precision=decimal) - - -def assert_array_less(x, y, err_msg='', verbose=True): - """ - Raises an AssertionError if two array_like objects are not ordered by less - than. - - Given two array_like objects, check that the shape is equal and all - elements of the first object are strictly smaller than those of the - second object. An exception is raised at shape mismatch or incorrectly - ordered values. Shape mismatch does not raise if an object has zero - dimension. In contrast to the standard usage in numpy, NaNs are - compared, no assertion is raised if both objects have NaNs in the same - positions. - - - - Parameters - ---------- - x : array_like - The smaller object to check. - y : array_like - The larger object to compare. - err_msg : string - The error message to be printed in case of failure. - verbose : bool - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired objects are not equal. - - See Also - -------- - assert_array_equal: tests objects for equality - assert_array_almost_equal: test objects for equality up to precision - - - - Examples - -------- - >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan]) - >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan]) - ... - : - Arrays are not less-ordered - (mismatch 50.0%) - x: array([ 1., 1., NaN]) - y: array([ 1., 2., NaN]) - - >>> np.testing.assert_array_less([1.0, 4.0], 3) - ... - : - Arrays are not less-ordered - (mismatch 50.0%) - x: array([ 1., 4.]) - y: array(3) - - >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4]) - ... 
- : - Arrays are not less-ordered - (shapes (3,), (1,) mismatch) - x: array([ 1., 2., 3.]) - y: array([4]) - - """ - __tracebackhide__ = True # Hide traceback for py.test - assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, - verbose=verbose, - header='Arrays are not less-ordered', - equal_inf=False) - - -def runstring(astr, dict): - exec(astr, dict) - - -def assert_string_equal(actual, desired): - """ - Test if two strings are equal. - - If the given strings are equal, `assert_string_equal` does nothing. - If they are not equal, an AssertionError is raised, and the diff - between the strings is shown. - - Parameters - ---------- - actual : str - The string to test for equality against the expected string. - desired : str - The expected string. - - Examples - -------- - >>> np.testing.assert_string_equal('abc', 'abc') - >>> np.testing.assert_string_equal('abc', 'abcd') - Traceback (most recent call last): - File "", line 1, in - ... - AssertionError: Differences in strings: - - abc+ abcd? + - - """ - # delay import of difflib to reduce startup time - __tracebackhide__ = True # Hide traceback for py.test - import difflib - - if not isinstance(actual, str): - raise AssertionError(repr(type(actual))) - if not isinstance(desired, str): - raise AssertionError(repr(type(desired))) - if re.match(r'\A'+desired+r'\Z', actual, re.M): - return - - diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1))) - diff_list = [] - while diff: - d1 = diff.pop(0) - if d1.startswith(' '): - continue - if d1.startswith('- '): - l = [d1] - d2 = diff.pop(0) - if d2.startswith('? '): - l.append(d2) - d2 = diff.pop(0) - if not d2.startswith('+ '): - raise AssertionError(repr(d2)) - l.append(d2) - if diff: - d3 = diff.pop(0) - if d3.startswith('? 
'): - l.append(d3) - else: - diff.insert(0, d3) - if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]): - continue - diff_list.extend(l) - continue - raise AssertionError(repr(d1)) - if not diff_list: - return - msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip() - if actual != desired: - raise AssertionError(msg) - - -def rundocs(filename=None, raise_on_error=True): - """ - Run doctests found in the given file. - - By default `rundocs` raises an AssertionError on failure. - - Parameters - ---------- - filename : str - The path to the file for which the doctests are run. - raise_on_error : bool - Whether to raise an AssertionError when a doctest fails. Default is - True. - - Notes - ----- - The doctests can be run by the user/developer by adding the ``doctests`` - argument to the ``test()`` call. For example, to run all tests (including - doctests) for `numpy.lib`: - - >>> np.lib.test(doctests=True) #doctest: +SKIP - """ - from numpy.compat import npy_load_module - import doctest - if filename is None: - f = sys._getframe(1) - filename = f.f_globals['__file__'] - name = os.path.splitext(os.path.basename(filename))[0] - m = npy_load_module(name, filename) - - tests = doctest.DocTestFinder().find(m) - runner = doctest.DocTestRunner(verbose=False) - - msg = [] - if raise_on_error: - out = lambda s: msg.append(s) - else: - out = None - - for test in tests: - runner.run(test, out=out) - - if runner.failures > 0 and raise_on_error: - raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) - - -def raises(*args,**kwargs): - nose = import_nose() - return nose.tools.raises(*args,**kwargs) - - -def assert_raises(*args, **kwargs): - """ - assert_raises(exception_class, callable, *args, **kwargs) - assert_raises(exception_class) - - Fail unless an exception of class exception_class is thrown - by callable when invoked with arguments args and keyword - arguments kwargs. 
If a different type of exception is - thrown, it will not be caught, and the test case will be - deemed to have suffered an error, exactly as for an - unexpected exception. - - Alternatively, `assert_raises` can be used as a context manager: - - >>> from numpy.testing import assert_raises - >>> with assert_raises(ZeroDivisionError): - ... 1 / 0 - - is equivalent to - - >>> def div(x, y): - ... return x / y - >>> assert_raises(ZeroDivisionError, div, 1, 0) - - """ - __tracebackhide__ = True # Hide traceback for py.test - nose = import_nose() - return nose.tools.assert_raises(*args,**kwargs) - - -def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): - """ - assert_raises_regex(exception_class, expected_regexp, callable, *args, - **kwargs) - assert_raises_regex(exception_class, expected_regexp) - - Fail unless an exception of class exception_class and with message that - matches expected_regexp is thrown by callable when invoked with arguments - args and keyword arguments kwargs. - - Alternatively, can be used as a context manager like `assert_raises`. - - Name of this function adheres to Python 3.2+ reference, but should work in - all versions down to 2.6. - - Notes - ----- - .. versionadded:: 1.9.0 - - """ - __tracebackhide__ = True # Hide traceback for py.test - nose = import_nose() - - if sys.version_info.major >= 3: - funcname = nose.tools.assert_raises_regex - else: - # Only present in Python 2.7, missing from unittest in 2.6 - funcname = nose.tools.assert_raises_regexp - - return funcname(exception_class, expected_regexp, *args, **kwargs) - - -def decorate_methods(cls, decorator, testmatch=None): - """ - Apply a decorator to all methods in a class matching a regular expression. - - The given decorator is applied to all public methods of `cls` that are - matched by the regular expression `testmatch` - (``testmatch.search(methodname)``). Methods that are private, i.e. start - with an underscore, are ignored. 
- - Parameters - ---------- - cls : class - Class whose methods to decorate. - decorator : function - Decorator to apply to methods - testmatch : compiled regexp or str, optional - The regular expression. Default value is None, in which case the - nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) - is used. - If `testmatch` is a string, it is compiled to a regular expression - first. - - """ - if testmatch is None: - testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) - else: - testmatch = re.compile(testmatch) - cls_attr = cls.__dict__ - - # delayed import to reduce startup time - from inspect import isfunction - - methods = [_m for _m in cls_attr.values() if isfunction(_m)] - for function in methods: - try: - if hasattr(function, 'compat_func_name'): - funcname = function.compat_func_name - else: - funcname = function.__name__ - except AttributeError: - # not a function - continue - if testmatch.search(funcname) and not funcname.startswith('_'): - setattr(cls, funcname, decorator(function)) - return - - -def measure(code_str,times=1,label=None): - """ - Return elapsed time for executing code in the namespace of the caller. - - The supplied code string is compiled with the Python builtin ``compile``. - The precision of the timing is 10 milli-seconds. If the code will execute - fast on this timescale, it can be executed many times to get reasonable - timing accuracy. - - Parameters - ---------- - code_str : str - The code to be timed. - times : int, optional - The number of times the code is executed. Default is 1. The code is - only compiled once. - label : str, optional - A label to identify `code_str` with. This is passed into ``compile`` - as the second argument (for run-time error messages). - - Returns - ------- - elapsed : float - Total elapsed time in seconds for executing `code_str` `times` times. - - Examples - -------- - >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', - ... 
times=times) - >>> print("Time for a single execution : ", etime / times, "s") - Time for a single execution : 0.005 s - - """ - frame = sys._getframe(1) - locs, globs = frame.f_locals, frame.f_globals - - code = compile(code_str, - 'Test name: %s ' % label, - 'exec') - i = 0 - elapsed = jiffies() - while i < times: - i += 1 - exec(code, globs, locs) - elapsed = jiffies() - elapsed - return 0.01*elapsed - - -def _assert_valid_refcount(op): - """ - Check that ufuncs don't mishandle refcount of object `1`. - Used in a few regression tests. - """ - if not HAS_REFCOUNT: - return True - import numpy as np - - b = np.arange(100*100).reshape(100, 100) - c = b - i = 1 - - rc = sys.getrefcount(i) - for j in range(15): - d = op(b, c) - assert_(sys.getrefcount(i) >= rc) - del d # for pyflakes - - -def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, - err_msg='', verbose=True): - """ - Raises an AssertionError if two objects are not equal up to desired - tolerance. - - The test is equivalent to ``allclose(actual, desired, rtol, atol)``. - It compares the difference between `actual` and `desired` to - ``atol + rtol * abs(desired)``. - - .. versionadded:: 1.5.0 - - Parameters - ---------- - actual : array_like - Array obtained. - desired : array_like - Array desired. - rtol : float, optional - Relative tolerance. - atol : float, optional - Absolute tolerance. - equal_nan : bool, optional. - If True, NaNs will compare equal. - err_msg : str, optional - The error message to be printed in case of failure. - verbose : bool, optional - If True, the conflicting values are appended to the error message. - - Raises - ------ - AssertionError - If actual and desired are not equal up to specified precision. 
- - See Also - -------- - assert_array_almost_equal_nulp, assert_array_max_ulp - - Examples - -------- - >>> x = [1e-5, 1e-3, 1e-1] - >>> y = np.arccos(np.cos(x)) - >>> assert_allclose(x, y, rtol=1e-5, atol=0) - - """ - __tracebackhide__ = True # Hide traceback for py.test - import numpy as np - - def compare(x, y): - return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, - equal_nan=equal_nan) - - actual, desired = np.asanyarray(actual), np.asanyarray(desired) - header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol) - assert_array_compare(compare, actual, desired, err_msg=str(err_msg), - verbose=verbose, header=header, equal_nan=equal_nan) - - -def assert_array_almost_equal_nulp(x, y, nulp=1): - """ - Compare two arrays relatively to their spacing. - - This is a relatively robust method to compare two arrays whose amplitude - is variable. - - Parameters - ---------- - x, y : array_like - Input arrays. - nulp : int, optional - The maximum number of unit in the last place for tolerance (see Notes). - Default is 1. - - Returns - ------- - None - - Raises - ------ - AssertionError - If the spacing between `x` and `y` for one or more elements is larger - than `nulp`. - - See Also - -------- - assert_array_max_ulp : Check that all items of arrays differ in at most - N Units in the Last Place. - spacing : Return the distance between x and the nearest adjacent number. - - Notes - ----- - An assertion is raised if the following condition is not met:: - - abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y))) - - Examples - -------- - >>> x = np.array([1., 1e-10, 1e-20]) - >>> eps = np.finfo(x.dtype).eps - >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) - - >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) - Traceback (most recent call last): - ... 
- AssertionError: X and Y are not equal to 1 ULP (max is 2) - - """ - __tracebackhide__ = True # Hide traceback for py.test - import numpy as np - ax = np.abs(x) - ay = np.abs(y) - ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) - if not np.all(np.abs(x-y) <= ref): - if np.iscomplexobj(x) or np.iscomplexobj(y): - msg = "X and Y are not equal to %d ULP" % nulp - else: - max_nulp = np.max(nulp_diff(x, y)) - msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) - raise AssertionError(msg) - - -def assert_array_max_ulp(a, b, maxulp=1, dtype=None): - """ - Check that all items of arrays differ in at most N Units in the Last Place. - - Parameters - ---------- - a, b : array_like - Input arrays to be compared. - maxulp : int, optional - The maximum number of units in the last place that elements of `a` and - `b` can differ. Default is 1. - dtype : dtype, optional - Data-type to convert `a` and `b` to if given. Default is None. - - Returns - ------- - ret : ndarray - Array containing number of representable floating point numbers between - items in `a` and `b`. - - Raises - ------ - AssertionError - If one or more elements differ by more than `maxulp`. - - See Also - -------- - assert_array_almost_equal_nulp : Compare two arrays relatively to their - spacing. - - Examples - -------- - >>> a = np.linspace(0., 1., 100) - >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) - - """ - __tracebackhide__ = True # Hide traceback for py.test - import numpy as np - ret = nulp_diff(a, b, dtype) - if not np.all(ret <= maxulp): - raise AssertionError("Arrays are not almost equal up to %g ULP" % - maxulp) - return ret - - -def nulp_diff(x, y, dtype=None): - """For each item in x and y, return the number of representable floating - points between them. - - Parameters - ---------- - x : array_like - first input array - y : array_like - second input array - dtype : dtype, optional - Data-type to convert `x` and `y` to if given. Default is None. 
- - Returns - ------- - nulp : array_like - number of representable floating point numbers between each item in x - and y. - - Examples - -------- - # By definition, epsilon is the smallest number such as 1 + eps != 1, so - # there should be exactly one ULP between 1 and 1 + eps - >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) - 1.0 - """ - import numpy as np - if dtype: - x = np.array(x, dtype=dtype) - y = np.array(y, dtype=dtype) - else: - x = np.array(x) - y = np.array(y) - - t = np.common_type(x, y) - if np.iscomplexobj(x) or np.iscomplexobj(y): - raise NotImplementedError("_nulp not implemented for complex array") - - x = np.array(x, dtype=t) - y = np.array(y, dtype=t) - - if not x.shape == y.shape: - raise ValueError("x and y do not have the same shape: %s - %s" % - (x.shape, y.shape)) - - def _diff(rx, ry, vdt): - diff = np.array(rx-ry, dtype=vdt) - return np.abs(diff) - - rx = integer_repr(x) - ry = integer_repr(y) - return _diff(rx, ry, t) - - -def _integer_repr(x, vdt, comp): - # Reinterpret binary representation of the float as sign-magnitude: - # take into account two-complement representation - # See also - # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm - rx = x.view(vdt) - if not (rx.size == 1): - rx[rx < 0] = comp - rx[rx < 0] - else: - if rx < 0: - rx = comp - rx - - return rx - - -def integer_repr(x): - """Return the signed-magnitude interpretation of the binary representation of - x.""" - import numpy as np - if x.dtype == np.float32: - return _integer_repr(x, np.int32, np.int32(-2**31)) - elif x.dtype == np.float64: - return _integer_repr(x, np.int64, np.int64(-2**63)) - else: - raise ValueError("Unsupported dtype %s" % x.dtype) - - -# The following two classes are copied from python 2.6 warnings module (context -# manager) -class WarningMessage(object): - - """ - Holds the result of a single showwarning() call. 
- - Deprecated in 1.8.0 - - Notes - ----- - `WarningMessage` is copied from the Python 2.6 warnings module, - so it can be used in NumPy with older Python versions. - - """ - - _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", - "line") - - def __init__(self, message, category, filename, lineno, file=None, - line=None): - local_values = locals() - for attr in self._WARNING_DETAILS: - setattr(self, attr, local_values[attr]) - if category: - self._category_name = category.__name__ - else: - self._category_name = None - - def __str__(self): - return ("{message : %r, category : %r, filename : %r, lineno : %s, " - "line : %r}" % (self.message, self._category_name, - self.filename, self.lineno, self.line)) - - -class WarningManager(object): - """ - A context manager that copies and restores the warnings filter upon - exiting the context. - - The 'record' argument specifies whether warnings should be captured by a - custom implementation of ``warnings.showwarning()`` and be appended to a - list returned by the context manager. Otherwise None is returned by the - context manager. The objects appended to the list are arguments whose - attributes mirror the arguments to ``showwarning()``. - - The 'module' argument is to specify an alternative module to the module - named 'warnings' and imported under that name. This argument is only useful - when testing the warnings module itself. - - Deprecated in 1.8.0 - - Notes - ----- - `WarningManager` is a copy of the ``catch_warnings`` context manager - from the Python 2.6 warnings module, with slight modifications. - It is copied so it can be used in NumPy with older Python versions. 
- - """ - - def __init__(self, record=False, module=None): - self._record = record - if module is None: - self._module = sys.modules['warnings'] - else: - self._module = module - self._entered = False - - def __enter__(self): - if self._entered: - raise RuntimeError("Cannot enter %r twice" % self) - self._entered = True - self._filters = self._module.filters - self._module.filters = self._filters[:] - self._showwarning = self._module.showwarning - if self._record: - log = [] - - def showwarning(*args, **kwargs): - log.append(WarningMessage(*args, **kwargs)) - self._module.showwarning = showwarning - return log - else: - return None - - def __exit__(self): - if not self._entered: - raise RuntimeError("Cannot exit %r without entering first" % self) - self._module.filters = self._filters - self._module.showwarning = self._showwarning - - -@contextlib.contextmanager -def _assert_warns_context(warning_class, name=None): - __tracebackhide__ = True # Hide traceback for py.test - with suppress_warnings() as sup: - l = sup.record(warning_class) - yield - if not len(l) > 0: - name_str = " when calling %s" % name if name is not None else "" - raise AssertionError("No warning raised" + name_str) - - -def assert_warns(warning_class, *args, **kwargs): - """ - Fail unless the given callable throws the specified warning. - - A warning of class warning_class should be thrown by the callable when - invoked with arguments args and keyword arguments kwargs. - If a different type of warning is thrown, it will not be caught. - - If called with all arguments other than the warning class omitted, may be - used as a context manager: - - with assert_warns(SomeWarning): - do_something() - - The ability to be used as a context manager is new in NumPy v1.11.0. - - .. versionadded:: 1.4.0 - - Parameters - ---------- - warning_class : class - The class defining the warning that `func` is expected to throw. - func : callable - The callable to test. 
- \\*args : Arguments - Arguments passed to `func`. - \\*\\*kwargs : Kwargs - Keyword arguments passed to `func`. - - Returns - ------- - The value returned by `func`. - - """ - if not args: - return _assert_warns_context(warning_class) - - func = args[0] - args = args[1:] - with _assert_warns_context(warning_class, name=func.__name__): - return func(*args, **kwargs) - - -@contextlib.contextmanager -def _assert_no_warnings_context(name=None): - __tracebackhide__ = True # Hide traceback for py.test - with warnings.catch_warnings(record=True) as l: - warnings.simplefilter('always') - yield - if len(l) > 0: - name_str = " when calling %s" % name if name is not None else "" - raise AssertionError("Got warnings%s: %s" % (name_str, l)) - - -def assert_no_warnings(*args, **kwargs): - """ - Fail if the given callable produces any warnings. - - If called with all arguments omitted, may be used as a context manager: - - with assert_no_warnings(): - do_something() - - The ability to be used as a context manager is new in NumPy v1.11.0. - - .. versionadded:: 1.7.0 - - Parameters - ---------- - func : callable - The callable to test. - \\*args : Arguments - Arguments passed to `func`. - \\*\\*kwargs : Kwargs - Keyword arguments passed to `func`. - - Returns - ------- - The value returned by `func`. 
- - """ - if not args: - return _assert_no_warnings_context() - - func = args[0] - args = args[1:] - with _assert_no_warnings_context(name=func.__name__): - return func(*args, **kwargs) - - -def _gen_alignment_data(dtype=float32, type='binary', max_size=24): - """ - generator producing data with different alignment and offsets - to test simd vectorization - - Parameters - ---------- - dtype : dtype - data type to produce - type : string - 'unary': create data for unary operations, creates one input - and output array - 'binary': create data for unary operations, creates two input - and output array - max_size : integer - maximum size of data to produce - - Returns - ------- - if type is 'unary' yields one output, one input array and a message - containing information on the data - if type is 'binary' yields one output array, two input array and a message - containing information on the data - - """ - ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' - bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' - for o in range(3): - for s in range(o + 2, max(o + 3, max_size)): - if type == 'unary': - inp = lambda: arange(s, dtype=dtype)[o:] - out = empty((s,), dtype=dtype)[o:] - yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') - d = inp() - yield d, d, ufmt % (o, o, s, dtype, 'in place') - yield out[1:], inp()[:-1], ufmt % \ - (o + 1, o, s - 1, dtype, 'out of place') - yield out[:-1], inp()[1:], ufmt % \ - (o, o + 1, s - 1, dtype, 'out of place') - yield inp()[:-1], inp()[1:], ufmt % \ - (o, o + 1, s - 1, dtype, 'aliased') - yield inp()[1:], inp()[:-1], ufmt % \ - (o + 1, o, s - 1, dtype, 'aliased') - if type == 'binary': - inp1 = lambda: arange(s, dtype=dtype)[o:] - inp2 = lambda: arange(s, dtype=dtype)[o:] - out = empty((s,), dtype=dtype)[o:] - yield out, inp1(), inp2(), bfmt % \ - (o, o, o, s, dtype, 'out of place') - d = inp1() - yield d, d, inp2(), bfmt % \ - (o, o, o, s, dtype, 'in place1') - d = inp2() - yield d, inp1(), d, bfmt % \ - (o, o, o, 
s, dtype, 'in place2') - yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \ - (o + 1, o, o, s - 1, dtype, 'out of place') - yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ - (o, o + 1, o, s - 1, dtype, 'out of place') - yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ - (o, o, o + 1, s - 1, dtype, 'out of place') - yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ - (o + 1, o, o, s - 1, dtype, 'aliased') - yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ - (o, o + 1, o, s - 1, dtype, 'aliased') - yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ - (o, o, o + 1, s - 1, dtype, 'aliased') - - -class IgnoreException(Exception): - "Ignoring this exception due to disabled feature" - - -@contextlib.contextmanager -def tempdir(*args, **kwargs): - """Context manager to provide a temporary test folder. - - All arguments are passed as this to the underlying tempfile.mkdtemp - function. - - """ - tmpdir = mkdtemp(*args, **kwargs) - try: - yield tmpdir - finally: - shutil.rmtree(tmpdir) - - -@contextlib.contextmanager -def temppath(*args, **kwargs): - """Context manager for temporary files. - - Context manager that returns the path to a closed temporary file. Its - parameters are the same as for tempfile.mkstemp and are passed directly - to that function. The underlying file is removed when the context is - exited, so it should be closed at that time. - - Windows does not allow a temporary file to be opened if it is already - open, so the underlying file must be closed after opening before it - can be opened again. - - """ - fd, path = mkstemp(*args, **kwargs) - os.close(fd) - try: - yield path - finally: - os.remove(path) - - -class clear_and_catch_warnings(warnings.catch_warnings): - """ Context manager that resets warning registry for catching warnings - - Warnings can be slippery, because, whenever a warning is triggered, Python - adds a ``__warningregistry__`` member to the *calling* module. 
This makes - it impossible to retrigger the warning in this module, whatever you put in - the warnings filters. This context manager accepts a sequence of `modules` - as a keyword argument to its constructor and: - - * stores and removes any ``__warningregistry__`` entries in given `modules` - on entry; - * resets ``__warningregistry__`` to its previous state on exit. - - This makes it possible to trigger any warning afresh inside the context - manager without disturbing the state of warnings outside. - - For compatibility with Python 3.0, please consider all arguments to be - keyword-only. - - Parameters - ---------- - record : bool, optional - Specifies whether warnings should be captured by a custom - implementation of ``warnings.showwarning()`` and be appended to a list - returned by the context manager. Otherwise None is returned by the - context manager. The objects appended to the list are arguments whose - attributes mirror the arguments to ``showwarning()``. - modules : sequence, optional - Sequence of modules for which to reset warnings registry on entry and - restore on exit. To work correctly, all 'ignore' filters should - filter by one of these modules. - - Examples - -------- - >>> import warnings - >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]): - ... warnings.simplefilter('always') - ... warnings.filterwarnings('ignore', module='np.core.fromnumeric') - ... # do something that raises a warning but ignore those in - ... 
# np.core.fromnumeric - """ - class_modules = () - - def __init__(self, record=False, modules=()): - self.modules = set(modules).union(self.class_modules) - self._warnreg_copies = {} - super(clear_and_catch_warnings, self).__init__(record=record) - - def __enter__(self): - for mod in self.modules: - if hasattr(mod, '__warningregistry__'): - mod_reg = mod.__warningregistry__ - self._warnreg_copies[mod] = mod_reg.copy() - mod_reg.clear() - return super(clear_and_catch_warnings, self).__enter__() - - def __exit__(self, *exc_info): - super(clear_and_catch_warnings, self).__exit__(*exc_info) - for mod in self.modules: - if hasattr(mod, '__warningregistry__'): - mod.__warningregistry__.clear() - if mod in self._warnreg_copies: - mod.__warningregistry__.update(self._warnreg_copies[mod]) - - -class suppress_warnings(object): - """ - Context manager and decorator doing much the same as - ``warnings.catch_warnings``. - - However, it also provides a filter mechanism to work around - http://bugs.python.org/issue4180. - - This bug causes Python before 3.4 to not reliably show warnings again - after they have been ignored once (even within catch_warnings). It - means that no "ignore" filter can be used easily, since following - tests might need to see the warning. Additionally it allows easier - specificity for testing warnings and can be nested. - - Parameters - ---------- - forwarding_rule : str, optional - One of "always", "once", "module", or "location". Analogous to - the usual warnings module filter mode, it is useful to reduce - noise mostly on the outmost level. Unsuppressed and unrecorded - warnings will be forwarded based on this rule. Defaults to "always". - "location" is equivalent to the warnings "default", match by exact - location the warning warning originated from. - - Notes - ----- - Filters added inside the context manager will be discarded again - when leaving it. Upon entering all filters defined outside a - context will be applied automatically. 
- - When a recording filter is added, matching warnings are stored in the - ``log`` attribute as well as in the list returned by ``record``. - - If filters are added and the ``module`` keyword is given, the - warning registry of this module will additionally be cleared when - applying it, entering the context, or exiting it. This could cause - warnings to appear a second time after leaving the context if they - were configured to be printed once (default) and were already - printed before the context was entered. - - Nesting this context manager will work as expected when the - forwarding rule is "always" (default). Unfiltered and unrecorded - warnings will be passed out and be matched by the outer level. - On the outmost level they will be printed (or caught by another - warnings context). The forwarding rule argument can modify this - behaviour. - - Like ``catch_warnings`` this context manager is not threadsafe. - - Examples - -------- - >>> with suppress_warnings() as sup: - ... sup.filter(DeprecationWarning, "Some text") - ... sup.filter(module=np.ma.core) - ... log = sup.record(FutureWarning, "Does this occur?") - ... command_giving_warnings() - ... # The FutureWarning was given once, the filtered warnings were - ... # ignored. All other warnings abide outside settings (may be - ... # printed/error) - ... assert_(len(log) == 1) - ... assert_(len(sup.log) == 1) # also stored in log attribute - - Or as a decorator: - - >>> sup = suppress_warnings() - >>> sup.filter(module=np.ma.core) # module must match exact - >>> @sup - >>> def some_function(): - ... # do something which causes a warning in np.ma.core - ... 
pass - """ - def __init__(self, forwarding_rule="always"): - self._entered = False - - # Suppressions are either instance or defined inside one with block: - self._suppressions = [] - - if forwarding_rule not in {"always", "module", "once", "location"}: - raise ValueError("unsupported forwarding rule.") - self._forwarding_rule = forwarding_rule - - def _clear_registries(self): - if hasattr(warnings, "_filters_mutated"): - # clearing the registry should not be necessary on new pythons, - # instead the filters should be mutated. - warnings._filters_mutated() - return - # Simply clear the registry, this should normally be harmless, - # note that on new pythons it would be invalidated anyway. - for module in self._tmp_modules: - if hasattr(module, "__warningregistry__"): - module.__warningregistry__.clear() - - def _filter(self, category=Warning, message="", module=None, record=False): - if record: - record = [] # The log where to store warnings - else: - record = None - if self._entered: - if module is None: - warnings.filterwarnings( - "always", category=category, message=message) - else: - module_regex = module.__name__.replace('.', r'\.') + '$' - warnings.filterwarnings( - "always", category=category, message=message, - module=module_regex) - self._tmp_modules.add(module) - self._clear_registries() - - self._tmp_suppressions.append( - (category, message, re.compile(message, re.I), module, record)) - else: - self._suppressions.append( - (category, message, re.compile(message, re.I), module, record)) - - return record - - def filter(self, category=Warning, message="", module=None): - """ - Add a new suppressing filter or apply it if the state is entered. - - Parameters - ---------- - category : class, optional - Warning class to filter - message : string, optional - Regular expression matching the warning message. - module : module, optional - Module to filter for. Note that the module (and its file) - must match exactly and cannot be a submodule. 
This may make - it unreliable for external modules. - - Notes - ----- - When added within a context, filters are only added inside - the context and will be forgotten when the context is exited. - """ - self._filter(category=category, message=message, module=module, - record=False) - - def record(self, category=Warning, message="", module=None): - """ - Append a new recording filter or apply it if the state is entered. - - All warnings matching will be appended to the ``log`` attribute. - - Parameters - ---------- - category : class, optional - Warning class to filter - message : string, optional - Regular expression matching the warning message. - module : module, optional - Module to filter for. Note that the module (and its file) - must match exactly and cannot be a submodule. This may make - it unreliable for external modules. - - Returns - ------- - log : list - A list which will be filled with all matched warnings. - - Notes - ----- - When added within a context, filters are only added inside - the context and will be forgotten when the context is exited. 
- """ - return self._filter(category=category, message=message, module=module, - record=True) - - def __enter__(self): - if self._entered: - raise RuntimeError("cannot enter suppress_warnings twice.") - - self._orig_show = warnings.showwarning - self._filters = warnings.filters - warnings.filters = self._filters[:] - - self._entered = True - self._tmp_suppressions = [] - self._tmp_modules = set() - self._forwarded = set() - - self.log = [] # reset global log (no need to keep same list) - - for cat, mess, _, mod, log in self._suppressions: - if log is not None: - del log[:] # clear the log - if mod is None: - warnings.filterwarnings( - "always", category=cat, message=mess) - else: - module_regex = mod.__name__.replace('.', r'\.') + '$' - warnings.filterwarnings( - "always", category=cat, message=mess, - module=module_regex) - self._tmp_modules.add(mod) - warnings.showwarning = self._showwarning - self._clear_registries() - - return self - - def __exit__(self, *exc_info): - warnings.showwarning = self._orig_show - warnings.filters = self._filters - self._clear_registries() - self._entered = False - del self._orig_show - del self._filters - - def _showwarning(self, message, category, filename, lineno, - *args, **kwargs): - use_warnmsg = kwargs.pop("use_warnmsg", None) - for cat, _, pattern, mod, rec in ( - self._suppressions + self._tmp_suppressions)[::-1]: - if (issubclass(category, cat) and - pattern.match(message.args[0]) is not None): - if mod is None: - # Message and category match, either recorded or ignored - if rec is not None: - msg = WarningMessage(message, category, filename, - lineno, **kwargs) - self.log.append(msg) - rec.append(msg) - return - # Use startswith, because warnings strips the c or o from - # .pyc/.pyo files. 
- elif mod.__file__.startswith(filename): - # The message and module (filename) match - if rec is not None: - msg = WarningMessage(message, category, filename, - lineno, **kwargs) - self.log.append(msg) - rec.append(msg) - return - - # There is no filter in place, so pass to the outside handler - # unless we should only pass it once - if self._forwarding_rule == "always": - if use_warnmsg is None: - self._orig_show(message, category, filename, lineno, - *args, **kwargs) - else: - self._orig_showmsg(use_warnmsg) - return - - if self._forwarding_rule == "once": - signature = (message.args, category) - elif self._forwarding_rule == "module": - signature = (message.args, category, filename) - elif self._forwarding_rule == "location": - signature = (message.args, category, filename, lineno) - - if signature in self._forwarded: - return - self._forwarded.add(signature) - if use_warnmsg is None: - self._orig_show(message, category, filename, lineno, *args, - **kwargs) - else: - self._orig_showmsg(use_warnmsg) - - def __call__(self, func): - """ - Function decorator to apply certain suppressions to a whole - function. 
- """ - @wraps(func) - def new_func(*args, **kwargs): - with self: - return func(*args, **kwargs) - - return new_func +from .nose_tools.utils import * diff -Nru python-numpy-1.13.3/numpy/tests/test_ctypeslib.py python-numpy-1.14.5/numpy/tests/test_ctypeslib.py --- python-numpy-1.13.3/numpy/tests/test_ctypeslib.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/tests/test_ctypeslib.py 2018-06-12 18:28:52.000000000 +0000 @@ -5,7 +5,7 @@ import numpy as np from numpy.ctypeslib import ndpointer, load_library from numpy.distutils.misc_util import get_shared_lib_extension -from numpy.testing import TestCase, run_module_suite, dec +from numpy.testing import run_module_suite, assert_, assert_raises, dec try: cdll = None @@ -20,7 +20,7 @@ except ImportError: _HAS_CTYPE = False -class TestLoadLibrary(TestCase): +class TestLoadLibrary(object): @dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation") @dec.knownfailureif(sys.platform == @@ -53,65 +53,65 @@ " (import error was: %s)" % str(e)) print(msg) -class TestNdpointer(TestCase): +class TestNdpointer(object): def test_dtype(self): dt = np.intc p = ndpointer(dtype=dt) - self.assertTrue(p.from_param(np.array([1], dt))) + assert_(p.from_param(np.array([1], dt))) dt = 'i4') p = ndpointer(dtype=dt) p.from_param(np.array([1], dt)) - self.assertRaises(TypeError, p.from_param, + assert_raises(TypeError, p.from_param, np.array([1], dt.newbyteorder('swap'))) dtnames = ['x', 'y'] dtformats = [np.intc, np.float64] dtdescr = {'names': dtnames, 'formats': dtformats} dt = np.dtype(dtdescr) p = ndpointer(dtype=dt) - self.assertTrue(p.from_param(np.zeros((10,), dt))) + assert_(p.from_param(np.zeros((10,), dt))) samedt = np.dtype(dtdescr) p = ndpointer(dtype=samedt) - self.assertTrue(p.from_param(np.zeros((10,), dt))) + assert_(p.from_param(np.zeros((10,), dt))) dt2 = np.dtype(dtdescr, align=True) if dt.itemsize != dt2.itemsize: - self.assertRaises(TypeError, p.from_param, np.zeros((10,), dt2)) + 
assert_raises(TypeError, p.from_param, np.zeros((10,), dt2)) else: - self.assertTrue(p.from_param(np.zeros((10,), dt2))) + assert_(p.from_param(np.zeros((10,), dt2))) def test_ndim(self): p = ndpointer(ndim=0) - self.assertTrue(p.from_param(np.array(1))) - self.assertRaises(TypeError, p.from_param, np.array([1])) + assert_(p.from_param(np.array(1))) + assert_raises(TypeError, p.from_param, np.array([1])) p = ndpointer(ndim=1) - self.assertRaises(TypeError, p.from_param, np.array(1)) - self.assertTrue(p.from_param(np.array([1]))) + assert_raises(TypeError, p.from_param, np.array(1)) + assert_(p.from_param(np.array([1]))) p = ndpointer(ndim=2) - self.assertTrue(p.from_param(np.array([[1]]))) + assert_(p.from_param(np.array([[1]]))) def test_shape(self): p = ndpointer(shape=(1, 2)) - self.assertTrue(p.from_param(np.array([[1, 2]]))) - self.assertRaises(TypeError, p.from_param, np.array([[1], [2]])) + assert_(p.from_param(np.array([[1, 2]]))) + assert_raises(TypeError, p.from_param, np.array([[1], [2]])) p = ndpointer(shape=()) - self.assertTrue(p.from_param(np.array(1))) + assert_(p.from_param(np.array(1))) def test_flags(self): x = np.array([[1, 2], [3, 4]], order='F') p = ndpointer(flags='FORTRAN') - self.assertTrue(p.from_param(x)) + assert_(p.from_param(x)) p = ndpointer(flags='CONTIGUOUS') - self.assertRaises(TypeError, p.from_param, x) + assert_raises(TypeError, p.from_param, x) p = ndpointer(flags=x.flags.num) - self.assertTrue(p.from_param(x)) - self.assertRaises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) + assert_(p.from_param(x)) + assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]])) def test_cache(self): a1 = ndpointer(dtype=np.float64) a2 = ndpointer(dtype=np.float64) - self.assertEqual(a1, a2) + assert_(a1 == a2) if __name__ == "__main__": diff -Nru python-numpy-1.13.3/numpy/tests/test_matlib.py python-numpy-1.14.5/numpy/tests/test_matlib.py --- python-numpy-1.13.3/numpy/tests/test_matlib.py 2017-09-29 17:31:46.000000000 +0000 +++ 
python-numpy-1.14.5/numpy/tests/test_matlib.py 2018-06-12 18:28:52.000000000 +0000 @@ -24,14 +24,23 @@ assert_array_equal(numpy.matlib.zeros(2), np.matrix([[ 0., 0.]])) def test_identity(): - x = numpy.matlib.identity(2, dtype=np.int) + x = numpy.matlib.identity(2, dtype=int) assert_array_equal(x, np.matrix([[1, 0], [0, 1]])) def test_eye(): - x = numpy.matlib.eye(3, k=1, dtype=int) - assert_array_equal(x, np.matrix([[ 0, 1, 0], - [ 0, 0, 1], - [ 0, 0, 0]])) + xc = numpy.matlib.eye(3, k=1, dtype=int) + assert_array_equal(xc, np.matrix([[ 0, 1, 0], + [ 0, 0, 1], + [ 0, 0, 0]])) + assert xc.flags.c_contiguous + assert not xc.flags.f_contiguous + + xf = numpy.matlib.eye(3, 4, dtype=int, order='F') + assert_array_equal(xf, np.matrix([[ 1, 0, 0, 0], + [ 0, 1, 0, 0], + [ 0, 0, 1, 0]])) + assert not xf.flags.c_contiguous + assert xf.flags.f_contiguous def test_rand(): x = numpy.matlib.rand(3) diff -Nru python-numpy-1.13.3/numpy/tests/test_scripts.py python-numpy-1.14.5/numpy/tests/test_scripts.py --- python-numpy-1.13.3/numpy/tests/test_scripts.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/tests/test_scripts.py 2018-06-12 18:28:52.000000000 +0000 @@ -11,8 +11,7 @@ import numpy as np from numpy.compat.py3k import basestring from nose.tools import assert_equal -from numpy.testing.decorators import skipif -from numpy.testing import assert_ +from numpy.testing import assert_, dec is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) @@ -59,7 +58,7 @@ return proc.returncode, stdout, stderr -@skipif(is_inplace) +@dec.skipif(is_inplace) def test_f2py(): # test that we can run f2py script if sys.platform == 'win32': @@ -87,7 +86,7 @@ assert_equal(stdout.strip(), b'2') success = True break - except: + except Exception: pass msg = "Warning: neither %s nor %s nor %s found in path" % f2py_cmds assert_(success, msg) diff -Nru python-numpy-1.13.3/numpy/tests/test_warnings.py python-numpy-1.14.5/numpy/tests/test_warnings.py --- 
python-numpy-1.13.3/numpy/tests/test_warnings.py 2017-09-29 17:31:46.000000000 +0000 +++ python-numpy-1.14.5/numpy/tests/test_warnings.py 2018-06-12 18:28:52.000000000 +0000 @@ -13,9 +13,7 @@ import ast import tokenize import numpy - from numpy.testing import run_module_suite - from numpy.testing.decorators import slow - + from numpy.testing import run_module_suite, dec class ParseCall(ast.NodeVisitor): def __init__(self): @@ -63,7 +61,7 @@ "{} on line {}".format(self.__filename, node.lineno)) - @slow + @dec.slow def test_warning_calls(): # combined "ignore" and stacklevel error base = Path(numpy.__file__).parent diff -Nru python-numpy-1.13.3/numpy/version.py python-numpy-1.14.5/numpy/version.py --- python-numpy-1.13.3/numpy/version.py 2017-09-29 18:22:08.000000000 +0000 +++ python-numpy-1.14.5/numpy/version.py 2018-06-12 18:29:44.000000000 +0000 @@ -2,10 +2,10 @@ # THIS FILE IS GENERATED FROM NUMPY SETUP.PY # # To compare versions robustly, use `numpy.lib.NumpyVersion` -short_version = '1.13.3' -version = '1.13.3' -full_version = '1.13.3' -git_revision = '31465473c491829d636c9104c390062cba005681' +short_version = '1.14.5' +version = '1.14.5' +full_version = '1.14.5' +git_revision = 'd3348c1123d3862a42d50a7fee14e50b268944a4' release = True if not release: diff -Nru python-numpy-1.13.3/PKG-INFO python-numpy-1.14.5/PKG-INFO --- python-numpy-1.13.3/PKG-INFO 2017-09-29 18:22:10.000000000 +0000 +++ python-numpy-1.14.5/PKG-INFO 2018-06-12 18:29:46.000000000 +0000 @@ -1,12 +1,13 @@ Metadata-Version: 1.2 Name: numpy -Version: 1.13.3 +Version: 1.14.5 Summary: NumPy: array processing for numbers, strings, records, and objects. Home-page: http://www.numpy.org -Author: NumPy Developers -Author-email: numpy-discussion@python.org +Author: Travis E. Oliphant et al. 
+Maintainer: NumPy Developers +Maintainer-email: numpy-discussion@python.org License: BSD -Download-URL: http://sourceforge.net/projects/numpy/files/NumPy/ +Download-URL: https://pypi.python.org/pypi/numpy Description: NumPy is a general-purpose array-processing package designed to efficiently manipulate large multi-dimensional arrays of arbitrary records without sacrificing too much speed for small multi-dimensional diff -Nru python-numpy-1.13.3/setup.py python-numpy-1.14.5/setup.py --- python-numpy-1.13.3/setup.py 2017-09-29 18:08:58.000000000 +0000 +++ python-numpy-1.14.5/setup.py 2018-06-12 18:28:52.000000000 +0000 @@ -62,8 +62,8 @@ """ MAJOR = 1 -MINOR = 13 -MICRO = 3 +MINOR = 14 +MICRO = 5 ISRELEASED = True VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) @@ -81,7 +81,7 @@ env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' - out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0] + out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: @@ -147,8 +147,8 @@ a = open(filename, 'w') try: a.write(cnt % {'version': VERSION, - 'full_version' : FULLVERSION, - 'git_revision' : GIT_REVISION, + 'full_version': FULLVERSION, + 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED)}) finally: a.close() @@ -164,6 +164,7 @@ quiet=True) config.add_subpackage('numpy') + config.add_data_files(('numpy', 'LICENSE.txt')) config.get_version('numpy/version.py') # sets config.version @@ -351,7 +352,7 @@ long_description = "\n".join(DOCLINES[2:]), url = "http://www.numpy.org", author = "Travis E. 
Oliphant et al.", - download_url = "http://sourceforge.net/projects/numpy/files/NumPy/", + download_url = "https://pypi.python.org/pypi/numpy", license = 'BSD', classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], @@ -362,6 +363,7 @@ if "--force" in sys.argv: run_build = True + sys.argv.remove('--force') else: # Raise errors for unsupported commands, improve help output, etc. run_build = parse_setuppy_commands() diff -Nru python-numpy-1.13.3/site.cfg.example python-numpy-1.14.5/site.cfg.example --- python-numpy-1.13.3/site.cfg.example 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/site.cfg.example 2018-06-12 18:28:52.000000000 +0000 @@ -153,29 +153,32 @@ # runtime_library_dirs = /home/username/blis/lib # MKL -#---- -# MKL is Intel's very optimized yet proprietary implementation of BLAS and -# Lapack. -# For recent (9.0.21, for example) mkl, you need to change the names of the -# lapack library. Assuming you installed the mkl in /opt, for a 32 bits cpu: +#---- +# Intel MKL is Intel's very optimized yet proprietary implementation of BLAS and +# Lapack. Find the latest info on building numpy with Intel MKL in this article: +# https://software.intel.com/en-us/articles/numpyscipy-with-intel-mkl +# Assuming you installed the mkl in /opt/intel/compilers_and_libraries_2018/linux/mkl, +# for 64 bits code at Linux: +# [mkl] +# library_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/lib/intel64 +# include_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/include +# mkl_libs = mkl_rt +# lapack_libs =  +# +# For 32 bit code at Linux: # [mkl] -# library_dirs = /opt/intel/mkl/9.1.023/lib/32/ -# lapack_libs = mkl_lapack -# -# For 10.*, on 32 bits machines: -# [mkl] -# library_dirs = /opt/intel/mkl/10.0.1.014/lib/32/ -# lapack_libs = mkl_lapack -# mkl_libs = mkl, guide -# -# On win-64, the following options compiles numpy with the MKL library -# dynamically linked. 
-# [mkl] -# include_dirs = C:\Program Files (x86)\Intel\Composer XE 2015\mkl\include -# library_dirs = C:\Program Files (x86)\Intel\Composer XE 2015\mkl\lib\intel64 -# mkl_libs = mkl_core_dll, mkl_intel_lp64_dll, mkl_intel_thread_dll -# lapack_libs = mkl_lapack95_lp64 - +# library_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/lib/ia32 +# include_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/include +# mkl_libs = mkl_rt +# lapack_libs =  +# +# On win-64, the following options compiles numpy with the MKL library +# dynamically linked. +# [mkl] +# include_dirs = C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\mkl\include +# library_dirs = C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\mkl\lib\intel64 +# mkl_libs = mkl_rt +# lapack_libs = # UMFPACK # ------- diff -Nru python-numpy-1.13.3/tools/swig/numpy.i python-numpy-1.14.5/tools/swig/numpy.i --- python-numpy-1.13.3/tools/swig/numpy.i 2017-09-17 13:29:38.000000000 +0000 +++ python-numpy-1.14.5/tools/swig/numpy.i 2018-06-12 18:28:52.000000000 +0000 @@ -80,6 +80,7 @@ %#define array_data(a) (((PyArrayObject*)a)->data) %#define array_descr(a) (((PyArrayObject*)a)->descr) %#define array_flags(a) (((PyArrayObject*)a)->flags) +%#define array_clearflags(a,f) (((PyArrayObject*)a)->flags) &= ~f %#define array_enableflags(a,f) (((PyArrayObject*)a)->flags) = f %#define array_is_fortran(a) (PyArray_ISFORTRAN((PyArrayObject*)a)) %#else @@ -94,6 +95,7 @@ %#define array_descr(a) PyArray_DESCR((PyArrayObject*)a) %#define array_flags(a) PyArray_FLAGS((PyArrayObject*)a) %#define array_enableflags(a,f) PyArray_ENABLEFLAGS((PyArrayObject*)a,f) +%#define array_clearflags(a,f) PyArray_CLEARFLAGS((PyArrayObject*)a,f) %#define array_is_fortran(a) (PyArray_IS_F_CONTIGUOUS((PyArrayObject*)a)) %#endif %#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS((PyArrayObject*)a)) @@ -538,7 +540,13 @@ int i; npy_intp * strides = array_strides(ary); if (array_is_fortran(ary)) 
return success; + int n_non_one = 0; /* Set the Fortran ordered flag */ + const npy_intp *dims = array_dimensions(ary); + for (i=0; i < nd; ++i) + n_non_one += (dims[i] != 1) ? 1 : 0; + if (n_non_one > 1) + array_clearflags(ary,NPY_ARRAY_CARRAY); array_enableflags(ary,NPY_ARRAY_FARRAY); /* Recompute the strides */ strides[0] = strides[nd-1]; diff -Nru python-numpy-1.13.3/tox.ini python-numpy-1.14.5/tox.ini --- python-numpy-1.13.3/tox.ini 1970-01-01 00:00:00.000000000 +0000 +++ python-numpy-1.14.5/tox.ini 2018-06-12 17:31:56.000000000 +0000 @@ -0,0 +1,50 @@ +# 'Tox' is a tool for automating sdist/build/test cycles against +# multiple Python versions: +# http://pypi.python.org/pypi/tox +# http://tox.testrun.org/ + +# Running the command 'tox' while in the root of the numpy source +# directory will: +# - Create a numpy source distribution (setup.py sdist) +# - Then for every supported version of Python: +# - Create a virtualenv in .tox/py$VERSION and install +# dependencies. (These virtualenvs are cached across runs unless +# you use --recreate.) +# - Use pip to install the numpy sdist into the virtualenv +# - Run the numpy tests +# To run against a specific subset of Python versions, use: +# tox -e py27 + +# Extra arguments will be passed to test-installed-numpy.py. To run +# the full testsuite: +# tox full +# To run with extra verbosity: +# tox -- -v + +# Tox assumes that you have appropriate Python interpreters already +# installed and that they can be run as 'python2.7', 'python3.3', etc. + +[tox] +envlist = + py27,py34,py35,py36, + py27-not-relaxed-strides,py34-not-relaxed-strides + +[testenv] +deps= + nose +changedir={envdir} +commands={envpython} {toxinidir}/tools/test-installed-numpy.py --mode=full {posargs:} + +[testenv:py27-not-relaxed-strides] +basepython=python2.7 +env=NPY_RELAXED_STRIDES_CHECKING=0 + +[testenv:py34-not-relaxed-strides] +basepython=python3.4 +env=NPY_RELAXED_STRIDES_CHECKING=0 + +# Not run by default. 
Set up the way you want then use 'tox -e debug' +# if you want it: +[testenv:debug] +basepython=python-dbg +commands=gdb --args {envpython} {toxinidir}/tools/test-installed-numpy.py --mode=full {posargs:}