diff -Nru pypy-4.0.1+dfsg/debian/changelog pypy-5.0.1+dfsg/debian/changelog --- pypy-4.0.1+dfsg/debian/changelog 2016-02-07 12:47:57.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/changelog 2016-04-04 23:54:53.000000000 +0000 @@ -1,8 +1,54 @@ -pypy (4.0.1+dfsg-1build1) xenial; urgency=medium +pypy (5.0.1+dfsg-4) unstable; urgency=medium - * No-change rebuild for ncurses6 transition. + * Disable building with pypy on ppc64, again. It has a JIT, but pypy itself + is just too buggy on big-endian, to manage a translation. + * hg-updates: fix wait() on big endian (which should resolve the above + issue). + * kfreebsd-tests: Skip PTY tests that hang forever on kFreeBSD. - -- Matthias Klose Sun, 07 Feb 2016 12:47:57 +0000 + -- Stefano Rivera Mon, 04 Apr 2016 16:54:44 -0700 + +pypy (5.0.1+dfsg-3) unstable; urgency=medium + + * Re-enable pypy builds on ppc64. + * ARM JIT backend: Only execute NEON instructions on CPUs with NEON support. + * hg-updates: Apply patches staged in upstream's release branch. + + -- Stefano Rivera Fri, 25 Mar 2016 21:42:35 -0500 + +pypy (5.0.1+dfsg-2) unstable; urgency=medium + + * Drop libunwind build-dep, not used any more, in PyPy 5.0. + * Port the recent vmprof changes to kFreeBSD. + * Make pypy-lib an architecture-dependant package, so that we remain + installable on buildds, after Architecture: all build has been installed. + * Temporarily build with python2.7 on ppc64, to re-bootstrap. + + -- Stefano Rivera Tue, 22 Mar 2016 20:46:30 -0700 + +pypy (5.0.1+dfsg-1) unstable; urgency=medium + + * New upstream patch release. + * Recognise armv8l architecture, so we can build in an armhf chroot on + arm64. + * Build with pypy on architectures with a JITted PyPy. This is now necessary + on armhf, as we otherwise can't build a jitted pypy in a 3GB address + space (32bit kernel userspace). + + -- Stefano Rivera Sun, 20 Mar 2016 08:58:13 -0400 + +pypy (5.0+dfsg-1) unstable; urgency=medium + + * New upstream release. + * Refresh patches. 
+ * Drop soabi patch, superseded upstream. + * Bump copyright years. + * Bump Standards-Version to 3.9.7, no changes needed. + * Switch both Vcs fields to the same https URL. + * Patch: sandbox-unlink allow the sandbox REPL to start, by supporting + unlink() (and rejecting it). + + -- Stefano Rivera Sun, 13 Mar 2016 23:57:01 -0700 pypy (4.0.1+dfsg-1) unstable; urgency=medium diff -Nru pypy-4.0.1+dfsg/debian/control pypy-5.0.1+dfsg/debian/control --- pypy-4.0.1+dfsg/debian/control 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/control 2016-04-04 23:54:53.000000000 +0000 @@ -3,9 +3,10 @@ Priority: extra Maintainer: Stefano Rivera Build-Depends: - debhelper (>= 9), + debhelper (>= 9.20141010~), dh-exec, dh-python, + dpkg-dev (>= 1.17.14~), libbz2-dev, libexpat1-dev, libffi-dev, @@ -13,10 +14,10 @@ libncurses-dev, libsqlite3-dev, libssl-dev, - libunwind-dev [amd64] | libunwind8-dev [amd64] | libunwind7-dev [amd64], netbase, pkg-config, procps, + pypy [amd64 armhf i386 ppc64el] , python (>= 2.6.6-11~), python-docutils, python-sphinx (>= 1.0.7+dfsg), @@ -26,18 +27,17 @@ valgrind [amd64 arm64 armhf i386 mips mips64el mipsel powerpc ppc64 ppc64el s390x], zlib1g-dev Build-Depends-Indep: graphviz -Standards-Version: 3.9.6 +Standards-Version: 3.9.7 Homepage: http://pypy.org/ -Vcs-Git: git://anonscm.debian.org/collab-maint/pypy.git -Vcs-Browser: https://anonscm.debian.org/cgit/collab-maint/pypy.git +Vcs-Git: https://anonscm.debian.org/git/collab-maint/pypy.git +Vcs-Browser: https://anonscm.debian.org/git/collab-maint/pypy.git Package: pypy Architecture: kfreebsd-any linux-any -Depends: pypy-lib (= ${source:Version}), ${misc:Depends}, ${shlibs:Depends} +Depends: pypy-lib (= ${binary:Version}), ${misc:Depends}, ${shlibs:Depends} Breaks: pypy-dev (<< ${source:Version}) Provides: ${pypy-abi} Suggests: pypy-doc, pypy-tk (= ${binary:Version}) -Recommends: ${pypy:Recommends} Pre-Depends: dpkg (>= 1.15.6~), ${misc:Pre-Depends} Description: fast alternative 
implementation of Python - PyPy interpreter PyPy is a fast, compliant alternative implementation of the Python language @@ -69,7 +69,7 @@ Package: python-pypy.sandbox Architecture: kfreebsd-any linux-any Depends: - pypy-lib (= ${source:Version}), + pypy-lib (= ${binary:Version}), ${misc:Depends}, ${python:Depends}, ${shlibs:Depends} @@ -87,7 +87,8 @@ (2.7). Package: pypy-lib -Architecture: all +Architecture: kfreebsd-any linux-any +Multi-Arch: same Depends: ${misc:Depends} Pre-Depends: dpkg (>= 1.15.6~), ${misc:Pre-Depends} Provides: ${cffi:Provides} @@ -101,7 +102,7 @@ Package: pypy-lib-testsuite Architecture: all -Depends: pypy, pypy-lib (= ${source:Version}), ${misc:Depends} +Depends: pypy, pypy-lib (>= ${source:Version}), ${misc:Depends} Replaces: pypy-lib (<< 2.2) Pre-Depends: dpkg (>= 1.15.6~), ${misc:Pre-Depends} Description: standard library test suite for PyPy (an alternative Python interpreter) diff -Nru pypy-4.0.1+dfsg/debian/copyright pypy-5.0.1+dfsg/debian/copyright --- pypy-4.0.1+dfsg/debian/copyright 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/copyright 2016-04-04 23:54:53.000000000 +0000 @@ -9,36 +9,36 @@ disable Files: * -Copyright: 2003-2015, +Copyright: 2003-2016, Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -47,8 +47,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon - Justin Peel Ronny Pfannschmidt + Justin Peel David Edelsohn Anders Hammarquist Jakub Gustak @@ -70,6 +70,7 @@ Tyler 
Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -80,9 +81,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -91,16 +92,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -109,14 +114,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -128,6 +131,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -137,12 +142,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -166,33 +171,32 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -200,6 +204,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. 
Cura Jacob Oscarson @@ -209,11 +214,13 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -225,20 +232,21 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto@goyle Yury V. Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo@eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon @@ -248,6 +256,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski@gmail.com @@ -257,6 +266,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -291,9 +301,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -302,6 +312,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller @@ -504,7 +515,7 @@ License: Apache-2.0 Files: debian/* -Copyright: 2011-2015, Stefano Rivera +Copyright: 2011-2016, Stefano Rivera License: Expat License: Apache-2.0 diff -Nru pypy-4.0.1+dfsg/debian/patches/armv8l pypy-5.0.1+dfsg/debian/patches/armv8l --- pypy-4.0.1+dfsg/debian/patches/armv8l 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/armv8l 2016-04-04 23:54:53.000000000 +0000 @@ -0,0 +1,15 @@ +Description: Recognise armv8l machine type + This is a 32bit personality on an ARMv8 (64bit) CPU +Author: Stefano Rivera +Origin: upstream, https://bitbucket.org/pypy/pypy/commits/0206c67c661f766c1d43be3d30a604c804a8a038 + +--- a/rpython/jit/backend/detect_cpu.py ++++ b/rpython/jit/backend/detect_cpu.py +@@ -64,6 +64,7 @@ + 'x86_64': MODEL_X86, + 'amd64': MODEL_X86, # 
freebsd + 'AMD64': MODEL_X86, # win64 ++ 'armv8l': MODEL_ARM, # 32-bit ARMv8 + 'armv7l': MODEL_ARM, + 'armv6l': MODEL_ARM, + 'arm': MODEL_ARM, # freebsd diff -Nru pypy-4.0.1+dfsg/debian/patches/ensure-valid-term pypy-5.0.1+dfsg/debian/patches/ensure-valid-term --- pypy-4.0.1+dfsg/debian/patches/ensure-valid-term 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/ensure-valid-term 2016-04-04 23:54:53.000000000 +0000 @@ -2,7 +2,7 @@ Some tests use readline on an internal pty. These fail if TERM is not set to a terminal that supports "clear". Author: Stefano Rivera -Last-Update: 2015-06-14 +Forwarded: https://bitbucket.org/pypy/pypy/commits/accad9a1fe9c6c7b9514117296f92288fd810093 --- a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py +++ b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py @@ -15,7 +15,7 @@ # this test case should contain as-verbatim-as-possible versions of # (applicable) bug reports -@@ -61,13 +61,14 @@ +@@ -62,13 +62,14 @@ mfd, sfd = pty.openpty() try: @@ -72,9 +72,9 @@ +from .infrastructure import sane_term + - @pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") - def test_raw_input(): -@@ -11,7 +13,8 @@ + @pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " + "'kfreebsd' in sys.platform") +@@ -12,7 +14,8 @@ readline_wrapper = _ReadlineWrapper(slave, slave) os.write(master, b'input\n') diff -Nru pypy-4.0.1+dfsg/debian/patches/hg-updates pypy-5.0.1+dfsg/debian/patches/hg-updates --- pypy-4.0.1+dfsg/debian/patches/hg-updates 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/hg-updates 2016-04-04 23:54:53.000000000 +0000 @@ -0,0 +1,151 @@ +Description: Updates from upstream's 5.x release branch + Up to and including 83410:b5d52a043b5e +Origin: upstream, https://bitbucket.org/pypy/pypy/branch/release-5.x + +--- a/rpython/jit/backend/llsupport/gc.py ++++ b/rpython/jit/backend/llsupport/gc.py +@@ -164,13 +164,11 @@ + array_index = moving_obj_tracker.get_array_index(v) + + size, 
offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) +- scale = size ++ array_index = array_index * size + offset + args = [moving_obj_tracker.const_ptr_gcref_array, + ConstInt(array_index), +- ConstInt(scale), +- ConstInt(offset), + ConstInt(size)] +- load_op = ResOperation(rop.GC_LOAD_INDEXED_R, args) ++ load_op = ResOperation(rop.GC_LOAD_R, args) + newops.append(load_op) + op.setarg(arg_i, load_op) + # +--- a/rpython/rlib/rvmprof/src/vmprof_config.h ++++ b/rpython/rlib/rvmprof/src/vmprof_config.h +@@ -1,5 +1,11 @@ + #define HAVE_SYS_UCONTEXT_H +-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__APPLE__) ++#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) ++#if defined(__i386__) ++#define PC_FROM_UCONTEXT uc_mcontext.mc_eip ++#else ++#define PC_FROM_UCONTEXT uc_mcontext.mc_rip ++#endif ++#elif defined(__APPLE__) + #define PC_FROM_UCONTEXT uc_mcontext.mc_rip + #else + #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] +--- a/rpython/translator/c/src/thread.h ++++ b/rpython/translator/c/src/thread.h +@@ -42,13 +42,13 @@ + RPY_EXTERN long rpy_fastgil; + + static inline void _RPyGilAcquire(void) { +- long old_fastgil = lock_test_and_set(&rpy_fastgil, 1); ++ long old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1); + if (old_fastgil != 0) + RPyGilAcquireSlowPath(old_fastgil); + } + static inline void _RPyGilRelease(void) { + assert(RPY_FASTGIL_LOCKED(rpy_fastgil)); +- lock_release(&rpy_fastgil); ++ pypy_lock_release(&rpy_fastgil); + } + static inline long *_RPyFetchFastGil(void) { + return &rpy_fastgil; +--- a/rpython/translator/c/src/thread_gil.c ++++ b/rpython/translator/c/src/thread_gil.c +@@ -70,7 +70,7 @@ + { + /* Acquires the GIL. This assumes that we already did: + +- old_fastgil = lock_test_and_set(&rpy_fastgil, 1); ++ old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1); + */ + if (!RPY_FASTGIL_LOCKED(old_fastgil)) { + /* The fastgil was not previously locked: success. +@@ -122,7 +122,7 @@ + released. 
+ */ + if (!RPY_FASTGIL_LOCKED(rpy_fastgil)) { +- old_fastgil = lock_test_and_set(&rpy_fastgil, 1); ++ old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1); + if (!RPY_FASTGIL_LOCKED(old_fastgil)) + /* yes, got a non-held value! Now we hold it. */ + break; +--- a/rpython/translator/c/src/thread_nt.c ++++ b/rpython/translator/c/src/thread_nt.c +@@ -245,7 +245,7 @@ + LeaveCriticalSection(mutex); + } + +-//#define lock_test_and_set(ptr, value) see thread_nt.h ++//#define pypy_lock_test_and_set(ptr, value) see thread_nt.h + #define atomic_increment(ptr) InterlockedIncrement(ptr) + #define atomic_decrement(ptr) InterlockedDecrement(ptr) + +--- a/rpython/translator/c/src/thread_nt.h ++++ b/rpython/translator/c/src/thread_nt.h +@@ -34,8 +34,8 @@ + + #ifdef _M_IA64 + /* On Itanium, use 'acquire' memory ordering semantics */ +-#define lock_test_and_set(ptr, value) InterlockedExchangeAcquire(ptr, value) ++#define pypy_lock_test_and_set(ptr, value) InterlockedExchangeAcquire(ptr,value) + #else +-#define lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) ++#define pypy_lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) + #endif +-#define lock_release(ptr) (*((volatile long *)ptr) = 0) ++#define pypy_lock_release(ptr) (*((volatile long *)ptr) = 0) +--- a/rpython/translator/c/src/thread_pthread.c ++++ b/rpython/translator/c/src/thread_pthread.c +@@ -546,7 +546,7 @@ + return result; + } + +-//#define lock_test_and_set(ptr, value) see thread_pthread.h ++//#define pypy_lock_test_and_set(ptr, value) see thread_pthread.h + #define atomic_increment(ptr) __sync_fetch_and_add(ptr, 1) + #define atomic_decrement(ptr) __sync_fetch_and_sub(ptr, 1) + #define HAVE_PTHREAD_ATFORK 1 +--- a/rpython/translator/c/src/thread_pthread.h ++++ b/rpython/translator/c/src/thread_pthread.h +@@ -80,5 +80,5 @@ + void RPyThreadAfterFork(void); + + +-#define lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) +-#define lock_release(ptr) __sync_lock_release(ptr) ++#define 
pypy_lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) ++#define pypy_lock_release(ptr) __sync_lock_release(ptr) +--- a/rpython/translator/c/src/threadlocal.c ++++ b/rpython/translator/c/src/threadlocal.c +@@ -15,14 +15,14 @@ + static int check_valid(void); + + void _RPython_ThreadLocals_Acquire(void) { +- while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) { ++ while (!pypy_lock_test_and_set(&pypy_threadlocal_lock, 1)) { + /* busy loop */ + } + assert(check_valid()); + } + void _RPython_ThreadLocals_Release(void) { + assert(check_valid()); +- lock_release(&pypy_threadlocal_lock); ++ pypy_lock_release(&pypy_threadlocal_lock); + } + + +--- a/rpython/rlib/rposix.py ++++ b/rpython/rlib/rposix.py +@@ -827,7 +827,7 @@ + lltype.free(status_p, flavor='raw') + + def _make_waitmacro(name): +- c_func = external(name, [lltype.Signed], lltype.Signed, ++ c_func = external(name, [rffi.INT], lltype.Signed, + macro=_MACRO_ON_POSIX) + returning_int = name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG') + diff -Nru pypy-4.0.1+dfsg/debian/patches/kfreebsd-tests pypy-5.0.1+dfsg/debian/patches/kfreebsd-tests --- pypy-4.0.1+dfsg/debian/patches/kfreebsd-tests 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/kfreebsd-tests 2016-04-04 23:54:53.000000000 +0000 @@ -0,0 +1,40 @@ +Description: Skip PTY tests that hang forever on kFreeBSD + https://bugs.debian.org/742965 seems relevant +Author: Stefano Rivera +Forwarded: https://bitbucket.org/pypy/pypy/commits/9059c53718bb034bf60f93e60975a541e84e13f8 + +--- a/pypy/module/_file/test/test_file.py ++++ b/pypy/module/_file/test/test_file.py +@@ -285,6 +285,8 @@ + from posix import openpty, fdopen, write, close + except ImportError: + skip('no openpty on this platform') ++ if 'gnukfreebsd' in sys.platform: ++ skip('close() hangs forever on kFreeBSD') + read_fd, write_fd = openpty() + write(write_fd, 'Abc\n') + close(write_fd) +--- a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py ++++ 
b/pypy/module/test_lib_pypy/pyrepl/test_bugs.py +@@ -46,7 +46,8 @@ + read_spec(spec, HistoricalTestReader) + + +-@pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") ++@pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " ++ "'kfreebsd' in sys.platform") + def test_signal_failure(monkeypatch): + import os + import pty +--- a/pypy/module/test_lib_pypy/pyrepl/test_readline.py ++++ b/pypy/module/test_lib_pypy/pyrepl/test_readline.py +@@ -1,7 +1,8 @@ + import pytest + + +-@pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform") ++@pytest.mark.skipif("os.name != 'posix' or 'darwin' in sys.platform or " ++ "'kfreebsd' in sys.platform") + def test_raw_input(): + import os + import pty diff -Nru pypy-4.0.1+dfsg/debian/patches/multiarch pypy-5.0.1+dfsg/debian/patches/multiarch --- pypy-4.0.1+dfsg/debian/patches/multiarch 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/multiarch 2016-04-04 23:54:53.000000000 +0000 @@ -134,7 +134,7 @@ space.newtuple([w('.pyc'), w('rb'), w(importing.PY_COMPILED)]), --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py -@@ -723,11 +723,12 @@ +@@ -731,11 +731,12 @@ def test_abi_tag(self): space1 = maketestobjspace(make_config(None, soabi='TEST')) space2 = maketestobjspace(make_config(None, soabi='')) diff -Nru pypy-4.0.1+dfsg/debian/patches/neon pypy-5.0.1+dfsg/debian/patches/neon --- pypy-4.0.1+dfsg/debian/patches/neon 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/neon 2016-04-04 23:54:53.000000000 +0000 @@ -0,0 +1,83 @@ +Description: Only execute NEON instructions on CPUs supporting NEON +Author: Stefano Rivera +Origin: upstream, https://bitbucket.org/pypy/pypy/commits/fc95c9347679f28b1e2998cf4f67bc6859dfb6f8 + +--- a/rpython/jit/backend/arm/detect.py ++++ b/rpython/jit/backend/arm/detect.py +@@ -1,6 +1,7 @@ + import os + + from rpython.translator.tool.cbuild import ExternalCompilationInfo ++from 
rpython.rtyper.lltypesystem import lltype, rffi + from rpython.rtyper.tool import rffi_platform + from rpython.rlib.clibffi import FFI_DEFAULT_ABI, FFI_SYSV, FFI_VFP + from rpython.translator.platform import CompilationError +@@ -15,6 +16,7 @@ + asm volatile("VMOV s0, s1"); + } + """]) ++getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned) + + def detect_hardfloat(): + return FFI_DEFAULT_ABI == FFI_VFP +@@ -63,3 +65,10 @@ + "falling back to", "ARMv%d" % n) + debug_stop("jit-backend-arch") + return n ++ ++ ++def detect_neon(): ++ AT_HWCAP = 16 ++ HWCAP_NEON = 1 << 12 ++ hwcap = getauxval(AT_HWCAP) ++ return bool(hwcap & HWCAP_NEON) +--- a/rpython/jit/backend/arm/opassembler.py ++++ b/rpython/jit/backend/arm/opassembler.py +@@ -1092,8 +1092,8 @@ + self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) + return fcond + +- # the following five instructions are only ARMv7; +- # regalloc.py won't call them at all on ARMv6 ++ # the following five instructions are only ARMv7 with NEON; ++ # regalloc.py won't call them at all, in other cases + emit_opx_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') + emit_opx_llong_sub = gen_emit_float_op('llong_sub', 'VSUB_i64') + emit_opx_llong_and = gen_emit_float_op('llong_and', 'VAND_i64') +--- a/rpython/jit/backend/arm/regalloc.py ++++ b/rpython/jit/backend/arm/regalloc.py +@@ -530,7 +530,7 @@ + EffectInfo.OS_LLONG_AND, + EffectInfo.OS_LLONG_OR, + EffectInfo.OS_LLONG_XOR): +- if self.cpu.cpuinfo.arch_version >= 7: ++ if self.cpu.cpuinfo.neon: + args = self._prepare_llong_binop_xx(op, fcond) + self.perform_extra(op, args, fcond) + return +--- a/rpython/jit/backend/arm/runner.py ++++ b/rpython/jit/backend/arm/runner.py +@@ -7,13 +7,14 @@ + from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER + from rpython.rtyper.lltypesystem import lltype, llmemory + from rpython.jit.backend.arm.detect import detect_hardfloat +-from rpython.jit.backend.arm.detect import detect_arch_version ++from 
rpython.jit.backend.arm.detect import detect_arch_version, detect_neon + + jitframe.STATICSIZE = JITFRAME_FIXED_SIZE + + class CPUInfo(object): + hf_abi = False + arch_version = 6 ++ neon = False + + class AbstractARMCPU(AbstractLLCPU): + +@@ -48,6 +49,7 @@ + def setup_once(self): + self.cpuinfo.arch_version = detect_arch_version() + self.cpuinfo.hf_abi = detect_hardfloat() ++ self.cpuinfo.neon = detect_neon() + #self.codemap.setup() + self.assembler.setup_once() + diff -Nru pypy-4.0.1+dfsg/debian/patches/pep3147-core pypy-5.0.1+dfsg/debian/patches/pep3147-core --- pypy-4.0.1+dfsg/debian/patches/pep3147-core 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/pep3147-core 2016-04-04 23:54:53.000000000 +0000 @@ -6,7 +6,7 @@ --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py -@@ -179,6 +179,11 @@ +@@ -178,6 +178,11 @@ cmdline="--soabi", default=None), @@ -44,7 +44,7 @@ @@ -39,6 +39,7 @@ # split the two usages again. #DEFAULT_SOABI = 'pypy-%d%d' % PYPY_VERSION[:2] - DEFAULT_SOABI = 'pypy-26' + DEFAULT_SOABI = 'pypy-41' +DEFAULT_MAGIC_TAG = DEFAULT_SOABI @specialize.memo() @@ -123,16 +123,16 @@ def parse_source_module(space, pathname, source): """ Parse a source file and return the corresponding code object """ -@@ -890,7 +951,7 @@ +@@ -889,7 +950,7 @@ + w = space.wrap - if space.config.objspace.usepycfiles: - src_stat = os.fstat(fd) -- cpathname = pathname + 'c' -+ cpathname = make_compiled_pathname(space, pathname) - mtime = int(src_stat[stat.ST_MTIME]) - mode = src_stat[stat.ST_MODE] - stream = check_compiled_module(space, cpathname, mtime) -@@ -909,7 +970,7 @@ + src_stat = os.fstat(fd) +- cpathname = pathname + 'c' ++ cpathname = make_compiled_pathname(space, pathname) + mtime = int(src_stat[stat.ST_MTIME]) + mode = src_stat[stat.ST_MODE] + stream = check_compiled_module(space, cpathname, mtime) +@@ -903,7 +964,7 @@ stream.close() except StreamErrors: pass @@ -141,7 +141,7 @@ else: code_w = parse_source_module(space, pathname, 
source) -@@ -925,6 +986,7 @@ +@@ -919,6 +980,7 @@ if optimize >= 2: code_w.remove_docstrings(space) @@ -149,7 +149,7 @@ update_code_filenames(space, code_w, pathname) exec_code_module(space, w_mod, code_w) -@@ -1060,6 +1122,19 @@ +@@ -1054,6 +1116,19 @@ raise #print "Problem while marshalling %s, skipping" % cpathname return @@ -238,7 +238,7 @@ import tempfile, marshal from pypy.module.imp import importing -@@ -101,12 +101,18 @@ +@@ -105,12 +105,18 @@ # create compiled/x.py and a corresponding pyc file p = setuppkg("compiled", x = "x = 84") @@ -258,9 +258,9 @@ else: w = space.wrap w_modname = w("compiled.x") -@@ -121,8 +127,9 @@ +@@ -125,8 +131,9 @@ stream.close() - if space.config.objspace.usepycfiles: + if not space.config.translation.sandbox: # also create a lone .pyc file - p.join('lone.pyc').write(p.join('x.pyc').read(mode='rb'), - mode='wb') @@ -270,7 +270,7 @@ # create a .pyw file p = setuppkg("windows", x = "x = 78") -@@ -746,6 +753,8 @@ +@@ -754,6 +761,8 @@ class TestPycStuff: # ___________________ .pyc related stuff _________________ @@ -279,7 +279,7 @@ def test_check_compiled_module(self): space = self.space -@@ -887,7 +896,8 @@ +@@ -895,7 +904,8 @@ ret = space.int_w(w_ret) assert ret == 42 @@ -289,7 +289,7 @@ assert cpathname.check() cpathname.remove() -@@ -905,7 +915,8 @@ +@@ -913,7 +923,8 @@ write_pyc=False) finally: stream.close() @@ -299,7 +299,7 @@ assert not cpathname.check() def test_load_source_module_dont_write_bytecode(self): -@@ -925,7 +936,8 @@ +@@ -933,7 +944,8 @@ space.setattr(space.sys, space.wrap('dont_write_bytecode'), space.w_False) stream.close() @@ -309,7 +309,7 @@ assert not cpathname.check() def test_load_source_module_syntaxerror(self): -@@ -945,7 +957,8 @@ +@@ -953,7 +965,8 @@ pass stream.close() @@ -319,7 +319,7 @@ assert not cpathname.check() def test_load_source_module_importerror(self): -@@ -966,7 +979,8 @@ +@@ -974,7 +987,8 @@ stream.close() # And the .pyc has been generated @@ -329,7 +329,7 @@ assert cpathname.check() def 
test_write_compiled_module(self): -@@ -983,7 +997,8 @@ +@@ -991,7 +1005,8 @@ pycode = w_ret assert type(pycode) is pypy.interpreter.pycode.PyCode @@ -339,7 +339,7 @@ mode = 0777 mtime = 12345 importing.write_compiled_module(space, -@@ -1055,6 +1070,271 @@ +@@ -1063,6 +1078,271 @@ finally: stream.close() @@ -611,17 +611,41 @@ def test_PYTHONPATH_takes_precedence(space): if sys.platform == "win32": -@@ -1359,17 +1639,18 @@ +@@ -1371,24 +1651,21 @@ + def test_default(self): + import os.path + from test_bytecode import a +- assert a.__file__.endswith('a.py') +- assert os.path.exists(a.__file__ + 'c') == (not self.sandbox) ++ assert os.path.exists(a.__cached__) == (not self.sandbox) + + def test_write_bytecode(self): + import os.path + import sys + sys.dont_write_bytecode = False + from test_bytecode import b +- assert b.__file__.endswith('b.py') +- assert os.path.exists(b.__file__ + 'c') ++ assert os.path.exists(b.__cached__) + + def test_dont_write_bytecode(self): + import os.path + import sys + sys.dont_write_bytecode = True + from test_bytecode import c +- assert c.__file__.endswith('c.py') +- assert not os.path.exists(c.__file__ + 'c') ++ assert not os.path.exists(c.__cached__) + + + class AppTestWriteBytecodeSandbox(AppTestWriteBytecode): +@@ -1408,25 +1685,21 @@ def test_import_possibly_from_pyc(self): from compiled import x +- assert x.__file__.endswith('x.pyc') + assert x.__file__.endswith('x.py') - if self.usepycfiles: -- assert x.__file__.endswith('x.pyc') -+ assert x.__cached__.endswith('.pyc') - else: -- assert x.__file__.endswith('x.py') -+ assert x.__cached__ is None ++ assert x.__cached__.endswith('.pyc') try: from compiled import lone except ImportError: @@ -633,15 +657,13 @@ + assert self.lonepycfiles, "should not have found 'lone.TAG.pyc'" + assert lone.__cached__.endswith('.pyc') - class AppTestNoLonePycFile(AppTestNoPycFile): + class AppTestNoLonePycFile(_AppTestLonePycFileBase): spaceconfig = { -@@ -1377,12 +1658,6 @@ "objspace.lonepycfiles": False 
} --class AppTestLonePycFile(AppTestNoPycFile): +-class AppTestLonePycFile(_AppTestLonePycFileBase): - spaceconfig = { -- "objspace.usepycfiles": True, - "objspace.lonepycfiles": True - } - diff -Nru pypy-4.0.1+dfsg/debian/patches/sandbox-unlink pypy-5.0.1+dfsg/debian/patches/sandbox-unlink --- pypy-4.0.1+dfsg/debian/patches/sandbox-unlink 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/sandbox-unlink 2016-04-04 23:54:53.000000000 +0000 @@ -0,0 +1,19 @@ +Description: Support unlink() in the sandbox VFS + Now that pyc files aren't disableable, we try to unlink() one during startup. + EPERM is handled correctly, but runtime errors are unexpected. + So, let's just reject all unlink()s. +Author: Stefano Rivera +Origin: upstream, https://bitbucket.org/pypy/pypy/commits/a806698962918799797af8e2c1da49207b8669d8 + +--- a/rpython/translator/sandbox/sandlib.py ++++ b/rpython/translator/sandbox/sandlib.py +@@ -540,6 +540,9 @@ + node = self.get_node(vpathname) + return node.keys() + ++ def do_ll_os__ll_os_unlink(self, vpathname): ++ raise OSError(errno.EPERM, "write access denied") ++ + def do_ll_os__ll_os_getuid(self): + return UID + do_ll_os__ll_os_geteuid = do_ll_os__ll_os_getuid diff -Nru pypy-4.0.1+dfsg/debian/patches/series pypy-5.0.1+dfsg/debian/patches/series --- pypy-4.0.1+dfsg/debian/patches/series 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/series 2016-04-04 23:54:53.000000000 +0000 @@ -3,7 +3,6 @@ noise # Upstream features -soabi pep3147-core pep3147-stdlib pep3147-issue11254 @@ -13,6 +12,12 @@ test_termios fpic-archs skip-test_multiprocessing +sandbox-unlink +armv8l +vmprof-kfreebsd +neon +kfreebsd-tests +hg-updates # from python2.7 ensure-valid-term diff -Nru pypy-4.0.1+dfsg/debian/patches/soabi pypy-5.0.1+dfsg/debian/patches/soabi --- pypy-4.0.1+dfsg/debian/patches/soabi 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/soabi 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -Description: 
Expose SOABI in sysconfig - So that dh_pypy can use it to generate useful dependencies. -Author: Stefano Rivera -Last-Update: 2015-11-01 -Bug-Debian: https://bugs.debian.org/803689 - ---- a/lib-python/2.7/sysconfig.py -+++ b/lib-python/2.7/sysconfig.py -@@ -524,6 +524,13 @@ - import _osx_support - _osx_support.customize_config_vars(_CONFIG_VARS) - -+ # PyPy: -+ import imp -+ for suffix, mode, type_ in imp.get_suffixes(): -+ if type_ == imp.C_EXTENSION: -+ _CONFIG_VARS['SOABI'] = suffix.split('.')[1] -+ break -+ - if args: - vals = [] - for name in args: diff -Nru pypy-4.0.1+dfsg/debian/patches/version-info pypy-5.0.1+dfsg/debian/patches/version-info --- pypy-4.0.1+dfsg/debian/patches/version-info 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/version-info 2016-04-04 23:54:53.000000000 +0000 @@ -26,7 +26,7 @@ if os.path.isdir(os.path.join(root, '.git')): --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py -@@ -83,6 +83,7 @@ +@@ -84,6 +84,7 @@ return space.wrap(('PyPy', '', '')) def get_repo_info(space): diff -Nru pypy-4.0.1+dfsg/debian/patches/vmprof-kfreebsd pypy-5.0.1+dfsg/debian/patches/vmprof-kfreebsd --- pypy-4.0.1+dfsg/debian/patches/vmprof-kfreebsd 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/patches/vmprof-kfreebsd 2016-04-04 23:54:53.000000000 +0000 @@ -0,0 +1,33 @@ +Description: Support kFreeBSD in vmprof +Author: Stefano Rivera +Forwarded: https://bitbucket.org/pypy/pypy/commits/936b7e8d9b6c7ee56da6ea651c3f28d2fd384646 + +--- a/rpython/rlib/rvmprof/src/vmprof_config.h ++++ b/rpython/rlib/rvmprof/src/vmprof_config.h +@@ -1,5 +1,5 @@ + #define HAVE_SYS_UCONTEXT_H +-#if defined(__FreeBSD__) || defined(__APPLE__) ++#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__APPLE__) + #define PC_FROM_UCONTEXT uc_mcontext.mc_rip + #else + #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] +--- a/rpython/translator/c/src/thread_pthread.c ++++ b/rpython/translator/c/src/thread_pthread.c +@@ -37,7 
+37,7 @@ + # define THREAD_STACK_SIZE 0 /* use default stack size */ + # endif + +-# if (defined(__APPLE__) || defined(__FreeBSD__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 ++# if (defined(__APPLE__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 + /* The default stack size for new threads on OSX is small enough that + * we'll get hard crashes instead of 'maximum recursion depth exceeded' + * exceptions. +@@ -84,7 +84,7 @@ + if (tss != 0) + pthread_attr_setstacksize(&attrs, tss); + #endif +-#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !defined(__FreeBSD__) ++#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !(defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) + pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM); + #endif + diff -Nru pypy-4.0.1+dfsg/debian/pypy.postinst.in pypy-5.0.1+dfsg/debian/pypy.postinst.in --- pypy-4.0.1+dfsg/debian/pypy.postinst.in 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/pypy.postinst.in 2016-04-04 23:54:53.000000000 +0000 @@ -36,7 +36,7 @@ pypyclean /usr/lib/pypy/lib-python /usr/lib/pypy/lib_pypy fi - pypycompile -p pypy -p pypy-lib + pypycompile -p pypy -p 'pypy-lib:#ARCH#' old_version=$(echo $2 | sed 's/\([[:digit:]]*\.[[:digit:]]*\).*/\1/') new_version=$(echo "#VERSION#" | sed 's/\([[:digit:]]*\.[[:digit:]]*\).*/\1/') diff -Nru pypy-4.0.1+dfsg/debian/rules pypy-5.0.1+dfsg/debian/rules --- pypy-4.0.1+dfsg/debian/rules 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/rules 2016-04-04 23:54:53.000000000 +0000 @@ -1,8 +1,7 @@ #!/usr/bin/make -f -PYTHON=python2.7 # Take advantage of PyPy if it's installed -#PYTHON=$(shell command -v pypy > /dev/null && echo pypy || echo python2.7) +PYTHON=$(shell command -v pypy > /dev/null && echo pypy || echo python2.7) # PyPy doesn't harden well, and handles opt and debug itself BUILDFLAGS_ENV := DEB_BUILD_MAINT_OPTIONS=hardening=-stackprotector,-fortify @@ -68,6 +67,7 @@ 
debian/scripts/gen-backend-versions.py override_dh_fixperms-arch: + debian/scripts/cleanup-lib.sh pypy-lib find debian/pypy-tk \( -name '*.pyc' -o -name '__pycache__' \) -delete # Fix interpreters find debian/pypy-tk \ @@ -76,25 +76,7 @@ dh_fixperms -a override_dh_fixperms-indep: - find debian/pypy-lib debian/pypy-lib-testsuite \ - -name '*.pyc' -delete - find debian/pypy-lib debian/pypy-lib-testsuite \ - -name '__pycache__' -delete - # Don't need these, and lintian will make a noise about them - find debian/pypy-lib \( \ - -name 'regen' \ - -o -name '*.bat' \ - -o -name 'fetch_*' \ - -o -name '*.pickle' \ - \) -delete - # Remove empty directories, because pypyclean will - find debian/pypy-lib*/usr/lib/pypy/lib-python -type d -empty -delete - # Nothing in the stdlib should be executable - chmod -R a-x+X debian/pypy-lib*/usr/lib/pypy/lib* - # Fix interpreters - find debian/pypy-lib debian/pypy-lib-testsuite \ - -name '*.py' -print0 \ - | xargs -0 sed -i -e '1s|^#!.*python.*|#!/usr/bin/pypy|' + debian/scripts/cleanup-lib.sh pypy-lib-testsuite dh_fixperms -i override_dh_sphinxdoc-arch: @@ -114,9 +96,6 @@ done dh_installdeb -override_dh_gencontrol: - dh_gencontrol -- $(shell dpkg-query -f '-Vpypy:Recommends=$${Package} $${Status}\n' --show 'libunwind*-dev' | grep ' installed$$' | cut -d' ' -f 1) - override_dh_builddeb: dh_builddeb -- -Zxz diff -Nru pypy-4.0.1+dfsg/debian/scripts/cleanup-lib.sh pypy-5.0.1+dfsg/debian/scripts/cleanup-lib.sh --- pypy-4.0.1+dfsg/debian/scripts/cleanup-lib.sh 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/scripts/cleanup-lib.sh 2016-04-04 23:54:53.000000000 +0000 @@ -0,0 +1,32 @@ +#!/bin/sh + +set -euxf + +pkg=$1 + +find debian/$pkg \ + -name '*.pyc' -delete +find debian/$pkg \ + -name '__pycache__' -delete + +# Don't need these, and lintian will make a noise about them +find debian/$pkg \( \ + -name 'regen' \ + -o -name '*.bat' \ + -o -name 'fetch_*' \ + -o -name '*.pickle' \ + \) -delete + +# Remove empty directories, 
because pypyclean will +find debian/$pkg/usr/lib/pypy/lib-python -type d -empty -delete + +# Nothing in the stdlib should be executable +chmod -R a-x+X debian/$pkg/usr/lib/pypy/lib-python/ +if [ "$pkg" = "pypy-lib" ]; then + chmod -R a-x+X debian/$pkg/usr/lib/pypy/lib_pypy/ +fi + +# Fix interpreters +find debian/$pkg \ + -name '*.py' -print0 \ + | xargs -0 sed -i -e '1s|^#!.*python.*|#!/usr/bin/pypy|' diff -Nru pypy-4.0.1+dfsg/debian/scripts/translate.sh pypy-5.0.1+dfsg/debian/scripts/translate.sh --- pypy-4.0.1+dfsg/debian/scripts/translate.sh 2015-11-20 19:22:00.000000000 +0000 +++ pypy-5.0.1+dfsg/debian/scripts/translate.sh 2016-04-04 23:54:53.000000000 +0000 @@ -89,6 +89,13 @@ TARGETOPTS="$TARGETOPTS --withoutmod-_continuation" fi +if echo "$PYTHON" | grep -Fq pypy; then + if [ $(dpkg-architecture -q DEB_HOST_ARCH_BITS) -eq 32 ]; then + export PYPY_GC_MAX_DELTA=200MB + PYTHON="$PYTHON --jit loop_longevity=300" + fi +fi + set -x cd pypy/goal -exec "$PYTHON" -u ../../rpython/bin/rpython $RPYOPTS targetpypystandalone $TARGETOPTS 2>&1 +exec $PYTHON -u ../../rpython/bin/rpython $RPYOPTS targetpypystandalone $TARGETOPTS 2>&1 diff -Nru pypy-4.0.1+dfsg/dotviewer/drawgraph.py pypy-5.0.1+dfsg/dotviewer/drawgraph.py --- pypy-4.0.1+dfsg/dotviewer/drawgraph.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/dotviewer/drawgraph.py 2016-03-19 16:40:11.000000000 +0000 @@ -14,12 +14,661 @@ FONT = os.path.join(this_dir, 'font', 'DroidSans.ttf') FIXEDFONT = os.path.join(this_dir, 'font', 'DroidSansMono.ttf') COLOR = { - 'black': (0,0,0), - 'white': (255,255,255), - 'red': (255,0,0), - 'green': (0,255,0), - 'blue': (0,0,255), - 'yellow': (255,255,0), + 'aliceblue': (240, 248, 255), + 'antiquewhite': (250, 235, 215), + 'antiquewhite1': (255, 239, 219), + 'antiquewhite2': (238, 223, 204), + 'antiquewhite3': (205, 192, 176), + 'antiquewhite4': (139, 131, 120), + 'aquamarine': (127, 255, 212), + 'aquamarine1': (127, 255, 212), + 'aquamarine2': (118, 238, 198), + 'aquamarine3': 
(102, 205, 170), + 'aquamarine4': (69, 139, 116), + 'azure': (240, 255, 255), + 'azure1': (240, 255, 255), + 'azure2': (224, 238, 238), + 'azure3': (193, 205, 205), + 'azure4': (131, 139, 139), + 'beige': (245, 245, 220), + 'bisque': (255, 228, 196), + 'bisque1': (255, 228, 196), + 'bisque2': (238, 213, 183), + 'bisque3': (205, 183, 158), + 'bisque4': (139, 125, 107), + 'black': (0, 0, 0), + 'blanchedalmond': (255, 235, 205), + 'blue': (0, 0, 255), + 'blue1': (0, 0, 255), + 'blue2': (0, 0, 238), + 'blue3': (0, 0, 205), + 'blue4': (0, 0, 139), + 'blueviolet': (138, 43, 226), + 'brown': (165, 42, 42), + 'brown1': (255, 64, 64), + 'brown2': (238, 59, 59), + 'brown3': (205, 51, 51), + 'brown4': (139, 35, 35), + 'burlywood': (222, 184, 135), + 'burlywood1': (255, 211, 155), + 'burlywood2': (238, 197, 145), + 'burlywood3': (205, 170, 125), + 'burlywood4': (139, 115, 85), + 'cadetblue': (95, 158, 160), + 'cadetblue1': (152, 245, 255), + 'cadetblue2': (142, 229, 238), + 'cadetblue3': (122, 197, 205), + 'cadetblue4': (83, 134, 139), + 'chartreuse': (127, 255, 0), + 'chartreuse1': (127, 255, 0), + 'chartreuse2': (118, 238, 0), + 'chartreuse3': (102, 205, 0), + 'chartreuse4': (69, 139, 0), + 'chocolate': (210, 105, 30), + 'chocolate1': (255, 127, 36), + 'chocolate2': (238, 118, 33), + 'chocolate3': (205, 102, 29), + 'chocolate4': (139, 69, 19), + 'coral': (255, 127, 80), + 'coral1': (255, 114, 86), + 'coral2': (238, 106, 80), + 'coral3': (205, 91, 69), + 'coral4': (139, 62, 47), + 'cornflowerblue': (100, 149, 237), + 'cornsilk': (255, 248, 220), + 'cornsilk1': (255, 248, 220), + 'cornsilk2': (238, 232, 205), + 'cornsilk3': (205, 200, 177), + 'cornsilk4': (139, 136, 120), + 'crimson': (220, 20, 60), + 'cyan': (0, 255, 255), + 'cyan1': (0, 255, 255), + 'cyan2': (0, 238, 238), + 'cyan3': (0, 205, 205), + 'cyan4': (0, 139, 139), + 'darkgoldenrod': (184, 134, 11), + 'darkgoldenrod1': (255, 185, 15), + 'darkgoldenrod2': (238, 173, 14), + 'darkgoldenrod3': (205, 149, 12), + 
'darkgoldenrod4': (139, 101, 8), + 'darkgreen': (0, 100, 0), + 'darkkhaki': (189, 183, 107), + 'darkolivegreen': (85, 107, 47), + 'darkolivegreen1': (202, 255, 112), + 'darkolivegreen2': (188, 238, 104), + 'darkolivegreen3': (162, 205, 90), + 'darkolivegreen4': (110, 139, 61), + 'darkorange': (255, 140, 0), + 'darkorange1': (255, 127, 0), + 'darkorange2': (238, 118, 0), + 'darkorange3': (205, 102, 0), + 'darkorange4': (139, 69, 0), + 'darkorchid': (153, 50, 204), + 'darkorchid1': (191, 62, 255), + 'darkorchid2': (178, 58, 238), + 'darkorchid3': (154, 50, 205), + 'darkorchid4': (104, 34, 139), + 'darksalmon': (233, 150, 122), + 'darkseagreen': (143, 188, 143), + 'darkseagreen1': (193, 255, 193), + 'darkseagreen2': (180, 238, 180), + 'darkseagreen3': (155, 205, 155), + 'darkseagreen4': (105, 139, 105), + 'darkslateblue': (72, 61, 139), + 'darkslategray': (47, 79, 79), + 'darkslategray1': (151, 255, 255), + 'darkslategray2': (141, 238, 238), + 'darkslategray3': (121, 205, 205), + 'darkslategray4': (82, 139, 139), + 'darkslategrey': (47, 79, 79), + 'darkturquoise': (0, 206, 209), + 'darkviolet': (148, 0, 211), + 'deeppink': (255, 20, 147), + 'deeppink1': (255, 20, 147), + 'deeppink2': (238, 18, 137), + 'deeppink3': (205, 16, 118), + 'deeppink4': (139, 10, 80), + 'deepskyblue': (0, 191, 255), + 'deepskyblue1': (0, 191, 255), + 'deepskyblue2': (0, 178, 238), + 'deepskyblue3': (0, 154, 205), + 'deepskyblue4': (0, 104, 139), + 'dimgray': (105, 105, 105), + 'dimgrey': (105, 105, 105), + 'dodgerblue': (30, 144, 255), + 'dodgerblue1': (30, 144, 255), + 'dodgerblue2': (28, 134, 238), + 'dodgerblue3': (24, 116, 205), + 'dodgerblue4': (16, 78, 139), + 'firebrick': (178, 34, 34), + 'firebrick1': (255, 48, 48), + 'firebrick2': (238, 44, 44), + 'firebrick3': (205, 38, 38), + 'firebrick4': (139, 26, 26), + 'floralwhite': (255, 250, 240), + 'forestgreen': (34, 139, 34), + 'gainsboro': (220, 220, 220), + 'ghostwhite': (248, 248, 255), + 'gold': (255, 215, 0), + 'gold1': (255, 215, 0), 
+ 'gold2': (238, 201, 0), + 'gold3': (205, 173, 0), + 'gold4': (139, 117, 0), + 'goldenrod': (218, 165, 32), + 'goldenrod1': (255, 193, 37), + 'goldenrod2': (238, 180, 34), + 'goldenrod3': (205, 155, 29), + 'goldenrod4': (139, 105, 20), + 'gray': (192, 192, 192), + 'gray0': (0, 0, 0), + 'gray1': (3, 3, 3), + 'gray10': (26, 26, 26), + 'gray100': (255, 255, 255), + 'gray11': (28, 28, 28), + 'gray12': (31, 31, 31), + 'gray13': (33, 33, 33), + 'gray14': (36, 36, 36), + 'gray15': (38, 38, 38), + 'gray16': (41, 41, 41), + 'gray17': (43, 43, 43), + 'gray18': (46, 46, 46), + 'gray19': (48, 48, 48), + 'gray2': (5, 5, 5), + 'gray20': (51, 51, 51), + 'gray21': (54, 54, 54), + 'gray22': (56, 56, 56), + 'gray23': (59, 59, 59), + 'gray24': (61, 61, 61), + 'gray25': (64, 64, 64), + 'gray26': (66, 66, 66), + 'gray27': (69, 69, 69), + 'gray28': (71, 71, 71), + 'gray29': (74, 74, 74), + 'gray3': (8, 8, 8), + 'gray30': (77, 77, 77), + 'gray31': (79, 79, 79), + 'gray32': (82, 82, 82), + 'gray33': (84, 84, 84), + 'gray34': (87, 87, 87), + 'gray35': (89, 89, 89), + 'gray36': (92, 92, 92), + 'gray37': (94, 94, 94), + 'gray38': (97, 97, 97), + 'gray39': (99, 99, 99), + 'gray4': (10, 10, 10), + 'gray40': (102, 102, 102), + 'gray41': (105, 105, 105), + 'gray42': (107, 107, 107), + 'gray43': (110, 110, 110), + 'gray44': (112, 112, 112), + 'gray45': (115, 115, 115), + 'gray46': (117, 117, 117), + 'gray47': (120, 120, 120), + 'gray48': (122, 122, 122), + 'gray49': (125, 125, 125), + 'gray5': (13, 13, 13), + 'gray50': (127, 127, 127), + 'gray51': (130, 130, 130), + 'gray52': (133, 133, 133), + 'gray53': (135, 135, 135), + 'gray54': (138, 138, 138), + 'gray55': (140, 140, 140), + 'gray56': (143, 143, 143), + 'gray57': (145, 145, 145), + 'gray58': (148, 148, 148), + 'gray59': (150, 150, 150), + 'gray6': (15, 15, 15), + 'gray60': (153, 153, 153), + 'gray61': (156, 156, 156), + 'gray62': (158, 158, 158), + 'gray63': (161, 161, 161), + 'gray64': (163, 163, 163), + 'gray65': (166, 166, 166), + 
'gray66': (168, 168, 168), + 'gray67': (171, 171, 171), + 'gray68': (173, 173, 173), + 'gray69': (176, 176, 176), + 'gray7': (18, 18, 18), + 'gray70': (179, 179, 179), + 'gray71': (181, 181, 181), + 'gray72': (184, 184, 184), + 'gray73': (186, 186, 186), + 'gray74': (189, 189, 189), + 'gray75': (191, 191, 191), + 'gray76': (194, 194, 194), + 'gray77': (196, 196, 196), + 'gray78': (199, 199, 199), + 'gray79': (201, 201, 201), + 'gray8': (20, 20, 20), + 'gray80': (204, 204, 204), + 'gray81': (207, 207, 207), + 'gray82': (209, 209, 209), + 'gray83': (212, 212, 212), + 'gray84': (214, 214, 214), + 'gray85': (217, 217, 217), + 'gray86': (219, 219, 219), + 'gray87': (222, 222, 222), + 'gray88': (224, 224, 224), + 'gray89': (227, 227, 227), + 'gray9': (23, 23, 23), + 'gray90': (229, 229, 229), + 'gray91': (232, 232, 232), + 'gray92': (235, 235, 235), + 'gray93': (237, 237, 237), + 'gray94': (240, 240, 240), + 'gray95': (242, 242, 242), + 'gray96': (245, 245, 245), + 'gray97': (247, 247, 247), + 'gray98': (250, 250, 250), + 'gray99': (252, 252, 252), + 'green': (0, 255, 0), + 'green1': (0, 255, 0), + 'green2': (0, 238, 0), + 'green3': (0, 205, 0), + 'green4': (0, 139, 0), + 'greenyellow': (173, 255, 47), + 'grey': (192, 192, 192), + 'grey0': (0, 0, 0), + 'grey1': (3, 3, 3), + 'grey10': (26, 26, 26), + 'grey100': (255, 255, 255), + 'grey11': (28, 28, 28), + 'grey12': (31, 31, 31), + 'grey13': (33, 33, 33), + 'grey14': (36, 36, 36), + 'grey15': (38, 38, 38), + 'grey16': (41, 41, 41), + 'grey17': (43, 43, 43), + 'grey18': (46, 46, 46), + 'grey19': (48, 48, 48), + 'grey2': (5, 5, 5), + 'grey20': (51, 51, 51), + 'grey21': (54, 54, 54), + 'grey22': (56, 56, 56), + 'grey23': (59, 59, 59), + 'grey24': (61, 61, 61), + 'grey25': (64, 64, 64), + 'grey26': (66, 66, 66), + 'grey27': (69, 69, 69), + 'grey28': (71, 71, 71), + 'grey29': (74, 74, 74), + 'grey3': (8, 8, 8), + 'grey30': (77, 77, 77), + 'grey31': (79, 79, 79), + 'grey32': (82, 82, 82), + 'grey33': (84, 84, 84), + 'grey34': 
(87, 87, 87), + 'grey35': (89, 89, 89), + 'grey36': (92, 92, 92), + 'grey37': (94, 94, 94), + 'grey38': (97, 97, 97), + 'grey39': (99, 99, 99), + 'grey4': (10, 10, 10), + 'grey40': (102, 102, 102), + 'grey41': (105, 105, 105), + 'grey42': (107, 107, 107), + 'grey43': (110, 110, 110), + 'grey44': (112, 112, 112), + 'grey45': (115, 115, 115), + 'grey46': (117, 117, 117), + 'grey47': (120, 120, 120), + 'grey48': (122, 122, 122), + 'grey49': (125, 125, 125), + 'grey5': (13, 13, 13), + 'grey50': (127, 127, 127), + 'grey51': (130, 130, 130), + 'grey52': (133, 133, 133), + 'grey53': (135, 135, 135), + 'grey54': (138, 138, 138), + 'grey55': (140, 140, 140), + 'grey56': (143, 143, 143), + 'grey57': (145, 145, 145), + 'grey58': (148, 148, 148), + 'grey59': (150, 150, 150), + 'grey6': (15, 15, 15), + 'grey60': (153, 153, 153), + 'grey61': (156, 156, 156), + 'grey62': (158, 158, 158), + 'grey63': (161, 161, 161), + 'grey64': (163, 163, 163), + 'grey65': (166, 166, 166), + 'grey66': (168, 168, 168), + 'grey67': (171, 171, 171), + 'grey68': (173, 173, 173), + 'grey69': (176, 176, 176), + 'grey7': (18, 18, 18), + 'grey70': (179, 179, 179), + 'grey71': (181, 181, 181), + 'grey72': (184, 184, 184), + 'grey73': (186, 186, 186), + 'grey74': (189, 189, 189), + 'grey75': (191, 191, 191), + 'grey76': (194, 194, 194), + 'grey77': (196, 196, 196), + 'grey78': (199, 199, 199), + 'grey79': (201, 201, 201), + 'grey8': (20, 20, 20), + 'grey80': (204, 204, 204), + 'grey81': (207, 207, 207), + 'grey82': (209, 209, 209), + 'grey83': (212, 212, 212), + 'grey84': (214, 214, 214), + 'grey85': (217, 217, 217), + 'grey86': (219, 219, 219), + 'grey87': (222, 222, 222), + 'grey88': (224, 224, 224), + 'grey89': (227, 227, 227), + 'grey9': (23, 23, 23), + 'grey90': (229, 229, 229), + 'grey91': (232, 232, 232), + 'grey92': (235, 235, 235), + 'grey93': (237, 237, 237), + 'grey94': (240, 240, 240), + 'grey95': (242, 242, 242), + 'grey96': (245, 245, 245), + 'grey97': (247, 247, 247), + 'grey98': (250, 250, 
250), + 'grey99': (252, 252, 252), + 'honeydew': (240, 255, 240), + 'honeydew1': (240, 255, 240), + 'honeydew2': (224, 238, 224), + 'honeydew3': (193, 205, 193), + 'honeydew4': (131, 139, 131), + 'hotpink': (255, 105, 180), + 'hotpink1': (255, 110, 180), + 'hotpink2': (238, 106, 167), + 'hotpink3': (205, 96, 144), + 'hotpink4': (139, 58, 98), + 'indianred': (205, 92, 92), + 'indianred1': (255, 106, 106), + 'indianred2': (238, 99, 99), + 'indianred3': (205, 85, 85), + 'indianred4': (139, 58, 58), + 'indigo': (75, 0, 130), + 'invis': (255, 255, 254), + 'ivory': (255, 255, 240), + 'ivory1': (255, 255, 240), + 'ivory2': (238, 238, 224), + 'ivory3': (205, 205, 193), + 'ivory4': (139, 139, 131), + 'khaki': (240, 230, 140), + 'khaki1': (255, 246, 143), + 'khaki2': (238, 230, 133), + 'khaki3': (205, 198, 115), + 'khaki4': (139, 134, 78), + 'lavender': (230, 230, 250), + 'lavenderblush': (255, 240, 245), + 'lavenderblush1': (255, 240, 245), + 'lavenderblush2': (238, 224, 229), + 'lavenderblush3': (205, 193, 197), + 'lavenderblush4': (139, 131, 134), + 'lawngreen': (124, 252, 0), + 'lemonchiffon': (255, 250, 205), + 'lemonchiffon1': (255, 250, 205), + 'lemonchiffon2': (238, 233, 191), + 'lemonchiffon3': (205, 201, 165), + 'lemonchiffon4': (139, 137, 112), + 'lightblue': (173, 216, 230), + 'lightblue1': (191, 239, 255), + 'lightblue2': (178, 223, 238), + 'lightblue3': (154, 192, 205), + 'lightblue4': (104, 131, 139), + 'lightcoral': (240, 128, 128), + 'lightcyan': (224, 255, 255), + 'lightcyan1': (224, 255, 255), + 'lightcyan2': (209, 238, 238), + 'lightcyan3': (180, 205, 205), + 'lightcyan4': (122, 139, 139), + 'lightgoldenrod': (238, 221, 130), + 'lightgoldenrod1': (255, 236, 139), + 'lightgoldenrod2': (238, 220, 130), + 'lightgoldenrod3': (205, 190, 112), + 'lightgoldenrod4': (139, 129, 76), + 'lightgoldenrodyellow': (250, 250, 210), + 'lightgray': (211, 211, 211), + 'lightgrey': (211, 211, 211), + 'lightpink': (255, 182, 193), + 'lightpink1': (255, 174, 185), + 
'lightpink2': (238, 162, 173), + 'lightpink3': (205, 140, 149), + 'lightpink4': (139, 95, 101), + 'lightsalmon': (255, 160, 122), + 'lightsalmon1': (255, 160, 122), + 'lightsalmon2': (238, 149, 114), + 'lightsalmon3': (205, 129, 98), + 'lightsalmon4': (139, 87, 66), + 'lightseagreen': (32, 178, 170), + 'lightskyblue': (135, 206, 250), + 'lightskyblue1': (176, 226, 255), + 'lightskyblue2': (164, 211, 238), + 'lightskyblue3': (141, 182, 205), + 'lightskyblue4': (96, 123, 139), + 'lightslateblue': (132, 112, 255), + 'lightslategray': (119, 136, 153), + 'lightslategrey': (119, 136, 153), + 'lightsteelblue': (176, 196, 222), + 'lightsteelblue1': (202, 225, 255), + 'lightsteelblue2': (188, 210, 238), + 'lightsteelblue3': (162, 181, 205), + 'lightsteelblue4': (110, 123, 139), + 'lightyellow': (255, 255, 224), + 'lightyellow1': (255, 255, 224), + 'lightyellow2': (238, 238, 209), + 'lightyellow3': (205, 205, 180), + 'lightyellow4': (139, 139, 122), + 'limegreen': (50, 205, 50), + 'linen': (250, 240, 230), + 'magenta': (255, 0, 255), + 'magenta1': (255, 0, 255), + 'magenta2': (238, 0, 238), + 'magenta3': (205, 0, 205), + 'magenta4': (139, 0, 139), + 'maroon': (176, 48, 96), + 'maroon1': (255, 52, 179), + 'maroon2': (238, 48, 167), + 'maroon3': (205, 41, 144), + 'maroon4': (139, 28, 98), + 'mediumaquamarine': (102, 205, 170), + 'mediumblue': (0, 0, 205), + 'mediumorchid': (186, 85, 211), + 'mediumorchid1': (224, 102, 255), + 'mediumorchid2': (209, 95, 238), + 'mediumorchid3': (180, 82, 205), + 'mediumorchid4': (122, 55, 139), + 'mediumpurple': (147, 112, 219), + 'mediumpurple1': (171, 130, 255), + 'mediumpurple2': (159, 121, 238), + 'mediumpurple3': (137, 104, 205), + 'mediumpurple4': (93, 71, 139), + 'mediumseagreen': (60, 179, 113), + 'mediumslateblue': (123, 104, 238), + 'mediumspringgreen': (0, 250, 154), + 'mediumturquoise': (72, 209, 204), + 'mediumvioletred': (199, 21, 133), + 'midnightblue': (25, 25, 112), + 'mintcream': (245, 255, 250), + 'mistyrose': (255, 228, 
225), + 'mistyrose1': (255, 228, 225), + 'mistyrose2': (238, 213, 210), + 'mistyrose3': (205, 183, 181), + 'mistyrose4': (139, 125, 123), + 'moccasin': (255, 228, 181), + 'navajowhite': (255, 222, 173), + 'navajowhite1': (255, 222, 173), + 'navajowhite2': (238, 207, 161), + 'navajowhite3': (205, 179, 139), + 'navajowhite4': (139, 121, 94), + 'navy': (0, 0, 128), + 'navyblue': (0, 0, 128), + 'none': (255, 255, 254), + 'oldlace': (253, 245, 230), + 'olivedrab': (107, 142, 35), + 'olivedrab1': (192, 255, 62), + 'olivedrab2': (179, 238, 58), + 'olivedrab3': (154, 205, 50), + 'olivedrab4': (105, 139, 34), + 'orange': (255, 165, 0), + 'orange1': (255, 165, 0), + 'orange2': (238, 154, 0), + 'orange3': (205, 133, 0), + 'orange4': (139, 90, 0), + 'orangered': (255, 69, 0), + 'orangered1': (255, 69, 0), + 'orangered2': (238, 64, 0), + 'orangered3': (205, 55, 0), + 'orangered4': (139, 37, 0), + 'orchid': (218, 112, 214), + 'orchid1': (255, 131, 250), + 'orchid2': (238, 122, 233), + 'orchid3': (205, 105, 201), + 'orchid4': (139, 71, 137), + 'palegoldenrod': (238, 232, 170), + 'palegreen': (152, 251, 152), + 'palegreen1': (154, 255, 154), + 'palegreen2': (144, 238, 144), + 'palegreen3': (124, 205, 124), + 'palegreen4': (84, 139, 84), + 'paleturquoise': (175, 238, 238), + 'paleturquoise1': (187, 255, 255), + 'paleturquoise2': (174, 238, 238), + 'paleturquoise3': (150, 205, 205), + 'paleturquoise4': (102, 139, 139), + 'palevioletred': (219, 112, 147), + 'palevioletred1': (255, 130, 171), + 'palevioletred2': (238, 121, 159), + 'palevioletred3': (205, 104, 137), + 'palevioletred4': (139, 71, 93), + 'papayawhip': (255, 239, 213), + 'peachpuff': (255, 218, 185), + 'peachpuff1': (255, 218, 185), + 'peachpuff2': (238, 203, 173), + 'peachpuff3': (205, 175, 149), + 'peachpuff4': (139, 119, 101), + 'peru': (205, 133, 63), + 'pink': (255, 192, 203), + 'pink1': (255, 181, 197), + 'pink2': (238, 169, 184), + 'pink3': (205, 145, 158), + 'pink4': (139, 99, 108), + 'plum': (221, 160, 221), + 
'plum1': (255, 187, 255), + 'plum2': (238, 174, 238), + 'plum3': (205, 150, 205), + 'plum4': (139, 102, 139), + 'powderblue': (176, 224, 230), + 'purple': (160, 32, 240), + 'purple1': (155, 48, 255), + 'purple2': (145, 44, 238), + 'purple3': (125, 38, 205), + 'purple4': (85, 26, 139), + 'red': (255, 0, 0), + 'red1': (255, 0, 0), + 'red2': (238, 0, 0), + 'red3': (205, 0, 0), + 'red4': (139, 0, 0), + 'rosybrown': (188, 143, 143), + 'rosybrown1': (255, 193, 193), + 'rosybrown2': (238, 180, 180), + 'rosybrown3': (205, 155, 155), + 'rosybrown4': (139, 105, 105), + 'royalblue': (65, 105, 225), + 'royalblue1': (72, 118, 255), + 'royalblue2': (67, 110, 238), + 'royalblue3': (58, 95, 205), + 'royalblue4': (39, 64, 139), + 'saddlebrown': (139, 69, 19), + 'salmon': (250, 128, 114), + 'salmon1': (255, 140, 105), + 'salmon2': (238, 130, 98), + 'salmon3': (205, 112, 84), + 'salmon4': (139, 76, 57), + 'sandybrown': (244, 164, 96), + 'seagreen': (46, 139, 87), + 'seagreen1': (84, 255, 159), + 'seagreen2': (78, 238, 148), + 'seagreen3': (67, 205, 128), + 'seagreen4': (46, 139, 87), + 'seashell': (255, 245, 238), + 'seashell1': (255, 245, 238), + 'seashell2': (238, 229, 222), + 'seashell3': (205, 197, 191), + 'seashell4': (139, 134, 130), + 'sienna': (160, 82, 45), + 'sienna1': (255, 130, 71), + 'sienna2': (238, 121, 66), + 'sienna3': (205, 104, 57), + 'sienna4': (139, 71, 38), + 'skyblue': (135, 206, 235), + 'skyblue1': (135, 206, 255), + 'skyblue2': (126, 192, 238), + 'skyblue3': (108, 166, 205), + 'skyblue4': (74, 112, 139), + 'slateblue': (106, 90, 205), + 'slateblue1': (131, 111, 255), + 'slateblue2': (122, 103, 238), + 'slateblue3': (105, 89, 205), + 'slateblue4': (71, 60, 139), + 'slategray': (112, 128, 144), + 'slategray1': (198, 226, 255), + 'slategray2': (185, 211, 238), + 'slategray3': (159, 182, 205), + 'slategray4': (108, 123, 139), + 'slategrey': (112, 128, 144), + 'snow': (255, 250, 250), + 'snow1': (255, 250, 250), + 'snow2': (238, 233, 233), + 'snow3': (205, 201, 
201), + 'snow4': (139, 137, 137), + 'springgreen': (0, 255, 127), + 'springgreen1': (0, 255, 127), + 'springgreen2': (0, 238, 118), + 'springgreen3': (0, 205, 102), + 'springgreen4': (0, 139, 69), + 'steelblue': (70, 130, 180), + 'steelblue1': (99, 184, 255), + 'steelblue2': (92, 172, 238), + 'steelblue3': (79, 148, 205), + 'steelblue4': (54, 100, 139), + 'tan': (210, 180, 140), + 'tan1': (255, 165, 79), + 'tan2': (238, 154, 73), + 'tan3': (205, 133, 63), + 'tan4': (139, 90, 43), + 'thistle': (216, 191, 216), + 'thistle1': (255, 225, 255), + 'thistle2': (238, 210, 238), + 'thistle3': (205, 181, 205), + 'thistle4': (139, 123, 139), + 'tomato': (255, 99, 71), + 'tomato1': (255, 99, 71), + 'tomato2': (238, 92, 66), + 'tomato3': (205, 79, 57), + 'tomato4': (139, 54, 38), + 'transparent': (255, 255, 254), + 'turquoise': (64, 224, 208), + 'turquoise1': (0, 245, 255), + 'turquoise2': (0, 229, 238), + 'turquoise3': (0, 197, 205), + 'turquoise4': (0, 134, 139), + 'violet': (238, 130, 238), + 'violetred': (208, 32, 144), + 'violetred1': (255, 62, 150), + 'violetred2': (238, 58, 140), + 'violetred3': (205, 50, 120), + 'violetred4': (139, 34, 82), + 'wheat': (245, 222, 179), + 'wheat1': (255, 231, 186), + 'wheat2': (238, 216, 174), + 'wheat3': (205, 186, 150), + 'wheat4': (139, 126, 102), + 'white': (255, 255, 255), + 'whitesmoke': (245, 245, 245), + 'yellow': (255, 255, 0), + 'yellow1': (255, 255, 0), + 'yellow2': (238, 238, 0), + 'yellow3': (205, 205, 0), + 'yellow4': (139, 139, 0), + 'yellowgreen': (154, 205, 50), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') diff -Nru pypy-4.0.1+dfsg/.gitignore pypy-5.0.1+dfsg/.gitignore --- pypy-4.0.1+dfsg/.gitignore 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/.gitignore 2016-03-19 16:40:11.000000000 +0000 @@ -29,4 +29,4 @@ release/ !pypy/tool/release/ rpython/_cache/ -__pycache__/ +.cache/ diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/api.py 
pypy-5.0.1+dfsg/lib_pypy/cffi/api.py --- pypy-4.0.1+dfsg/lib_pypy/cffi/api.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/api.py 2016-03-19 16:40:12.000000000 +0000 @@ -72,6 +72,9 @@ self._cdefsources = [] self._included_ffis = [] self._windows_unicode = None + self._init_once_cache = {} + self._cdef_version = None + self._embedding = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -99,12 +102,21 @@ If 'packed' is specified as True, all structs declared inside this cdef are packed, i.e. laid out without any field alignment at all. """ + self._cdef(csource, override=override, packed=packed) + + def embedding_api(self, csource, packed=False): + self._cdef(csource, packed=packed, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: - self._parser.parse(csource, override=override, packed=packed) + self._cdef_version = object() + self._parser.parse(csource, override=override, **options) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -530,6 +542,53 @@ ('_UNICODE', '1')] kwds['define_macros'] = defmacros + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + def ensure(key, value): + lst = kwds.setdefault(key, []) + if value not in lst: + lst.append(value) + # + if '__pypy__' in sys.builtin_module_names: + import os + if sys.platform == "win32": + # we need 'libpypy-c.lib'. Current distributions of + # pypy (>= 4.1) contain it as 'libs/python27.lib'. 
+ pythonlib = "python27" + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'libs')) + else: + # we need 'libpypy-c.{so,dylib}', which should be by + # default located in 'sys.prefix/bin' for installed + # systems. + pythonlib = "pypy-c" + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'bin')) + # On uninstalled pypy's, the libpypy-c is typically found in + # .../pypy/goal/. + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal')) + else: + if sys.platform == "win32": + template = "python%d%d" + if hasattr(sys, 'gettotalrefcount'): + template += '_d' + else: + try: + import sysconfig + except ImportError: # 2.6 + from distutils import sysconfig + template = "python%d.%d" + if sysconfig.get_config_var('DEBUG_EXT'): + template += sysconfig.get_config_var('DEBUG_EXT') + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + ensure('libraries', pythonlib) + if sys.platform == "win32": + ensure('extra_link_args', '/MANIFEST') + def set_source(self, module_name, source, source_extension='.c', **kwds): if hasattr(self, '_assigned_source'): raise ValueError("set_source() cannot be called several times " @@ -589,14 +648,78 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.'): + def compile(self, tmpdir='.', verbose=0, target=None): + """The 'target' argument gives the final file name of the + compiled DLL. Use '*' to force distutils' choice, suitable for + regular CPython C API modules. Use a file name ending in '.*' + to ask for the system's default extension for dynamic libraries + (.so/.dll/.dylib). + + The default is '*' when building a non-embedded C API extension, + and (module_name + '.*') when building an embedded library. 
+ """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, **kwds) + target=target, source_extension=source_extension, + compiler_verbose=verbose, **kwds) + + def init_once(self, func, tag): + # Read _init_once_cache[tag], which is either (False, lock) if + # we're calling the function now in some thread, or (True, result). + # Don't call setdefault() in most cases, to avoid allocating and + # immediately freeing a lock; but still use setdefaut() to avoid + # races. + try: + x = self._init_once_cache[tag] + except KeyError: + x = self._init_once_cache.setdefault(tag, (False, allocate_lock())) + # Common case: we got (True, result), so we return the result. + if x[0]: + return x[1] + # Else, it's a lock. Acquire it to serialize the following tests. + with x[1]: + # Read again from _init_once_cache the current status. + x = self._init_once_cache[tag] + if x[0]: + return x[1] + # Call the function and store the result back. 
+ result = func() + self._init_once_cache[tag] = (True, result) + return result + + def embedding_init_code(self, pysource): + if self._embedding: + raise ValueError("embedding_init_code() can only be called once") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource + + def def_extern(self, *args, **kwds): + raise ValueError("ffi.def_extern() is only available on API-mode FFI " + "objects") def _load_backend_lib(backend, name, flags): @@ -620,70 +743,70 @@ import os backend = ffi._backend backendlib = _load_backend_lib(backend, libname, flags) - copied_enums = [] # - def make_accessor_locked(name): + def accessor_function(name): key = 'function ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - try: - value = backendlib.load_function(BType, name) - except KeyError as e: - raise AttributeError('%s: %s' % (name, e)) - library.__dict__[name] = value - return - # + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + try: + value = backendlib.load_function(BType, name) + except KeyError as e: + raise AttributeError('%s: %s' % (name, e)) + library.__dict__[name] = value + # + def accessor_variable(name): key = 'variable ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - 
read_variable = backendlib.read_variable - write_variable = backendlib.write_variable - setattr(FFILibrary, name, property( - lambda self: read_variable(BType, name), - lambda self, value: write_variable(BType, name, value))) + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + read_variable = backendlib.read_variable + write_variable = backendlib.write_variable + setattr(FFILibrary, name, property( + lambda self: read_variable(BType, name), + lambda self, value: write_variable(BType, name, value))) + # + def accessor_constant(name): + raise NotImplementedError("non-integer constant '%s' cannot be " + "accessed from a dlopen() library" % (name,)) + # + def accessor_int_constant(name): + library.__dict__[name] = ffi._parser._int_constants[name] + # + accessors = {} + accessors_version = [False] + # + def update_accessors(): + if accessors_version[0] is ffi._cdef_version: return # - if not copied_enums: - from . import model - error = None - for key, (tp, _) in ffi._parser._declarations.items(): - if not isinstance(tp, model.EnumType): - continue - try: - tp.check_not_partial() - except Exception as e: - error = e - continue - for enumname, enumval in zip(tp.enumerators, tp.enumvalues): - if enumname not in library.__dict__: - library.__dict__[enumname] = enumval - if error is not None: - if name in library.__dict__: - return # ignore error, about a different enum - raise error - - for key, val in ffi._parser._int_constants.items(): - if key not in library.__dict__: - library.__dict__[key] = val - - copied_enums.append(True) - if name in library.__dict__: - return - # - key = 'constant ' + name - if key in ffi._parser._declarations: - raise NotImplementedError("fetching a non-integer constant " - "after dlopen()") - # - raise AttributeError(name) + from . 
import model + for key, (tp, _) in ffi._parser._declarations.items(): + if not isinstance(tp, model.EnumType): + tag, name = key.split(' ', 1) + if tag == 'function': + accessors[name] = accessor_function + elif tag == 'variable': + accessors[name] = accessor_variable + elif tag == 'constant': + accessors[name] = accessor_constant + else: + for i, enumname in enumerate(tp.enumerators): + def accessor_enum(name, tp=tp, i=i): + tp.check_not_partial() + library.__dict__[name] = tp.enumvalues[i] + accessors[enumname] = accessor_enum + for name in ffi._parser._int_constants: + accessors.setdefault(name, accessor_int_constant) + accessors_version[0] = ffi._cdef_version # def make_accessor(name): with ffi._lock: if name in library.__dict__ or name in FFILibrary.__dict__: return # added by another thread while waiting for the lock - make_accessor_locked(name) + if name not in accessors: + update_accessors() + if name not in accessors: + raise AttributeError(name) + accessors[name](name) # class FFILibrary(object): def __getattr__(self, name): @@ -697,6 +820,10 @@ setattr(self, name, value) else: property.__set__(self, value) + def __dir__(self): + with ffi._lock: + update_accessors() + return accessors.keys() # if libname is not None: try: diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/_cffi_include.h pypy-5.0.1+dfsg/lib_pypy/cffi/_cffi_include.h --- pypy-4.0.1+dfsg/lib_pypy/cffi/_cffi_include.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/_cffi_include.h 2016-03-19 16:40:12.000000000 +0000 @@ -146,7 +146,10 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) -#define _CFFI_NUM_EXPORTS 25 +#define _CFFI_CPIDX 25 +#define _cffi_call_python \ + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) +#define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -201,8 +204,12 @@ the others 
follow */ } -#endif /********** end CPython-specific section **********/ +#else +_CFFI_UNUSED_FN +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org +#endif #define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0])) @@ -224,6 +231,12 @@ ((got_nonpos) == (expected <= 0) && \ (got) == (unsigned long long)expected) +#ifdef MS_WIN32 +# define _cffi_stdcall __stdcall +#else +# define _cffi_stdcall /* nothing */ +#endif + #ifdef __cplusplus } #endif diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/cffi_opcode.py pypy-5.0.1+dfsg/lib_pypy/cffi/cffi_opcode.py --- pypy-4.0.1+dfsg/lib_pypy/cffi/cffi_opcode.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/cffi_opcode.py 2016-03-19 16:40:12.000000000 +0000 @@ -54,6 +54,7 @@ OP_DLOPEN_FUNC = 35 OP_DLOPEN_CONST = 37 OP_GLOBAL_VAR_F = 39 +OP_EXTERN_PYTHON = 41 PRIM_VOID = 0 PRIM_BOOL = 1 diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/cparser.py pypy-5.0.1+dfsg/lib_pypy/cffi/cparser.py --- pypy-4.0.1+dfsg/lib_pypy/cffi/cparser.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/cparser.py 2016-03-19 16:40:12.000000000 +0000 @@ -29,6 +29,7 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") +_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") @@ -80,6 +81,47 @@ parts.append(csource) return ''.join(parts) +def _preprocess_extern_python(csource): + # input: `extern "Python" int foo(int);` or + # `extern "Python" { int foo(int); }` + # output: + # void __cffi_extern_python_start; + # int foo(int); + # void __cffi_extern_python_stop; + parts = [] + while True: + match = _r_extern_python.search(csource) + if not match: + break + endpos = match.end() - 1 + #print + #print ''.join(parts)+csource + #print '=>' + 
parts.append(csource[:match.start()]) + parts.append('void __cffi_extern_python_start; ') + if csource[endpos] == '{': + # grouping variant + closing = csource.find('}', endpos) + if closing < 0: + raise api.CDefError("'extern \"Python\" {': no '}' found") + if csource.find('{', endpos + 1, closing) >= 0: + raise NotImplementedError("cannot use { } inside a block " + "'extern \"Python\" { ... }'") + parts.append(csource[endpos+1:closing]) + csource = csource[closing+1:] + else: + # non-grouping variant + semicolon = csource.find(';', endpos) + if semicolon < 0: + raise api.CDefError("'extern \"Python\": no ';' found") + parts.append(csource[endpos:semicolon+1]) + csource = csource[semicolon+1:] + parts.append(' void __cffi_extern_python_stop;') + #print ''.join(parts)+csource + #print + parts.append(csource) + return ''.join(parts) + def _preprocess(csource): # Remove comments. NOTE: this only work because the cdef() section # should not contain any string literal! @@ -103,8 +145,13 @@ csource = _r_stdcall2.sub(' volatile volatile const(', csource) csource = _r_stdcall1.sub(' volatile volatile const ', csource) csource = _r_cdecl.sub(' ', csource) + # + # Replace `extern "Python"` with start/end markers + csource = _preprocess_extern_python(csource) + # # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) + # # Replace "...}" with "__dotdotdotNUM__}". This construction should # occur only at the end of enums; at the end of structs we have "...;}" # and at the end of vararg functions "...);". 
Also replace "=...[,}]" @@ -173,8 +220,7 @@ self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() - self._override = False - self._packed = False + self._options = {} self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -234,16 +280,15 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False, packed=False): - prev_override = self._override - prev_packed = self._packed + def parse(self, csource, override=False, packed=False, dllexport=False): + prev_options = self._options try: - self._override = override - self._packed = packed + self._options = {'override': override, + 'packed': packed, + 'dllexport': dllexport} self._internal_parse(csource) finally: - self._override = prev_override - self._packed = prev_packed + self._options = prev_options def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) @@ -257,6 +302,7 @@ break # try: + self._inside_extern_python = False for decl in iterator: if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) @@ -326,13 +372,22 @@ ' #define %s %s' % (key, key, key, value)) + def _declare_function(self, tp, quals, decl): + tp = self._get_type_pointer(tp, quals) + if self._options.get('dllexport'): + tag = 'dllexport_python ' + elif self._inside_extern_python: + tag = 'extern_python ' + else: + tag = 'function ' + self._declare(tag + decl.name, tp) + def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp, quals) - self._declare('function ' + decl.name, tp) + self._declare_function(tp, quals, decl) else: if isinstance(node, pycparser.c_ast.Struct): self._get_struct_union_enum_type('struct', node) @@ -348,8 +403,7 @@ tp, quals = self._get_type_and_quals(node, partial_length_ok=True) if 
tp.is_raw_function: - tp = self._get_type_pointer(tp, quals) - self._declare('function ' + decl.name, tp) + self._declare_function(tp, quals, decl) elif (tp.is_integer_type() and hasattr(decl, 'init') and hasattr(decl.init, 'value') and @@ -362,10 +416,23 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif (quals & model.Q_CONST) and not tp.is_array_type: - self._declare('constant ' + decl.name, tp, quals=quals) + elif (tp is model.void_type and + decl.name.startswith('__cffi_extern_python_')): + # hack: `extern "Python"` in the C source is replaced + # with "void __cffi_extern_python_start;" and + # "void __cffi_extern_python_stop;" + self._inside_extern_python = not self._inside_extern_python + assert self._inside_extern_python == ( + decl.name == '__cffi_extern_python_start') else: - self._declare('variable ' + decl.name, tp, quals=quals) + if self._inside_extern_python: + raise api.CDefError( + "cannot declare constants or " + "variables with 'extern \"Python\"'") + if (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) + else: + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): return self.parse_type_and_quals(cdecl)[0] @@ -383,7 +450,7 @@ prevobj, prevquals = self._declarations[name] if prevobj is obj and prevquals == quals: return - if not self._override: + if not self._options.get('override'): raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) @@ -662,7 +729,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) - tp.packed = self._packed + tp.packed = self._options.get('packed') if tp.completed: # must be re-completed: it is not opaque any more tp.completed = 0 self._recomplete.append(tp) diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/_embedding.h 
pypy-5.0.1+dfsg/lib_pypy/cffi/_embedding.h --- pypy-4.0.1+dfsg/lib_pypy/cffi/_embedding.h 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/_embedding.h 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,517 @@ + +/***** Support code for embedding *****/ + +#if defined(_MSC_VER) +# define CFFI_DLLEXPORT __declspec(dllexport) +#elif defined(__GNUC__) +# define CFFI_DLLEXPORT __attribute__((visibility("default"))) +#else +# define CFFI_DLLEXPORT /* nothing */ +#endif + + +/* There are two global variables of type _cffi_call_python_fnptr: + + * _cffi_call_python, which we declare just below, is the one called + by ``extern "Python"`` implementations. + + * _cffi_call_python_org, which on CPython is actually part of the + _cffi_exports[] array, is the function pointer copied from + _cffi_backend. + + After initialization is complete, both are equal. However, the + first one remains equal to &_cffi_start_and_call_python until the + very end of initialization, when we are (or should be) sure that + concurrent threads also see a completely initialized world, and + only then is it changed. 
+*/ +#undef _cffi_call_python +typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *); +static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *); +static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python; + + +#ifndef _MSC_VER + /* --- Assuming a GCC not infinitely old --- */ +# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n) +# define cffi_write_barrier() __sync_synchronize() +# if !defined(__amd64__) && !defined(__x86_64__) && \ + !defined(__i386__) && !defined(__i386) +# define cffi_read_barrier() __sync_synchronize() +# else +# define cffi_read_barrier() (void)0 +# endif +#else + /* --- Windows threads version --- */ +# include +# define cffi_compare_and_swap(l,o,n) \ + (InterlockedCompareExchangePointer(l,n,o) == (o)) +# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0) +# define cffi_read_barrier() (void)0 +static volatile LONG _cffi_dummy; +#endif + +#ifdef WITH_THREAD +# ifndef _MSC_VER +# include + static pthread_mutex_t _cffi_embed_startup_lock; +# else + static CRITICAL_SECTION _cffi_embed_startup_lock; +# endif + static char _cffi_embed_startup_lock_ready = 0; +#endif + +static void _cffi_acquire_reentrant_mutex(void) +{ + static void *volatile lock = NULL; + + while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) { + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: pthread_mutex_init() should be very fast, and + this is only run at start-up anyway. 
*/ + } + +#ifdef WITH_THREAD + if (!_cffi_embed_startup_lock_ready) { +# ifndef _MSC_VER + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&_cffi_embed_startup_lock, &attr); +# else + InitializeCriticalSection(&_cffi_embed_startup_lock); +# endif + _cffi_embed_startup_lock_ready = 1; + } +#endif + + while (!cffi_compare_and_swap(&lock, (void *)1, NULL)) + ; + +#ifndef _MSC_VER + pthread_mutex_lock(&_cffi_embed_startup_lock); +#else + EnterCriticalSection(&_cffi_embed_startup_lock); +#endif +} + +static void _cffi_release_reentrant_mutex(void) +{ +#ifndef _MSC_VER + pthread_mutex_unlock(&_cffi_embed_startup_lock); +#else + LeaveCriticalSection(&_cffi_embed_startup_lock); +#endif +} + + +/********** CPython-specific section **********/ +#ifndef PYPY_VERSION + + +#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX] + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */ + +static void _cffi_py_initialize(void) +{ + /* XXX use initsigs=0, which "skips initialization registration of + signal handlers, which might be useful when Python is + embedded" according to the Python docs. But review and think + if it should be a user-controllable setting. + + XXX we should also give a way to write errors to a buffer + instead of to stderr. + + XXX if importing 'site' fails, CPython (any version) calls + exit(). Should we try to work around this behavior here? + */ + Py_InitializeEx(0); +} + +static int _cffi_initialize_python(void) +{ + /* This initializes Python, imports _cffi_backend, and then the + present .dll/.so is set up as a CPython C extension module. + */ + int result; + PyGILState_STATE state; + PyObject *pycode=NULL, *global_dict=NULL, *x; + +#if PY_MAJOR_VERSION >= 3 + /* see comments in _cffi_carefully_make_gil() about the + Python2/Python3 difference + */ +#else + /* Acquire the GIL. We have no threadstate here. 
If Python is + already initialized, it is possible that there is already one + existing for this thread, but it is not made current now. + */ + PyEval_AcquireLock(); + + _cffi_py_initialize(); + + /* The Py_InitializeEx() sometimes made a threadstate for us, but + not always. Indeed Py_InitializeEx() could be called and do + nothing. So do we have a threadstate, or not? We don't know, + but we can replace it with NULL in all cases. + */ + (void)PyThreadState_Swap(NULL); + + /* Now we can release the GIL and re-acquire immediately using the + logic of PyGILState(), which handles making or installing the + correct threadstate. + */ + PyEval_ReleaseLock(); +#endif + state = PyGILState_Ensure(); + + /* Call the initxxx() function from the present module. It will + create and initialize us as a CPython extension module, instead + of letting the startup Python code do it---it might reimport + the same .dll/.so and get maybe confused on some platforms. + It might also have troubles locating the .dll/.so again for all + I know. + */ + (void)_CFFI_PYTHON_STARTUP_FUNC(); + if (PyErr_Occurred()) + goto error; + + /* Now run the Python code provided to ffi.embedding_init_code(). + */ + pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE, + "", + Py_file_input); + if (pycode == NULL) + goto error; + global_dict = PyDict_New(); + if (global_dict == NULL) + goto error; + if (PyDict_SetItemString(global_dict, "__builtins__", + PyThreadState_GET()->interp->builtins) < 0) + goto error; + x = PyEval_EvalCode( +#if PY_MAJOR_VERSION < 3 + (PyCodeObject *) +#endif + pycode, global_dict, global_dict); + if (x == NULL) + goto error; + Py_DECREF(x); + + /* Done! Now if we've been called from + _cffi_start_and_call_python() in an ``extern "Python"``, we can + only hope that the Python code did correctly set up the + corresponding @ffi.def_extern() function. 
Otherwise, the + general logic of ``extern "Python"`` functions (inside the + _cffi_backend module) will find that the reference is still + missing and print an error. + */ + result = 0; + done: + Py_XDECREF(pycode); + Py_XDECREF(global_dict); + PyGILState_Release(state); + return result; + + error:; + { + /* Print as much information as potentially useful. + Debugging load-time failures with embedding is not fun + */ + PyObject *exception, *v, *tb, *f, *modules, *mod; + PyErr_Fetch(&exception, &v, &tb); + if (exception != NULL) { + PyErr_NormalizeException(&exception, &v, &tb); + PyErr_Display(exception, v, tb); + } + Py_XDECREF(exception); + Py_XDECREF(v); + Py_XDECREF(tb); + + f = PySys_GetObject((char *)"stderr"); + if (f != NULL && f != Py_None) { + PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME + "\ncompiled with cffi version: 1.5.2" + "\n_cffi_backend module: ", f); + modules = PyImport_GetModuleDict(); + mod = PyDict_GetItemString(modules, "_cffi_backend"); + if (mod == NULL) { + PyFile_WriteString("not loaded", f); + } + else { + v = PyObject_GetAttrString(mod, "__file__"); + PyFile_WriteObject(v, f, 0); + Py_XDECREF(v); + } + PyFile_WriteString("\nsys.path: ", f); + PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0); + PyFile_WriteString("\n\n", f); + } + } + result = -1; + goto done; +} + +PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */ + +static int _cffi_carefully_make_gil(void) +{ + /* This does the basic initialization of Python. It can be called + completely concurrently from unrelated threads. It assumes + that we don't hold the GIL before (if it exists), and we don't + hold it afterwards. + + What it really does is completely different in Python 2 and + Python 3. + + Python 2 + ======== + + Initialize the GIL, without initializing the rest of Python, + by calling PyEval_InitThreads(). + + PyEval_InitThreads() must not be called concurrently at all. + So we use a global variable as a simple spin lock. 
This global + variable must be from 'libpythonX.Y.so', not from this + cffi-based extension module, because it must be shared from + different cffi-based extension modules. We choose + _PyParser_TokenNames[0] as a completely arbitrary pointer value + that is never written to. The default is to point to the + string "ENDMARKER". We change it temporarily to point to the + next character in that string. (Yes, I know it's REALLY + obscure.) + + Python 3 + ======== + + In Python 3, PyEval_InitThreads() cannot be called before + Py_InitializeEx() any more. So this function calls + Py_InitializeEx() first. It uses the same obscure logic to + make sure we never call it concurrently. + + Arguably, this is less good on the spinlock, because + Py_InitializeEx() takes much longer to run than + PyEval_InitThreads(). But I didn't find a way around it. + */ + +#ifdef WITH_THREAD + char *volatile *lock = (char *volatile *)_PyParser_TokenNames; + char *old_value; + + while (1) { /* spin loop */ + old_value = *lock; + if (old_value[0] == 'E') { + assert(old_value[1] == 'N'); + if (cffi_compare_and_swap(lock, old_value, old_value + 1)) + break; + } + else { + assert(old_value[0] == 'N'); + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. 
*/ + } + } +#endif + +#if PY_MAJOR_VERSION >= 3 + /* Python 3: call Py_InitializeEx() */ + { + PyGILState_STATE state = PyGILState_UNLOCKED; + if (!Py_IsInitialized()) + _cffi_py_initialize(); + else + state = PyGILState_Ensure(); + + PyEval_InitThreads(); + PyGILState_Release(state); + } +#else + /* Python 2: call PyEval_InitThreads() */ +# ifdef WITH_THREAD + if (!PyEval_ThreadsInitialized()) { + PyEval_InitThreads(); /* makes the GIL */ + PyEval_ReleaseLock(); /* then release it */ + } + /* else: there is already a GIL, but we still needed to do the + spinlock dance to make sure that we see it as fully ready */ +# endif +#endif + +#ifdef WITH_THREAD + /* release the lock */ + while (!cffi_compare_and_swap(lock, old_value + 1, old_value)) + ; +#endif + + return 0; +} + +/********** end CPython-specific section **********/ + + +#else + + +/********** PyPy-specific section **********/ + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(const void *[]); /* forward */ + +static struct _cffi_pypy_init_s { + const char *name; + void (*func)(const void *[]); + const char *code; +} _cffi_pypy_init = { + _CFFI_MODULE_NAME, + _CFFI_PYTHON_STARTUP_FUNC, + _CFFI_PYTHON_STARTUP_CODE, +}; + +extern int pypy_carefully_make_gil(const char *); +extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *); + +static int _cffi_carefully_make_gil(void) +{ + return pypy_carefully_make_gil(_CFFI_MODULE_NAME); +} + +static int _cffi_initialize_python(void) +{ + return pypy_init_embedded_cffi_module(0xB011, &_cffi_pypy_init); +} + +/********** end PyPy-specific section **********/ + + +#endif + + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +static _cffi_call_python_fnptr _cffi_start_python(void) +{ + /* Delicate logic to initialize Python. This function can be + called multiple times concurrently, e.g. when the process calls + its first ``extern "Python"`` functions in multiple threads at + once. It can also be called recursively, in which case we must + ignore it. 
We also have to consider what occurs if several + different cffi-based extensions reach this code in parallel + threads---it is a different copy of the code, then, and we + can't have any shared global variable unless it comes from + 'libpythonX.Y.so'. + + Idea: + + * _cffi_carefully_make_gil(): "carefully" call + PyEval_InitThreads() (possibly with Py_InitializeEx() first). + + * then we use a (local) custom lock to make sure that a call to this + cffi-based extension will wait if another call to the *same* + extension is running the initialization in another thread. + It is reentrant, so that a recursive call will not block, but + only one from a different thread. + + * then we grab the GIL and (Python 2) we call Py_InitializeEx(). + At this point, concurrent calls to Py_InitializeEx() are not + possible: we have the GIL. + + * do the rest of the specific initialization, which may + temporarily release the GIL but not the custom lock. + Only release the custom lock when we are done. + */ + static char called = 0; + + if (_cffi_carefully_make_gil() != 0) + return NULL; + + _cffi_acquire_reentrant_mutex(); + + /* Here the GIL exists, but we don't have it. We're only protected + from concurrency by the reentrant mutex. */ + + /* This file only initializes the embedded module once, the first + time this is called, even if there are subinterpreters. */ + if (!called) { + called = 1; /* invoke _cffi_initialize_python() only once, + but don't set '_cffi_call_python' right now, + otherwise concurrent threads won't call + this function at all (we need them to wait) */ + if (_cffi_initialize_python() == 0) { + /* now initialization is finished. Switch to the fast-path. */ + + /* We would like nobody to see the new value of + '_cffi_call_python' without also seeing the rest of the + data initialized. However, this is not possible. But + the new value of '_cffi_call_python' is the function + 'cffi_call_python()' from _cffi_backend. 
So: */ + cffi_write_barrier(); + /* ^^^ we put a write barrier here, and a corresponding + read barrier at the start of cffi_call_python(). This + ensures that after that read barrier, we see everything + done here before the write barrier. + */ + + assert(_cffi_call_python_org != NULL); + _cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org; + } + else { + /* initialization failed. Reset this to NULL, even if it was + already set to some other value. Future calls to + _cffi_start_python() are still forced to occur, and will + always return NULL from now on. */ + _cffi_call_python_org = NULL; + } + } + + _cffi_release_reentrant_mutex(); + + return (_cffi_call_python_fnptr)_cffi_call_python_org; +} + +static +void _cffi_start_and_call_python(struct _cffi_externpy_s *externpy, char *args) +{ + _cffi_call_python_fnptr fnptr; + int current_err = errno; +#ifdef _MSC_VER + int current_lasterr = GetLastError(); +#endif + fnptr = _cffi_start_python(); + if (fnptr == NULL) { + fprintf(stderr, "function %s() called, but initialization code " + "failed. Returning 0.\n", externpy->name); + memset(args, 0, externpy->size_of_result); + } +#ifdef _MSC_VER + SetLastError(current_lasterr); +#endif + errno = current_err; + + if (fnptr != NULL) + fnptr(externpy, args); +} + + +/* The cffi_start_python() function makes sure Python is initialized + and our cffi module is set up. It can be called manually from the + user C code. The same effect is obtained automatically from any + dll-exported ``extern "Python"`` function. This function returns + -1 if initialization failed, 0 if all is OK. 
*/ +_CFFI_UNUSED_FN +static int cffi_start_python(void) +{ + if (_cffi_call_python == &_cffi_start_and_call_python) { + if (_cffi_start_python() == NULL) + return -1; + } + cffi_read_barrier(); + return 0; +} + +#undef cffi_compare_and_swap +#undef cffi_write_barrier +#undef cffi_read_barrier diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/ffiplatform.py pypy-5.0.1+dfsg/lib_pypy/cffi/ffiplatform.py --- pypy-4.0.1+dfsg/lib_pypy/cffi/ffiplatform.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/ffiplatform.py 2016-03-19 16:40:12.000000000 +0000 @@ -17,15 +17,16 @@ def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] - allsources.extend(sources) + for src in sources: + allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext): +def compile(tmpdir, ext, compiler_verbose=0): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext) + outputfilename = _build(tmpdir, ext, compiler_verbose) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -35,10 +36,10 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext): +def _build(tmpdir, ext, compiler_verbose=0): # XXX compact but horrible :-( from distutils.core import Distribution - import distutils.errors + import distutils.errors, distutils.log # dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() @@ -48,13 +49,18 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: - dist.run_command('build_ext') + old_level = distutils.log.set_threshold(0) or 0 + try: + distutils.log.set_verbosity(compiler_verbose) + dist.run_command('build_ext') + cmd_obj = dist.get_command_obj('build_ext') + [soname] = cmd_obj.get_outputs() + finally: + distutils.log.set_threshold(old_level) except 
(distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) # - cmd_obj = dist.get_command_obj('build_ext') - [soname] = cmd_obj.get_outputs() return soname try: diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/__init__.py pypy-5.0.1+dfsg/lib_pypy/cffi/__init__.py --- pypy-4.0.1+dfsg/lib_pypy/cffi/__init__.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/__init__.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.3.1" -__version_info__ = (1, 3, 1) +__version__ = "1.5.2" +__version_info__ = (1, 5, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/parse_c_type.h pypy-5.0.1+dfsg/lib_pypy/cffi/parse_c_type.h --- pypy-4.0.1+dfsg/lib_pypy/cffi/parse_c_type.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/parse_c_type.h 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,6 @@ -/* See doc/misc/parse_c_type.rst in the source of CFFI for more information */ +/* This part is from file 'cffi/parse_c_type.h'. It is copied at the + beginning of C sources generated by CFFI's ffi.set_source(). 
*/ typedef void *_cffi_opcode_t; @@ -27,6 +28,7 @@ #define _CFFI_OP_DLOPEN_FUNC 35 #define _CFFI_OP_DLOPEN_CONST 37 #define _CFFI_OP_GLOBAL_VAR_F 39 +#define _CFFI_OP_EXTERN_PYTHON 41 #define _CFFI_PRIM_VOID 0 #define _CFFI_PRIM_BOOL 1 @@ -160,6 +162,12 @@ const char *error_message; }; +struct _cffi_externpy_s { + const char *name; + size_t size_of_result; + void *reserved1, *reserved2; +}; + #ifdef _CFFI_INTERNAL static int parse_c_type(struct _cffi_parse_info_s *info, const char *input); static int search_in_globals(const struct _cffi_type_context_s *ctx, diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/recompiler.py pypy-5.0.1+dfsg/lib_pypy/cffi/recompiler.py --- pypy-4.0.1+dfsg/lib_pypy/cffi/recompiler.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/recompiler.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,6 +3,7 @@ from .cffi_opcode import * VERSION = "0x2601" +VERSION_EMBEDDED = "0x2701" class GlobalExpr: @@ -118,6 +119,7 @@ class Recompiler: + _num_externpy = 0 def __init__(self, ffi, module_name, target_is_python=False): self.ffi = ffi @@ -280,6 +282,29 @@ lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('#define _CFFI_PYTHON_STARTUP_CODE %s' % + (self._string_literal(self.ffi._embedding),)) + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + prnt(''.join(lines)) + version = VERSION_EMBEDDED + else: + version = VERSION + # # then 
paste the C source given by the user, verbatim. prnt('/************************************************************/') prnt() @@ -356,17 +381,24 @@ else: prnt(' NULL, /* no includes */') prnt(' %d, /* num_types */' % (len(self.cffi_types),)) - prnt(' 0, /* flags */') + flags = 0 + if self._num_externpy: + flags |= 1 # set to mean that we use extern "Python" + prnt(' %d, /* flags */' % flags) prnt('};') prnt() # # the init function - base_module_name = self.module_name.split('.')[-1] prnt('#ifdef PYPY_VERSION') prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') - prnt(' p[0] = (const void *)%s;' % VERSION) + if self._num_externpy: + prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') + prnt(' _cffi_call_python_org = ' + '(void(*)(struct _cffi_externpy_s *, char *))p[1];') + prnt(' }') + prnt(' p[0] = (const void *)%s;' % version) prnt(' p[1] = &_cffi_type_context;') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in @@ -385,14 +417,14 @@ prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % ( - self.module_name, VERSION)) + self.module_name, version)) prnt('}') prnt('#endif') @@ -1108,6 +1140,102 @@ GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index))) # ---------- + # extern "Python" + + def _generate_cpy_extern_python_collecttype(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + self._do_collect_type(tp) + + def _generate_cpy_dllexport_python_collecttype(self, tp, name): + self._generate_cpy_extern_python_collecttype(tp, name) + + def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False): + prnt = self._prnt + if isinstance(tp.result, model.VoidType): + size_of_result = '0' + else: + context = 
'result of %s' % name + size_of_result = '(int)sizeof(%s)' % ( + tp.result.get_c_name('', context),) + prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) + prnt(' { "%s", %s };' % (name, size_of_result)) + prnt() + # + arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arg = type.get_c_name(' a%d' % i, context) + arguments.append(arg) + # + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s(%s)' % (name, repr_arguments) + if tp.abi == "__stdcall": + name_and_arguments = '_cffi_stdcall ' + name_and_arguments + # + def may_need_128_bits(tp): + return (isinstance(tp, model.PrimitiveType) and + tp.name == 'long double') + # + size_of_a = max(len(tp.args)*8, 8) + if may_need_128_bits(tp.result): + size_of_a = max(size_of_a, 16) + if isinstance(tp.result, model.StructOrUnion): + size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( + tp.result.get_c_name(''), size_of_a, + tp.result.get_c_name(''), size_of_a) + if dllexport: + tag = 'CFFI_DLLEXPORT' + else: + tag = 'static' + prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments))) + prnt('{') + prnt(' char a[%s];' % size_of_a) + prnt(' char *p = a;') + for i, type in enumerate(tp.args): + arg = 'a%d' % i + if (isinstance(type, model.StructOrUnion) or + may_need_128_bits(type)): + arg = '&' + arg + type = model.PointerType(type) + prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) + prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) + if not isinstance(tp.result, model.VoidType): + prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) + prnt('}') + prnt() + self._num_externpy += 1 + + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._generate_cpy_extern_python_decl(tp, name, dllexport=True) + + def _generate_cpy_extern_python_ctx(self, tp, name): + if self.target_is_python: + raise ffiplatform.VerificationError( + "cannot use 'extern \"Python\"' in the ABI mode") + if 
tp.ellipsis: + raise NotImplementedError("a vararg function is extern \"Python\"") + type_index = self._typesdict[tp] + type_op = CffiOp(OP_EXTERN_PYTHON, type_index) + self._lsts["global"].append( + GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + + def _generate_cpy_dllexport_python_ctx(self, tp, name): + self._generate_cpy_extern_python_ctx(tp, name) + + def _string_literal(self, s): + def _char_repr(c): + # escape with a '\' the characters '\', '"' or (for trigraphs) '?' + if c in '\\"?': return '\\' + c + if ' ' <= c < '\x7F': return c + if c == '\n': return '\\n' + return '\\%03o' % ord(c) + lines = [] + for line in s.splitlines(True): + lines.append('"%s"' % ''.join([_char_repr(c) for c in line])) + return ' \\\n'.join(lines) + + # ---------- # emitting the opcodes for individual types def _emit_bytecode_VoidType(self, tp, index): @@ -1231,13 +1359,69 @@ parts[-1] += extension return os.path.join(outputdir, *parts), parts + +# Aaargh. Distutils is not tested at all for the purpose of compiling +# DLLs that are not extension modules. Here are some hacks to work +# around that, in the _patch_for_*() functions... + +def _patch_meth(patchlist, cls, name, new_meth): + old = getattr(cls, name) + patchlist.append((cls, name, old)) + setattr(cls, name, new_meth) + return old + +def _unpatch_meths(patchlist): + for cls, name, old_meth in reversed(patchlist): + setattr(cls, name, old_meth) + +def _patch_for_embedding(patchlist): + if sys.platform == 'win32': + # we must not remove the manifest when building for embedding! 
+ from distutils.msvc9compiler import MSVCCompiler + _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', + lambda self, manifest_file: manifest_file) + + if sys.platform == 'darwin': + # we must not make a '-bundle', but a '-dynamiclib' instead + from distutils.ccompiler import CCompiler + def my_link_shared_object(self, *args, **kwds): + if '-bundle' in self.linker_so: + self.linker_so = list(self.linker_so) + i = self.linker_so.index('-bundle') + self.linker_so[i] = '-dynamiclib' + return old_link_shared_object(self, *args, **kwds) + old_link_shared_object = _patch_meth(patchlist, CCompiler, + 'link_shared_object', + my_link_shared_object) + +def _patch_for_target(patchlist, target): + from distutils.command.build_ext import build_ext + # if 'target' is different from '*', we need to patch some internal + # method to just return this 'target' value, instead of having it + # built from module_name + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + elif sys.platform == 'darwin': + target += '.dylib' + else: + target += '.so' + _patch_meth(patchlist, build_ext, 'get_ext_filename', + lambda self, ext_name: target) + + def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, - c_file=None, source_extension='.c', extradir=None, **kwds): + c_file=None, source_extension='.c', extradir=None, + compiler_verbose=1, target=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, source_extension) @@ -1246,15 +1430,28 @@ ext_c_file = os.path.join(*parts) else: ext_c_file = c_file + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + # ext = 
ffiplatform.get_extension(ext_c_file, module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file) if call_c_compiler: + patchlist = [] cwd = os.getcwd() try: + if embedding: + _patch_for_embedding(patchlist) + if target != '*': + _patch_for_target(patchlist, target) os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose) finally: os.chdir(cwd) + _unpatch_meths(patchlist) return outputfilename else: return ext, updated diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/vengine_cpy.py pypy-5.0.1+dfsg/lib_pypy/cffi/vengine_cpy.py --- pypy-4.0.1+dfsg/lib_pypy/cffi/vengine_cpy.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/vengine_cpy.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,3 +1,6 @@ +# +# DEPRECATED: implementation for ffi.verify() +# import sys, imp from . import model, ffiplatform diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/vengine_gen.py pypy-5.0.1+dfsg/lib_pypy/cffi/vengine_gen.py --- pypy-4.0.1+dfsg/lib_pypy/cffi/vengine_gen.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/vengine_gen.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,3 +1,6 @@ +# +# DEPRECATED: implementation for ffi.verify() +# import sys, os import types diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi/verifier.py pypy-5.0.1+dfsg/lib_pypy/cffi/verifier.py --- pypy-4.0.1+dfsg/lib_pypy/cffi/verifier.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi/verifier.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,3 +1,6 @@ +# +# DEPRECATED: implementation for ffi.verify() +# import sys, os, binascii, shutil, io from . import __version_verifier_modules__ from . 
import ffiplatform diff -Nru pypy-4.0.1+dfsg/lib_pypy/cffi.egg-info/PKG-INFO pypy-5.0.1+dfsg/lib_pypy/cffi.egg-info/PKG-INFO --- pypy-4.0.1+dfsg/lib_pypy/cffi.egg-info/PKG-INFO 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cffi.egg-info/PKG-INFO 2016-03-19 16:40:12.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.3.1 +Version: 1.5.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff -Nru pypy-4.0.1+dfsg/lib_pypy/cPickle.py pypy-5.0.1+dfsg/lib_pypy/cPickle.py --- pypy-4.0.1+dfsg/lib_pypy/cPickle.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/cPickle.py 2016-03-19 16:40:12.000000000 +0000 @@ -167,7 +167,11 @@ try: key = ord(self.read(1)) while key != STOP: - self.dispatch[key](self) + try: + meth = self.dispatch[key] + except KeyError: + raise UnpicklingError("invalid load key, %r." % chr(key)) + meth(self) key = ord(self.read(1)) except TypeError: if self.read(1) == '': @@ -559,6 +563,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. 
>>> decode_long('') 0L @@ -592,6 +597,11 @@ n -= 1L << (nbytes << 3) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + def load(f): return Unpickler(f).load() diff -Nru pypy-4.0.1+dfsg/lib_pypy/datetime.py pypy-5.0.1+dfsg/lib_pypy/datetime.py --- pypy-4.0.1+dfsg/lib_pypy/datetime.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/datetime.py 2016-03-19 16:40:12.000000000 +0000 @@ -21,6 +21,8 @@ import math as _math import struct as _struct +_SENTINEL = object() + def _cmp(x, y): return 0 if x == y else 1 if x > y else -1 @@ -31,6 +33,8 @@ MAXYEAR = 9999 _MINYEARFMT = 1900 +_MAX_DELTA_DAYS = 999999999 + # Utility functions, adapted from Python's Demo/classes/Dates.py, which # also assumes the current Gregorian calendar indefinitely extended in # both directions. Difference: Dates.py calls January 1 of year 0 day @@ -95,6 +99,15 @@ # pasting together 25 4-year cycles. assert _DI100Y == 25 * _DI4Y - 1 +_US_PER_US = 1 +_US_PER_MS = 1000 +_US_PER_SECOND = 1000000 +_US_PER_MINUTE = 60000000 +_SECONDS_PER_DAY = 24 * 3600 +_US_PER_HOUR = 3600000000 +_US_PER_DAY = 86400000000 +_US_PER_WEEK = 604800000000 + def _ord2ymd(n): "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1." @@ -271,15 +284,17 @@ def _check_int_field(value): if isinstance(value, int): - return value + return int(value) if not isinstance(value, float): try: value = value.__int__() except AttributeError: pass else: - if isinstance(value, (int, long)): - return value + if isinstance(value, int): + return int(value) + elif isinstance(value, long): + return int(long(value)) raise TypeError('__int__ method should return an integer') raise TypeError('an integer is required') raise TypeError('integer argument expected, got float') @@ -344,75 +359,79 @@ raise TypeError("can't compare '%s' to '%s'" % ( type(x).__name__, type(y).__name__)) -# This is a start at a struct tm workalike. Goals: -# -# + Works the same way across platforms. 
-# + Handles all the fields datetime needs handled, without 1970-2038 glitches. -# -# Note: I suspect it's best if this flavor of tm does *not* try to -# second-guess timezones or DST. Instead fold whatever adjustments you want -# into the minutes argument (and the constructor will normalize). - -class _tmxxx: - - ordinal = None - - def __init__(self, year, month, day, hour=0, minute=0, second=0, - microsecond=0): - # Normalize all the inputs, and store the normalized values. - if not 0 <= microsecond <= 999999: - carry, microsecond = divmod(microsecond, 1000000) - second += carry - if not 0 <= second <= 59: - carry, second = divmod(second, 60) - minute += carry - if not 0 <= minute <= 59: - carry, minute = divmod(minute, 60) - hour += carry - if not 0 <= hour <= 23: - carry, hour = divmod(hour, 24) - day += carry - - # That was easy. Now it gets muddy: the proper range for day - # can't be determined without knowing the correct month and year, - # but if day is, e.g., plus or minus a million, the current month - # and year values make no sense (and may also be out of bounds - # themselves). - # Saying 12 months == 1 year should be non-controversial. - if not 1 <= month <= 12: - carry, month = divmod(month-1, 12) - year += carry +def _normalize_pair(hi, lo, factor): + if not 0 <= lo <= factor-1: + inc, lo = divmod(lo, factor) + hi += inc + return hi, lo + +def _normalize_datetime(y, m, d, hh, mm, ss, us, ignore_overflow=False): + # Normalize all the inputs, and store the normalized values. + ss, us = _normalize_pair(ss, us, 1000000) + mm, ss = _normalize_pair(mm, ss, 60) + hh, mm = _normalize_pair(hh, mm, 60) + d, hh = _normalize_pair(d, hh, 24) + y, m, d = _normalize_date(y, m, d, ignore_overflow) + return y, m, d, hh, mm, ss, us + +def _normalize_date(year, month, day, ignore_overflow=False): + # That was easy. 
Now it gets muddy: the proper range for day + # can't be determined without knowing the correct month and year, + # but if day is, e.g., plus or minus a million, the current month + # and year values make no sense (and may also be out of bounds + # themselves). + # Saying 12 months == 1 year should be non-controversial. + if not 1 <= month <= 12: + year, month = _normalize_pair(year, month-1, 12) + month += 1 + assert 1 <= month <= 12 + + # Now only day can be out of bounds (year may also be out of bounds + # for a datetime object, but we don't care about that here). + # If day is out of bounds, what to do is arguable, but at least the + # method here is principled and explainable. + dim = _days_in_month(year, month) + if not 1 <= day <= dim: + # Move day-1 days from the first of the month. First try to + # get off cheap if we're only one day out of range (adjustments + # for timezone alone can't be worse than that). + if day == 0: # move back a day + month -= 1 + if month > 0: + day = _days_in_month(year, month) + else: + year, month, day = year-1, 12, 31 + elif day == dim + 1: # move forward a day month += 1 - assert 1 <= month <= 12 + day = 1 + if month > 12: + month = 1 + year += 1 + else: + ordinal = _ymd2ord(year, month, 1) + (day - 1) + year, month, day = _ord2ymd(ordinal) - # Now only day can be out of bounds (year may also be out of bounds - # for a datetime object, but we don't care about that here). - # If day is out of bounds, what to do is arguable, but at least the - # method here is principled and explainable. - dim = _days_in_month(year, month) - if not 1 <= day <= dim: - # Move day-1 days from the first of the month. First try to - # get off cheap if we're only one day out of range (adjustments - # for timezone alone can't be worse than that). 
- if day == 0: # move back a day - month -= 1 - if month > 0: - day = _days_in_month(year, month) - else: - year, month, day = year-1, 12, 31 - elif day == dim + 1: # move forward a day - month += 1 - day = 1 - if month > 12: - month = 1 - year += 1 - else: - self.ordinal = _ymd2ord(year, month, 1) + (day - 1) - year, month, day = _ord2ymd(self.ordinal) + if not ignore_overflow and not MINYEAR <= year <= MAXYEAR: + raise OverflowError("date value out of range") + return year, month, day - self.year, self.month, self.day = year, month, day - self.hour, self.minute, self.second = hour, minute, second - self.microsecond = microsecond +def _accum(tag, sofar, num, factor, leftover): + if isinstance(num, (int, long)): + prod = num * factor + rsum = sofar + prod + return rsum, leftover + if isinstance(num, float): + fracpart, intpart = _math.modf(num) + prod = int(intpart) * factor + rsum = sofar + prod + if fracpart == 0.0: + return rsum, leftover + assert isinstance(factor, (int, long)) + fracpart, intpart = _math.modf(factor * fracpart) + rsum += int(intpart) + return rsum, leftover + fracpart + raise TypeError("unsupported type for timedelta %s component: %s" % + (tag, type(num))) class timedelta(object): """Represent the difference between two datetime objects. @@ -433,100 +452,42 @@ """ __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' - def __new__(cls, days=0, seconds=0, microseconds=0, - milliseconds=0, minutes=0, hours=0, weeks=0): - # Doing this efficiently and accurately in C is going to be difficult - # and error-prone, due to ubiquitous overflow possibilities, and that - # C double doesn't have enough bits of precision to represent - # microseconds over 10K years faithfully. The code here tries to make - # explicit where go-fast assumptions can be relied on, in order to - # guide the C implementation; it's way more convoluted than speed- - # ignoring auto-overflow-to-long idiomatic Python could be. 
- - # XXX Check that all inputs are ints, longs or floats. - - # Final values, all integer. - # s and us fit in 32-bit signed ints; d isn't bounded. - d = s = us = 0 - - # Normalize everything to days, seconds, microseconds. - days += weeks*7 - seconds += minutes*60 + hours*3600 - microseconds += milliseconds*1000 - - # Get rid of all fractions, and normalize s and us. - # Take a deep breath . - if isinstance(days, float): - dayfrac, days = _math.modf(days) - daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.)) - assert daysecondswhole == int(daysecondswhole) # can't overflow - s = int(daysecondswhole) - assert days == int(days) - d = int(days) - else: - daysecondsfrac = 0.0 - d = days - assert isinstance(daysecondsfrac, float) - assert abs(daysecondsfrac) <= 1.0 - assert isinstance(d, (int, long)) - assert abs(s) <= 24 * 3600 - # days isn't referenced again before redefinition - - if isinstance(seconds, float): - secondsfrac, seconds = _math.modf(seconds) - assert seconds == int(seconds) - seconds = int(seconds) - secondsfrac += daysecondsfrac - assert abs(secondsfrac) <= 2.0 - else: - secondsfrac = daysecondsfrac - # daysecondsfrac isn't referenced again - assert isinstance(secondsfrac, float) - assert abs(secondsfrac) <= 2.0 - - assert isinstance(seconds, (int, long)) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) # can't overflow - assert isinstance(s, int) - assert abs(s) <= 2 * 24 * 3600 - # seconds isn't referenced again before redefinition - - usdouble = secondsfrac * 1e6 - assert abs(usdouble) < 2.1e6 # exact value not critical - # secondsfrac isn't referenced again - - if isinstance(microseconds, float): - microseconds = _round(microseconds + usdouble) - seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) - microseconds = int(microseconds) - else: - microseconds = int(microseconds) - seconds, microseconds = divmod(microseconds, 1000000) 
- days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) - microseconds = _round(microseconds + usdouble) - assert isinstance(s, int) - assert isinstance(microseconds, int) - assert abs(s) <= 3 * 24 * 3600 - assert abs(microseconds) < 3.1e6 - - # Just a little bit of carrying possible for microseconds and seconds. - seconds, us = divmod(microseconds, 1000000) - s += seconds - days, s = divmod(s, 24*3600) - d += days - - assert isinstance(d, (int, long)) - assert isinstance(s, int) and 0 <= s < 24*3600 - assert isinstance(us, int) and 0 <= us < 1000000 + def __new__(cls, days=_SENTINEL, seconds=_SENTINEL, microseconds=_SENTINEL, + milliseconds=_SENTINEL, minutes=_SENTINEL, hours=_SENTINEL, weeks=_SENTINEL): + x = 0 + leftover = 0.0 + if microseconds is not _SENTINEL: + x, leftover = _accum("microseconds", x, microseconds, _US_PER_US, leftover) + if milliseconds is not _SENTINEL: + x, leftover = _accum("milliseconds", x, milliseconds, _US_PER_MS, leftover) + if seconds is not _SENTINEL: + x, leftover = _accum("seconds", x, seconds, _US_PER_SECOND, leftover) + if minutes is not _SENTINEL: + x, leftover = _accum("minutes", x, minutes, _US_PER_MINUTE, leftover) + if hours is not _SENTINEL: + x, leftover = _accum("hours", x, hours, _US_PER_HOUR, leftover) + if days is not _SENTINEL: + x, leftover = _accum("days", x, days, _US_PER_DAY, leftover) + if weeks is not _SENTINEL: + x, leftover = _accum("weeks", x, weeks, _US_PER_WEEK, leftover) + if leftover != 0.0: + x += _round(leftover) + return cls._from_microseconds(x) + + @classmethod + def _from_microseconds(cls, us): + s, us = divmod(us, _US_PER_SECOND) + d, s = divmod(s, _SECONDS_PER_DAY) + return cls._create(d, s, us, False) + + @classmethod + def _create(cls, d, s, us, normalize): + if normalize: + s, us = _normalize_pair(s, us, 1000000) + d, s = _normalize_pair(d, s, 24*3600) - if abs(d) > 999999999: - raise OverflowError("timedelta # of days is too large: %d" % d) + if not -_MAX_DELTA_DAYS <= d <= 
_MAX_DELTA_DAYS: + raise OverflowError("days=%d; must have magnitude <= %d" % (d, _MAX_DELTA_DAYS)) self = object.__new__(cls) self._days = d @@ -535,6 +496,10 @@ self._hashcode = -1 return self + def _to_microseconds(self): + return ((self._days * _SECONDS_PER_DAY + self._seconds) * _US_PER_SECOND + + self._microseconds) + def __repr__(self): module = "datetime." if self.__class__ is timedelta else "" if self._microseconds: @@ -562,8 +527,7 @@ def total_seconds(self): """Total seconds in the duration.""" - return ((self.days * 86400 + self.seconds) * 10**6 + - self.microseconds) / 10**6 + return self._to_microseconds() / 10**6 # Read-only field accessors @property @@ -585,36 +549,37 @@ if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(self._days + other._days, - self._seconds + other._seconds, - self._microseconds + other._microseconds) + return timedelta._create(self._days + other._days, + self._seconds + other._seconds, + self._microseconds + other._microseconds, + True) return NotImplemented - __radd__ = __add__ - def __sub__(self, other): if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(self._days - other._days, - self._seconds - other._seconds, - self._microseconds - other._microseconds) - return NotImplemented - - def __rsub__(self, other): - if isinstance(other, timedelta): - return -self + other + return timedelta._create(self._days - other._days, + self._seconds - other._seconds, + self._microseconds - other._microseconds, + True) return NotImplemented def __neg__(self): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(-self._days, - -self._seconds, - -self._microseconds) + return timedelta._create(-self._days, + -self._seconds, + -self._microseconds, + True) def __pos__(self): - return self + # for CPython 
compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta._create(self._days, + self._seconds, + self._microseconds, + False) def __abs__(self): if self._days < 0: @@ -623,25 +588,18 @@ return self def __mul__(self, other): - if isinstance(other, (int, long)): - # for CPython compatibility, we cannot use - # our __class__ here, but need a real timedelta - return timedelta(self._days * other, - self._seconds * other, - self._microseconds * other) - return NotImplemented + if not isinstance(other, (int, long)): + return NotImplemented + usec = self._to_microseconds() + return timedelta._from_microseconds(usec * other) __rmul__ = __mul__ - def _to_microseconds(self): - return ((self._days * (24*3600) + self._seconds) * 1000000 + - self._microseconds) - def __div__(self, other): if not isinstance(other, (int, long)): return NotImplemented usec = self._to_microseconds() - return timedelta(0, 0, usec // other) + return timedelta._from_microseconds(usec // other) __floordiv__ = __div__ @@ -705,9 +663,8 @@ def __reduce__(self): return (self.__class__, self._getstate()) -timedelta.min = timedelta(-999999999) -timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59, - microseconds=999999) +timedelta.min = timedelta(-_MAX_DELTA_DAYS) +timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 1000000-1) timedelta.resolution = timedelta(microseconds=1) class date(object): @@ -948,32 +905,29 @@ # Computations - def _checkOverflow(self, year): - if not MINYEAR <= year <= MAXYEAR: - raise OverflowError("date +/-: result year %d not in %d..%d" % - (year, MINYEAR, MAXYEAR)) + def _add_timedelta(self, other, factor): + y, m, d = _normalize_date( + self._year, + self._month, + self._day + other.days * factor) + return date(y, m, d) def __add__(self, other): "Add a date to a timedelta." 
if isinstance(other, timedelta): - t = _tmxxx(self._year, - self._month, - self._day + other.days) - self._checkOverflow(t.year) - result = date(t.year, t.month, t.day) - return result + return self._add_timedelta(other, 1) return NotImplemented __radd__ = __add__ def __sub__(self, other): """Subtract two dates, or a date and a timedelta.""" - if isinstance(other, timedelta): - return self + timedelta(-other.days) if isinstance(other, date): days1 = self.toordinal() days2 = other.toordinal() - return timedelta(days1 - days2) + return timedelta._create(days1 - days2, 0, 0, False) + if isinstance(other, timedelta): + return self._add_timedelta(other, -1) return NotImplemented def weekday(self): @@ -1340,7 +1294,7 @@ offset = self._tzinfo.utcoffset(None) offset = _check_utc_offset("utcoffset", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1378,7 +1332,7 @@ offset = self._tzinfo.dst(None) offset = _check_utc_offset("dst", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1505,54 +1459,35 @@ A timezone info object may be passed in as well. """ - _check_tzinfo_arg(tz) - converter = _time.localtime if tz is None else _time.gmtime - - if isinstance(timestamp, int): - us = 0 - else: - t_full = timestamp - timestamp = int(_math.floor(timestamp)) - frac = t_full - timestamp - us = _round(frac * 1e6) - - # If timestamp is less than one microsecond smaller than a - # full second, us can be rounded up to 1000000. In this case, - # roll over to seconds, otherwise, ValueError is raised - # by the constructor. 
- if us == 1000000: - timestamp += 1 - us = 0 - y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp) - ss = min(ss, 59) # clamp out leap seconds if the platform has them - result = cls(y, m, d, hh, mm, ss, us, tz) + self = cls._from_timestamp(converter, timestamp, tz) if tz is not None: - result = tz.fromutc(result) - return result + self = tz.fromutc(self) + return self @classmethod def utcfromtimestamp(cls, t): "Construct a UTC datetime from a POSIX timestamp (like time.time())." - if isinstance(t, int): - us = 0 - else: - t_full = t - t = int(_math.floor(t)) - frac = t_full - t - us = _round(frac * 1e6) + return cls._from_timestamp(_time.gmtime, t, None) + + @classmethod + def _from_timestamp(cls, converter, timestamp, tzinfo): + t_full = timestamp + timestamp = int(_math.floor(timestamp)) + frac = t_full - timestamp + us = _round(frac * 1e6) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. In this case, # roll over to seconds, otherwise, ValueError is raised # by the constructor. 
if us == 1000000: - t += 1 + timestamp += 1 us = 0 - y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t) + y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp) ss = min(ss, 59) # clamp out leap seconds if the platform has them - return cls(y, m, d, hh, mm, ss, us) + return cls(y, m, d, hh, mm, ss, us, tzinfo) @classmethod def now(cls, tz=None): @@ -1594,9 +1529,9 @@ hh, mm, ss = self.hour, self.minute, self.second offset = self._utcoffset() if offset: # neither None nor 0 - tm = _tmxxx(y, m, d, hh, mm - offset) - y, m, d = tm.year, tm.month, tm.day - hh, mm = tm.hour, tm.minute + mm -= offset + y, m, d, hh, mm, ss, _ = _normalize_datetime( + y, m, d, hh, mm, ss, 0, ignore_overflow=True) return _build_struct_time(y, m, d, hh, mm, ss, 0) def date(self): @@ -1730,7 +1665,7 @@ offset = self._tzinfo.utcoffset(self) offset = _check_utc_offset("utcoffset", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1768,7 +1703,7 @@ offset = self._tzinfo.dst(self) offset = _check_utc_offset("dst", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1859,22 +1794,22 @@ return -1 return diff and 1 or 0 + def _add_timedelta(self, other, factor): + y, m, d, hh, mm, ss, us = _normalize_datetime( + self._year, + self._month, + self._day + other.days * factor, + self._hour, + self._minute, + self._second + other.seconds * factor, + self._microsecond + other.microseconds * factor) + return datetime(y, m, d, hh, mm, ss, us, tzinfo=self._tzinfo) + def __add__(self, other): "Add a datetime and a timedelta." 
if not isinstance(other, timedelta): return NotImplemented - t = _tmxxx(self._year, - self._month, - self._day + other.days, - self._hour, - self._minute, - self._second + other.seconds, - self._microsecond + other.microseconds) - self._checkOverflow(t.year) - result = datetime(t.year, t.month, t.day, - t.hour, t.minute, t.second, - t.microsecond, tzinfo=self._tzinfo) - return result + return self._add_timedelta(other, 1) __radd__ = __add__ @@ -1882,16 +1817,15 @@ "Subtract two datetimes, or a datetime and a timedelta." if not isinstance(other, datetime): if isinstance(other, timedelta): - return self + -other + return self._add_timedelta(other, -1) return NotImplemented - days1 = self.toordinal() - days2 = other.toordinal() - secs1 = self._second + self._minute * 60 + self._hour * 3600 - secs2 = other._second + other._minute * 60 + other._hour * 3600 - base = timedelta(days1 - days2, - secs1 - secs2, - self._microsecond - other._microsecond) + delta_d = self.toordinal() - other.toordinal() + delta_s = (self._hour - other._hour) * 3600 + \ + (self._minute - other._minute) * 60 + \ + (self._second - other._second) + delta_us = self._microsecond - other._microsecond + base = timedelta._create(delta_d, delta_s, delta_us, True) if self._tzinfo is other._tzinfo: return base myoff = self._utcoffset() diff -Nru pypy-4.0.1+dfsg/lib_pypy/greenlet.py pypy-5.0.1+dfsg/lib_pypy/greenlet.py --- pypy-4.0.1+dfsg/lib_pypy/greenlet.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib_pypy/greenlet.py 2016-03-19 16:40:12.000000000 +0000 @@ -88,9 +88,19 @@ # try: unbound_method = getattr(_continulet, methodname) + _tls.leaving = current args, kwds = unbound_method(current, *baseargs, to=target) - finally: _tls.current = current + except: + _tls.current = current + if hasattr(_tls, 'trace'): + _run_trace_callback('throw') + _tls.leaving = None + raise + else: + if hasattr(_tls, 'trace'): + _run_trace_callback('switch') + _tls.leaving = None # if kwds: if args: @@ -122,6 
+132,34 @@ return f.f_back.f_back.f_back # go past start(), __switch(), switch() # ____________________________________________________________ +# Recent additions + +GREENLET_USE_GC = True +GREENLET_USE_TRACING = True + +def gettrace(): + return getattr(_tls, 'trace', None) + +def settrace(callback): + try: + prev = _tls.trace + del _tls.trace + except AttributeError: + prev = None + if callback is not None: + _tls.trace = callback + return prev + +def _run_trace_callback(event): + try: + _tls.trace(event, (_tls.leaving, _tls.current)) + except: + # In case of exceptions trace function is removed + if hasattr(_tls, 'trace'): + del _tls.trace + raise + +# ____________________________________________________________ # Internal stuff try: @@ -143,22 +181,32 @@ _tls.current = gmain def _greenlet_start(greenlet, args): - args, kwds = args - _tls.current = greenlet try: - res = greenlet.run(*args, **kwds) - except GreenletExit, e: - res = e + args, kwds = args + _tls.current = greenlet + try: + if hasattr(_tls, 'trace'): + _run_trace_callback('switch') + res = greenlet.run(*args, **kwds) + except GreenletExit, e: + res = e + finally: + _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) finally: - _continuation.permute(greenlet, greenlet.parent) - return ((res,), None) + _tls.leaving = greenlet def _greenlet_throw(greenlet, exc, value, tb): - _tls.current = greenlet try: - raise exc, value, tb - except GreenletExit, e: - res = e + _tls.current = greenlet + try: + if hasattr(_tls, 'trace'): + _run_trace_callback('throw') + raise exc, value, tb + except GreenletExit, e: + res = e + finally: + _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) finally: - _continuation.permute(greenlet, greenlet.parent) - return ((res,), None) + _tls.leaving = greenlet diff -Nru pypy-4.0.1+dfsg/lib_pypy/_pypy_testcapi.py pypy-5.0.1+dfsg/lib_pypy/_pypy_testcapi.py --- pypy-4.0.1+dfsg/lib_pypy/_pypy_testcapi.py 2015-11-19 19:21:40.000000000 +0000 +++ 
pypy-5.0.1+dfsg/lib_pypy/_pypy_testcapi.py 2016-03-19 16:40:12.000000000 +0000 @@ -7,6 +7,7 @@ content = fid.read() # from cffi's Verifier() key = '\x00'.join([sys.version[:3], content]) + key += 'cpyext-gc-support-2' # this branch requires recompilation! if sys.version_info >= (3,): key = key.encode('utf-8') k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) @@ -62,7 +63,7 @@ if sys.platform == 'win32': # XXX pyconfig.h uses a pragma to link to the import library, # which is currently python27.lib - library = os.path.join(thisdir, '..', 'include', 'python27') + library = os.path.join(thisdir, '..', 'libs', 'python27') if not os.path.exists(library + '.lib'): # For a local translation or nightly build library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/collections.py pypy-5.0.1+dfsg/lib-python/2.7/collections.py --- pypy-4.0.1+dfsg/lib-python/2.7/collections.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/collections.py 2016-03-19 16:40:11.000000000 +0000 @@ -18,9 +18,9 @@ assert '__pypy__' not in _sys.builtin_module_names newdict = lambda _ : {} try: - from __pypy__ import reversed_dict + from __pypy__ import reversed_dict as _reversed_dict except ImportError: - reversed_dict = lambda d: reversed(d.keys()) + _reversed_dict = None # don't have ordered dicts try: from thread import get_ident as _get_ident @@ -46,7 +46,7 @@ ''' def __reversed__(self): - return reversed_dict(self) + return _reversed_dict(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. @@ -116,6 +116,178 @@ return ItemsView(self) +def _compat_with_unordered_dicts(): + # This returns the methods needed in OrderedDict in case the base + # 'dict' class is not actually ordered, like on top of CPython or + # old PyPy or PyPy-STM. 
+ + # ===== Original comments and code follows ===== + # ===== The unmodified methods are not repeated ===== + + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + return dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. 
+ dict_delitem(self, key) + link_prev, link_next, _ = self.__map.pop(key) + link_prev[1] = link_next # update link_prev[NEXT] + link_next[0] = link_prev # update link_next[PREV] + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root[1] # start at the first node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[1] # move to next node + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root[0] # start at the last node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[0] # move to previous node + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + dict.clear(self) + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) pairs in od' + for k in self: + yield (k, self[k]) + + update = MutableMapping.update + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. 
+ + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + key = next(reversed(self) if last else iter(self)) + value = self.pop(key) + return key, value + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. 
+ + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + return locals() + +if _reversed_dict is None: + for _key, _value in _compat_with_unordered_dicts().items(): + setattr(OrderedDict, _key, _value) + del _key, _value + ################################################################################ ### namedtuple ################################################################################ diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/distutils/command/build_ext.py pypy-5.0.1+dfsg/lib-python/2.7/distutils/command/build_ext.py --- pypy-4.0.1+dfsg/lib-python/2.7/distutils/command/build_ext.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/distutils/command/build_ext.py 2016-03-19 16:40:11.000000000 +0000 @@ -188,7 +188,7 @@ # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. - self.library_dirs.append(os.path.join(sys.exec_prefix, 'include')) + self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: @@ -685,13 +685,17 @@ # the previous version of this code did. This should work for # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return - # ".pypy-VERSION.so" instead. - so_ext = _get_c_extension_suffix() + # ".pypy-VERSION.so" instead. Note a further tweak for cffi's + # embedding mode: if EXT_SUFFIX is also defined, use that + # directly. 
+ so_ext = get_config_var('EXT_SUFFIX') if so_ext is None: - so_ext = get_config_var('SO') # fall-back - # extensions in debug_mode are named 'module_d.pyd' under windows - if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back + # extensions in debug_mode are named 'module_d.pyd' under windows + if os.name == 'nt' and self.debug: + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/json/encoder.py pypy-5.0.1+dfsg/lib-python/2.7/json/encoder.py --- pypy-4.0.1+dfsg/lib-python/2.7/json/encoder.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/json/encoder.py 2016-03-19 16:40:12.000000000 +0000 @@ -8,13 +8,13 @@ def __init__(self): self._builder = StringBuilder() def append(self, string): - try: - self._builder.append(string) - except UnicodeEncodeError: + if (isinstance(string, unicode) and + type(self._builder) is StringBuilder): ub = UnicodeBuilder() ub.append(self._builder.build()) self._builder = ub - ub.append(string) + self.append = ub.append # shortcut only + self._builder.append(string) def build(self): return self._builder.build() diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/pickle.py pypy-5.0.1+dfsg/lib-python/2.7/pickle.py --- pypy-4.0.1+dfsg/lib-python/2.7/pickle.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/pickle.py 2016-03-19 16:40:12.000000000 +0000 @@ -1376,6 +1376,7 @@ def decode_long(data): r"""Decode a long from a two's complement little-endian binary string. + This is overriden on PyPy by a RPython version that has linear complexity. 
>>> decode_long('') 0L @@ -1402,6 +1403,11 @@ n -= 1L << (nbytes * 8) return n +try: + from __pypy__ import decode_long +except ImportError: + pass + # Shorthands try: diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/sysconfig.py pypy-5.0.1+dfsg/lib-python/2.7/sysconfig.py --- pypy-4.0.1+dfsg/lib-python/2.7/sysconfig.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/sysconfig.py 2016-03-19 16:40:12.000000000 +0000 @@ -524,6 +524,13 @@ import _osx_support _osx_support.customize_config_vars(_CONFIG_VARS) + # PyPy: + import imp + for suffix, mode, type_ in imp.get_suffixes(): + if type_ == imp.C_EXTENSION: + _CONFIG_VARS['SOABI'] = suffix.split('.')[1] + break + if args: vals = [] for name in args: diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/test/capath/0e4015b9.0 pypy-5.0.1+dfsg/lib-python/2.7/test/capath/0e4015b9.0 --- pypy-4.0.1+dfsg/lib-python/2.7/test/capath/0e4015b9.0 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/test/capath/0e4015b9.0 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END 
CERTIFICATE----- diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/test/capath/ce7b8643.0 pypy-5.0.1+dfsg/lib-python/2.7/test/capath/ce7b8643.0 --- pypy-4.0.1+dfsg/lib-python/2.7/test/capath/ce7b8643.0 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/test/capath/ce7b8643.0 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/test/https_svn_python_org_root.pem pypy-5.0.1+dfsg/lib-python/2.7/test/https_svn_python_org_root.pem --- pypy-4.0.1+dfsg/lib-python/2.7/test/https_svn_python_org_root.pem 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/test/https_svn_python_org_root.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290 -IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB -IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA -Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO 
-BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi -MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ -ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ -8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6 -zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y -fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7 -w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc -G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k -epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q -laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ -QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU -fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826 -YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w -ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY -gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe -MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0 -IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy -dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw -czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0 -dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl -aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC -AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg -b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB -ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc -nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg -18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c -gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl -Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY -sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T 
-SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF -CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum -GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk -zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW -omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD ------END CERTIFICATE----- diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/test/selfsigned_pythontestdotnet.pem pypy-5.0.1+dfsg/lib-python/2.7/test/selfsigned_pythontestdotnet.pem --- pypy-4.0.1+dfsg/lib-python/2.7/test/selfsigned_pythontestdotnet.pem 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/test/selfsigned_pythontestdotnet.pem 2016-03-19 16:40:11.000000000 +0000 @@ -1,5 +1,5 @@ -----BEGIN CERTIFICATE----- -MIIChzCCAfCgAwIBAgIJAKGU95wKR8pSMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG @@ -8,9 +8,9 @@ aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv -EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjKTAnMCUGA1UdEQQeMByCGnNl -bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MA0GCSqGSIb3DQEBBQUAA4GBAIOXmdtM -eG9qzP9TiXW/Gc/zI4cBfdCpC+Y4gOfC9bQUC7hefix4iO3+iZjgy3X/FaRxUUoV -HKiXcXIaWqTSUWp45cSh0MbwZXudp6JIAptzdAhvvCrPKeC9i9GvxsPD4LtDAL97 -vSaxQBezA7hdxZd90/EeyMgVZgAnTCnvAWX9 +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= -----END CERTIFICATE----- diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/test/test_ssl.py 
pypy-5.0.1+dfsg/lib-python/2.7/test/test_ssl.py --- pypy-4.0.1+dfsg/lib-python/2.7/test/test_ssl.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/test/test_ssl.py 2016-03-19 16:40:12.000000000 +0000 @@ -57,7 +57,8 @@ SIGNED_CERTFILE2 = data_file("keycert4.pem") SIGNING_CA = data_file("pycacert.pem") -SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem") +REMOTE_HOST = "self-signed.pythontest.net" +REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem") EMPTYCERT = data_file("nullcert.pem") BADCERT = data_file("badcert.pem") @@ -244,7 +245,7 @@ self.assertEqual(p['subjectAltName'], san) def test_DER_to_PEM(self): - with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: + with open(CAFILE_CACERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) @@ -792,7 +793,7 @@ # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"): - ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY) + ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) @@ -1013,7 +1014,7 @@ ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) @@ -1023,8 +1024,8 @@ # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) - # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + # but CAFILE_CACERT is a CA cert + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root 
CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), @@ -1040,7 +1041,7 @@ (('emailAddress', 'support@cacert.org'),)), 'version': 3}]) - with open(SVN_PYTHON_ORG_ROOT_CERT) as f: + with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) @@ -1215,11 +1216,11 @@ class NetworkedTests(unittest.TestCase): def test_connect(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertEqual({}, s.getpeercert()) finally: s.close() @@ -1228,27 +1229,27 @@ s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # this should succeed because we specify the root cert s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertTrue(s.getpeercert()) finally: s.close() def test_connect_ex(self): # Issue #11326: check connect_ex() implementation - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - self.assertEqual(0, s.connect_ex(("svn.python.org", 443))) + self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443))) self.assertTrue(s.getpeercert()) finally: s.close() @@ -1256,14 +1257,14 @@ def test_non_blocking_connect_ex(self): # Issue #11326: non-blocking connect_ex() should allow handshake # to proceed after the socket gets ready. 
- with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.setblocking(False) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) # EWOULDBLOCK under Windows, EINPROGRESS elsewhere self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK)) # Wait for connect to finish @@ -1285,58 +1286,62 @@ def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.settimeout(0.0000001) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) if rc == 0: - self.skipTest("svn.python.org responded too quickly") + self.skipTest("REMOTE_HOST responded too quickly") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) finally: s.close() def test_connect_ex_error(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - rc = s.connect_ex(("svn.python.org", 444)) + rc = s.connect_ex((REMOTE_HOST, 444)) # Issue #19919: Windows machines or VMs hosted on Windows # machines sometimes return EWOULDBLOCK. 
- self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK)) + errors = ( + errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT, + errno.EWOULDBLOCK, + ) + self.assertIn(rc, errors) finally: s.close() def test_connect_with_context(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): # Same as test_connect, but with a separately created context ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: self.assertEqual({}, s.getpeercert()) finally: s.close() # Same with a server hostname s = ctx.wrap_socket(socket.socket(socket.AF_INET), - server_hostname="svn.python.org") - s.connect(("svn.python.org", 443)) + server_hostname=REMOTE_HOST) + s.connect((REMOTE_HOST, 443)) s.close() # This should fail because we have no verification certs ctx.verify_mode = ssl.CERT_REQUIRED s = ctx.wrap_socket(socket.socket(socket.AF_INET)) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # This should succeed because we specify the root cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(REMOTE_ROOT_CERT) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1349,12 +1354,12 @@ # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must # contain both versions of each certificate (same content, different # filename) for this test to be portable across OpenSSL releases. 
- with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1365,7 +1370,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=BYTES_CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1373,15 +1378,15 @@ s.close() def test_connect_cadata(self): - with open(CAFILE_CACERT) as f: + with open(REMOTE_ROOT_CERT) as f: pem = f.read().decode('ascii') der = ssl.PEM_cert_to_DER_cert(pem) - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=pem) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1390,7 +1395,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=der) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1399,9 +1404,9 @@ # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). 
- with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) fd = ss.fileno() f = ss.makefile() f.close() @@ -1415,9 +1420,9 @@ self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = socket.socket(socket.AF_INET) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) s.setblocking(False) s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE, @@ -1460,12 +1465,12 @@ if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) - _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT) + _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT) if support.IPV6_ENABLED: _test_get_server_certificate('ipv6.google.com', 443) def test_ciphers(self): - remote = ("svn.python.org", 443) + remote = (REMOTE_HOST, 443) with support.transient_internet(remote[0]): with closing(ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s: @@ -1510,13 +1515,13 @@ def test_get_ca_certs_capath(self): # capath certs are loaded on request - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1527,12 +1532,12 @@ @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. 
- with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = socket.socket(socket.AF_INET) with closing(ctx1.wrap_socket(s)) as ss: - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) self.assertIs(ss.context, ctx1) self.assertIs(ss._sslobj.context, ctx1) ss.context = ctx2 @@ -3026,7 +3031,7 @@ pass for filename in [ - CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE, + CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/uuid.py pypy-5.0.1+dfsg/lib-python/2.7/uuid.py --- pypy-4.0.1+dfsg/lib-python/2.7/uuid.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/uuid.py 2016-03-19 16:40:12.000000000 +0000 @@ -604,21 +604,8 @@ def uuid4(): """Generate a random UUID.""" - - # When the system provides a version-4 UUID generator, use it. - if _uuid_generate_random: - _buffer = ctypes.create_string_buffer(16) - _uuid_generate_random(_buffer) - return UUID(bytes=_buffer.raw) - - # Otherwise, get randomness from urandom or the 'random' module. 
- try: - import os - return UUID(bytes=os.urandom(16), version=4) - except: - import random - bytes = [chr(random.randrange(256)) for i in range(16)] - return UUID(bytes=bytes, version=4) + import os + return UUID(bytes=os.urandom(16), version=4) def uuid5(namespace, name): """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" diff -Nru pypy-4.0.1+dfsg/lib-python/2.7/xml/etree/ElementTree.py pypy-5.0.1+dfsg/lib-python/2.7/xml/etree/ElementTree.py --- pypy-4.0.1+dfsg/lib-python/2.7/xml/etree/ElementTree.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/lib-python/2.7/xml/etree/ElementTree.py 2016-03-19 16:40:12.000000000 +0000 @@ -1606,7 +1606,17 @@ pubid = pubid[1:-1] if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) - elif self.doctype is not self._XMLParser__doctype: + elif 1: # XXX PyPy fix, used to be + # elif self.doctype is not self._XMLParser__doctype: + # but that condition is always True on CPython, as far + # as I can tell: self._XMLParser__doctype always + # returns a fresh unbound method object. + # On PyPy, unbound and bound methods have stronger + # unicity guarantees: self._XMLParser__doctype + # can return the same unbound method object, in + # some cases making the test above incorrectly False. + # (My guess would be that the line above is a backport + # from Python 3.) # warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) self.doctype(name, pubid, system[1:-1]) diff -Nru pypy-4.0.1+dfsg/LICENSE pypy-5.0.1+dfsg/LICENSE --- pypy-4.0.1+dfsg/LICENSE 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/LICENSE 2016-03-19 16:40:11.000000000 +0000 @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2015 +PyPy Copyright holders 2003-2016 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at @@ -41,29 +41,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -72,8 +72,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon - Justin Peel Ronny Pfannschmidt + Justin Peel David Edelsohn Anders Hammarquist Jakub Gustak @@ -95,6 +95,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -105,9 +106,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -116,16 +117,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -134,14 +139,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. 
Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -153,6 +156,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -162,12 +167,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -191,33 +196,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -225,6 +230,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -234,11 +240,13 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -250,20 +258,21 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto@goyle Yury V. 
Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo@eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon @@ -273,6 +282,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski@gmail.com @@ -282,6 +292,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -316,9 +327,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -327,6 +338,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller diff -Nru pypy-4.0.1+dfsg/Makefile pypy-5.0.1+dfsg/Makefile --- pypy-4.0.1+dfsg/Makefile 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/Makefile 2016-03-19 16:40:11.000000000 +0000 @@ -1,5 +1,5 @@ -all: pypy-c +all: pypy-c cffi_imports PYPY_EXECUTABLE := $(shell which pypy) URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") @@ -10,6 +10,8 @@ RUNINTERP = $(PYPY_EXECUTABLE) endif +.PHONY: cffi_imports + pypy-c: @echo @echo "====================================================================" @@ -36,3 +38,6 @@ # replaced with an opaque --jobserver option by the time this Makefile # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html + +cffi_imports: pypy-c + PYTHONPATH=. 
./pypy-c pypy/tool/build_cffi_imports.py diff -Nru pypy-4.0.1+dfsg/pypy/config/pypyoption.py pypy-5.0.1+dfsg/pypy/config/pypyoption.py --- pypy-4.0.1+dfsg/pypy/config/pypyoption.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/config/pypyoption.py 2016-03-19 16:40:15.000000000 +0000 @@ -36,13 +36,16 @@ "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", - "_csv", "cppyy", "_pypyjson" + "_csv", "cppyy", "_pypyjson", ]) -if ((sys.platform.startswith('linux') or sys.platform == 'darwin') - and os.uname()[4] == 'x86_64' and sys.maxint > 2**32): - # it's not enough that we get x86_64 - working_modules.add('_vmprof') +from rpython.jit.backend import detect_cpu +try: + if detect_cpu.autodetect().startswith('x86'): + working_modules.add('_vmprof') +except detect_cpu.ProcessorAutodetectError: + pass + translation_modules = default_modules.copy() translation_modules.update([ @@ -167,12 +170,8 @@ cmdline="--translationmodules", suggests=[("objspace.allworkingmodules", False)]), - BoolOption("usepycfiles", "Write and read pyc files when importing", - default=True), - BoolOption("lonepycfiles", "Import pyc files with no matching py file", - default=False, - requires=[("objspace.usepycfiles", True)]), + default=False), StrOption("soabi", "Tag to differentiate extension modules built for different Python interpreters", diff -Nru pypy-4.0.1+dfsg/pypy/doc/build.rst pypy-5.0.1+dfsg/pypy/doc/build.rst --- pypy-4.0.1+dfsg/pypy/doc/build.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/build.rst 2016-03-19 16:40:12.000000000 +0000 @@ -73,28 +73,36 @@ lzma (PyPy3 only) liblzma -sqlite3 - libsqlite3 - -curses - libncurses + cffi dependencies from above - pyexpat libexpat1 _ssl libssl +Make sure to have these libraries (with development headers) installed +before building PyPy, otherwise the resulting 
binary will not contain +these modules. Furthermore, the following libraries should be present +after building PyPy, otherwise the corresponding CFFI modules are not +built (you can run or re-run `pypy/tool/release/package.py` to retry +to build them; you don't need to re-translate the whole PyPy): + +sqlite3 + libsqlite3 + +curses + libncurses + gdbm libgdbm-dev -Make sure to have these libraries (with development headers) installed before -building PyPy, otherwise the resulting binary will not contain these modules. +tk + tk-dev On Debian, this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev + libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ + tk-dev For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. @@ -102,6 +110,7 @@ yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel + (XXX plus the Fedora version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -110,6 +119,7 @@ zypper install gcc make python-devel pkg-config \ zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ libexpat-devel libffi-devel python-curses + (XXX plus the SLES11 version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -125,11 +135,13 @@ Translate with JIT:: - pypy rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=jit Translate without JIT:: - pypy rpython/bin/rpython --opt=2 pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=2 (You can use ``python`` instead of ``pypy`` here, which will take longer but works too.) @@ -138,8 +150,16 @@ current directory. 
The executable behaves mostly like a normal Python interpreter (see :doc:`cpython_differences`). +Build cffi import libraries for the stdlib +------------------------------------------ + +Various stdlib modules require a separate build step to create the cffi +import libraries in the `out-of-line API mode`_. This is done by the following +command:: + + PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py -.. _translate-pypy: +.. _`out-of-line API mode`: http://cffi.readthedocs.org/en/latest/overview.html#real-example-api-level-out-of-line Translating with non-standard options ------------------------------------- @@ -199,4 +219,3 @@ that this is never the case. -.. TODO windows diff -Nru pypy-4.0.1+dfsg/pypy/doc/conf.py pypy-5.0.1+dfsg/pypy/doc/conf.py --- pypy-4.0.1+dfsg/pypy/doc/conf.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/conf.py 2016-03-19 16:40:12.000000000 +0000 @@ -123,7 +123,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'PyPy documentation' # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None diff -Nru pypy-4.0.1+dfsg/pypy/doc/contributor.rst pypy-5.0.1+dfsg/pypy/doc/contributor.rst --- pypy-4.0.1+dfsg/pypy/doc/contributor.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/contributor.rst 2016-03-19 16:40:12.000000000 +0000 @@ -11,29 +11,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -42,8 +42,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon - Justin Peel Ronny Pfannschmidt + Justin Peel David Edelsohn Anders Hammarquist Jakub Gustak @@ -65,6 +65,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,9 +76,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -86,16 +87,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -104,14 +109,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. 
Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -123,6 +126,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -132,12 +137,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -161,33 +166,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -195,6 +200,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -204,11 +210,13 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -220,20 +228,21 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto@goyle Yury V. 
Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo@eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon @@ -243,6 +252,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski@gmail.com @@ -252,6 +262,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -286,9 +297,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -297,6 +308,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller @@ -311,4 +323,3 @@ Julien Phalip Roman Podoliaka Dan Loewenherz - diff -Nru pypy-4.0.1+dfsg/pypy/doc/cpython_differences.rst pypy-5.0.1+dfsg/pypy/doc/cpython_differences.rst --- pypy-4.0.1+dfsg/pypy/doc/cpython_differences.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/cpython_differences.rst 2016-03-19 16:40:12.000000000 +0000 @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? diff -Nru pypy-4.0.1+dfsg/pypy/doc/discussion/rawrefcount.rst pypy-5.0.1+dfsg/pypy/doc/discussion/rawrefcount.rst --- pypy-4.0.1+dfsg/pypy/doc/discussion/rawrefcount.rst 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/discussion/rawrefcount.rst 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,158 @@ +====================== +Rawrefcount and the GC +====================== + + +GC Interface +------------ + +"PyObject" is a raw structure with at least two fields, ob_refcnt and +ob_pypy_link. The ob_refcnt is the reference counter as used on +CPython. 
If the PyObject structure is linked to a live PyPy object, +its current address is stored in ob_pypy_link and ob_refcnt is bumped +by either the constant REFCNT_FROM_PYPY, or the constant +REFCNT_FROM_PYPY_LIGHT (== REFCNT_FROM_PYPY + SOME_HUGE_VALUE) +(to mean "light finalizer"). + +Most PyPy objects exist outside cpyext, and conversely in cpyext it is +possible that a lot of PyObjects exist without being seen by the rest +of PyPy. At the interface, however, we can "link" a PyPy object and a +PyObject. There are two kinds of link: + +rawrefcount.create_link_pypy(p, ob) + + Makes a link between an existing object gcref 'p' and a newly + allocated PyObject structure 'ob'. ob->ob_refcnt must be + initialized to either REFCNT_FROM_PYPY, or + REFCNT_FROM_PYPY_LIGHT. (The second case is an optimization: + when the GC finds the PyPy object and PyObject no longer + referenced, it can just free() the PyObject.) + +rawrefcount.create_link_pyobj(p, ob) + + Makes a link from an existing PyObject structure 'ob' to a newly + allocated W_CPyExtPlaceHolderObject 'p'. You must also add + REFCNT_FROM_PYPY to ob->ob_refcnt. For cases where the PyObject + contains all the data, and the PyPy object is just a proxy. The + W_CPyExtPlaceHolderObject should have only a field that contains + the address of the PyObject, but that's outside the scope of the + GC. + +rawrefcount.from_obj(p) + + If there is a link from object 'p' made with create_link_pypy(), + returns the corresponding 'ob'. Otherwise, returns NULL. + +rawrefcount.to_obj(Class, ob) + + Returns ob->ob_pypy_link, cast to an instance of 'Class'. + + +Collection logic +---------------- + +Objects existing purely on the C side have ob->ob_pypy_link == 0; +these are purely reference counted. On the other hand, if +ob->ob_pypy_link != 0, then ob->ob_refcnt is at least REFCNT_FROM_PYPY +and the object is part of a "link". 
+ +The idea is that links whose 'p' is not reachable from other PyPy +objects *and* whose 'ob->ob_refcnt' is REFCNT_FROM_PYPY or +REFCNT_FROM_PYPY_LIGHT are the ones who die. But it is more messy +because PyObjects still (usually) need to have a tp_dealloc called, +and this cannot occur immediately (and can do random things like +accessing other references this object points to, or resurrecting the +object). + +Let P = list of links created with rawrefcount.create_link_pypy() +and O = list of links created with rawrefcount.create_link_pyobj(). +The PyPy objects in the list O are all W_CPyExtPlaceHolderObject: all +the data is in the PyObjects, and all outside references (if any) are +in C, as "PyObject *" fields. + +So, during the collection we do this about P links: + + for (p, ob) in P: + if ob->ob_refcnt != REFCNT_FROM_PYPY + and ob->ob_refcnt != REFCNT_FROM_PYPY_LIGHT: + mark 'p' as surviving, as well as all its dependencies + +At the end of the collection, the P and O links are both handled like +this: + + for (p, ob) in P + O: + if p is not surviving: # even if 'ob' might be surviving + unlink p and ob + if ob->ob_refcnt == REFCNT_FROM_PYPY_LIGHT: + free(ob) + elif ob->ob_refcnt > REFCNT_FROM_PYPY_LIGHT: + ob->ob_refcnt -= REFCNT_FROM_PYPY_LIGHT + else: + ob->ob_refcnt -= REFCNT_FROM_PYPY + if ob->ob_refcnt == 0: + invoke _Py_Dealloc(ob) later, outside the GC + + +GC Implementation +----------------- + +We need two copies of both the P list and O list, for young or old +objects. All four lists can be regular AddressLists of 'ob' objects. + +We also need an AddressDict mapping 'p' to 'ob' for all links in the P +list, and update it when PyPy objects move. 
+ + +Further notes +------------- + +XXX +XXX the rest is the ideal world, but as a first step, we'll look +XXX for the minimal tweaks needed to adapt the existing cpyext +XXX + +For objects that are opaque in CPython, like , we always create +a PyPy object, and then when needed we make an empty PyObject and +attach it with create_link_pypy()/REFCNT_FROM_PYPY_LIGHT. + +For and objects, the corresponding PyObjects contain a +"long" or "double" field too. We link them with create_link_pypy() +and we can use REFCNT_FROM_PYPY_LIGHT too: 'tp_dealloc' doesn't +need to be called, and instead just calling free() is fine. + +For objects, we need both a PyPy and a PyObject side. These +are made with create_link_pypy()/REFCNT_FROM_PYPY. + +For custom PyXxxObjects allocated from the C extension module, we +need create_link_pyobj(). + +For or objects coming from PyPy, we use +create_link_pypy()/REFCNT_FROM_PYPY_LIGHT with a PyObject +preallocated with the size of the string. We copy the string +lazily into that area if PyString_AS_STRING() is called. + +For , , or objects in the C extension +module, we first allocate it as only a PyObject, which supports +mutation of the data from C, like CPython. When it is exported to +PyPy we could make a W_CPyExtPlaceHolderObject with +create_link_pyobj(). + +For objects coming from PyPy, if they are not specialized, +then the PyPy side holds a regular reference to the items. Then we +can allocate a PyTupleObject and store in it borrowed PyObject +pointers to the items. Such a case is created with +create_link_pypy()/REFCNT_FROM_PYPY_LIGHT. If it is specialized, +then it doesn't work because the items are created just-in-time on the +PyPy side. In this case, the PyTupleObject needs to hold real +references to the PyObject items, and we use create_link_pypy()/ +REFCNT_FROM_PYPY. In all cases, we have a C array of PyObjects +that we can directly return from PySequence_Fast_ITEMS, PyTuple_ITEMS, +PyTuple_GetItem, and so on. 
+ +For objects coming from PyPy, we can use a cpyext list +strategy. The list turns into a PyListObject, as if it had been +allocated from C in the first place. The special strategy can hold +(only) a direct reference to the PyListObject, and we can use either +create_link_pyobj() or create_link_pypy() (to be decided). +PySequence_Fast_ITEMS then works for lists too, and PyList_GetItem +can return a borrowed reference, and so on. diff -Nru pypy-4.0.1+dfsg/pypy/doc/embedding.rst pypy-5.0.1+dfsg/pypy/doc/embedding.rst --- pypy-4.0.1+dfsg/pypy/doc/embedding.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/embedding.rst 2016-03-19 16:40:12.000000000 +0000 @@ -10,6 +10,15 @@ with a ``libpypy-c.so`` or ``pypy-c.dll`` file. This is the default in recent versions of PyPy. +.. note:: + + The interface described in this page is kept for backward compatibility. + From PyPy 4.1, it is recommended to use instead CFFI's `native embedding + support,`__ which gives a simpler approach that works on CPython as well + as PyPy. + +.. __: http://cffi.readthedocs.org/en/latest/embedding.html + The resulting shared library exports very few functions, however they are enough to accomplish everything you need, provided you follow a few principles. The API is: @@ -130,8 +139,13 @@ More complete example --------------------- -.. note:: This example depends on pypy_execute_source_ptr which is not available - in PyPy <= 2.2.1. +.. note:: Note that we do not make use of ``extern "Python"``, the new + way to do callbacks in CFFI 1.4: this is because these examples use + the ABI mode, not the API mode, and with the ABI mode you still have + to use ``ffi.callback()``. It is work in progress to integrate + ``extern "Python"`` with the idea of embedding (and it is expected + to ultimately lead to a better way to do embedding than the one + described here, and that would work equally well on CPython and PyPy). Typically we need something more to do than simply execute source. 
The following is a fully fledged example, please consult cffi documentation for details. diff -Nru pypy-4.0.1+dfsg/pypy/doc/faq.rst pypy-5.0.1+dfsg/pypy/doc/faq.rst --- pypy-4.0.1+dfsg/pypy/doc/faq.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/faq.rst 2016-03-19 16:40:12.000000000 +0000 @@ -54,7 +54,8 @@ It is quite common nowadays that xyz is available on PyPI_ and installable with ``pip install xyz``. The simplest solution is to `use virtualenv (as documented here)`_. Then enter (activate) the virtualenv -and type: ``pip install xyz``. +and type: ``pip install xyz``. If you don't know or don't want virtualenv, +you can also install ``pip`` globally by saying ``pypy -m ensurepip``. If you get errors from the C compiler, the module is a CPython C Extension module using unsupported features. `See below.`_ diff -Nru pypy-4.0.1+dfsg/pypy/doc/getting-started-dev.rst pypy-5.0.1+dfsg/pypy/doc/getting-started-dev.rst --- pypy-4.0.1+dfsg/pypy/doc/getting-started-dev.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/getting-started-dev.rst 2016-03-19 16:40:12.000000000 +0000 @@ -19,7 +19,9 @@ * Clone this new repo (i.e. the fork) to your local machine with the command ``hg clone ssh://hg@bitbucket.org/yourname/pypy``. It is a very slow - operation but only ever needs to be done once. If you already cloned + operation but only ever needs to be done once. See also + http://pypy.org/download.html#building-from-source . 
+ If you already cloned ``https://bitbucket.org/pypy/pypy`` before, even if some time ago, then you can reuse the same clone by editing the file ``.hg/hgrc`` in your clone to contain the line ``default = diff -Nru pypy-4.0.1+dfsg/pypy/doc/how-to-contribute.rst pypy-5.0.1+dfsg/pypy/doc/how-to-contribute.rst --- pypy-4.0.1+dfsg/pypy/doc/how-to-contribute.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/how-to-contribute.rst 2016-03-19 16:40:12.000000000 +0000 @@ -67,8 +67,8 @@ **module** directory contains extension modules written in RPython * **rpython compiler** that resides in ``rpython/annotator`` and - ``rpython/rtyper`` directories. Consult :doc:`introduction to RPython ` for - further reading + ``rpython/rtyper`` directories. Consult `Getting Started with RPython`_ + for further reading * **JIT generator** lives in ``rpython/jit`` directory. optimizations live in ``rpython/jit/metainterp/optimizeopt``, the main JIT in @@ -80,3 +80,14 @@ The rest of directories serve specific niche goal and are unlikely a good entry point. + + +More documentation +------------------ + +* `Getting Started Developing With PyPy`_ + +* `Getting Started with RPython`_ + +.. _`Getting Started Developing With PyPy`: getting-started-dev.html +.. _`Getting started with RPython`: http://rpython.readthedocs.org/en/latest/getting-started.html diff -Nru pypy-4.0.1+dfsg/pypy/doc/how-to-release.rst pypy-5.0.1+dfsg/pypy/doc/how-to-release.rst --- pypy-4.0.1+dfsg/pypy/doc/how-to-release.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/how-to-release.rst 2016-03-19 16:40:15.000000000 +0000 @@ -1,5 +1,20 @@ -Making a PyPy Release -===================== +The PyPy Release Process +======================== + +Release Policy +++++++++++++++ + +We try to create a stable release a few times a year. These are released on +a branch named like release-2.x or release-4.x, and each release is tagged, +for instance release-4.0.1. 
+ +After release, inevitably there are bug fixes. It is the responsibility of +the committer who fixes a bug to make sure this fix is on the release branch, +so that we can then create a tagged bug-fix release, which will hopefully +happen more often than stable releases. + +How to Create a PyPy Release +++++++++++++++++++++++++++++ Overview -------- diff -Nru pypy-4.0.1+dfsg/pypy/doc/index-of-release-notes.rst pypy-5.0.1+dfsg/pypy/doc/index-of-release-notes.rst --- pypy-4.0.1+dfsg/pypy/doc/index-of-release-notes.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/index-of-release-notes.rst 2016-03-19 16:40:15.000000000 +0000 @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst release-2.6.1.rst diff -Nru pypy-4.0.1+dfsg/pypy/doc/index-of-whatsnew.rst pypy-5.0.1+dfsg/pypy/doc/index-of-whatsnew.rst --- pypy-4.0.1+dfsg/pypy/doc/index-of-whatsnew.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/index-of-whatsnew.rst 2016-03-19 16:40:12.000000000 +0000 @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst whatsnew-2.6.1.rst diff -Nru pypy-4.0.1+dfsg/pypy/doc/release-4.0.1.rst pypy-5.0.1+dfsg/pypy/doc/release-4.0.1.rst --- pypy-4.0.1+dfsg/pypy/doc/release-4.0.1.rst 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/release-4.0.1.rst 2016-03-19 16:40:12.000000000 +0000 @@ -2,10 +2,10 @@ PyPy 4.0.1 ========== -We have released PyPy 4.0.1, a bux-fix release after PyPy 4.0.1. We have fixed -a few critical bugs in the JIT compiled code, reported by users. We encourage -all users of PyPy to update to this version. - +We have released PyPy 4.0.1, three weeks after PyPy 4.0.0. We have fixed +a few critical bugs in the JIT compiled code, reported by users. We therefore +encourage all users of PyPy to update to this version. There are a few minor +enhancements in this version as well. 
You can download the PyPy 4.0.1 release here: @@ -46,18 +46,12 @@ `dynamic languages`_ to see what RPython can do for them. This release supports **x86** machines on most common operating systems -(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_), -as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. - -We also introduce `support for the 64 bit PowerPC`_ hardware, specifically -Linux running the big- and little-endian variants of ppc64. +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **ppc64** running Linux. .. _`pypy and cpython 2.7.x`: http://speed.pypy.org -.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy -.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/ .. _`dynamic languages`: http://pypyjs.org -.. _`support for the 64 bit PowerPC`: http://morepypy.blogspot.com/2015/10/powerpc-backend-for-jit.html -.. _`here`: http://morepypy.blogspot.com/2015/10/automatic-simd-vectorization-support-in.html Other Highlights (since 4.0.0 released three weeks ago) ======================================================= @@ -91,7 +85,7 @@ * Improve support for __array_interface__ - * Propogate NAN mantissas through float16-float32-float64 conversions + * Propagate NAN mantissas through float16-float32-float64 conversions * Performance improvements and refactorings: diff -Nru pypy-4.0.1+dfsg/pypy/doc/release-5.0.0.rst pypy-5.0.1+dfsg/pypy/doc/release-5.0.0.rst --- pypy-4.0.1+dfsg/pypy/doc/release-5.0.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/release-5.0.0.rst 2016-03-19 16:40:15.000000000 +0000 @@ -0,0 +1,227 @@ +======== +PyPy 5.0 +======== + +We have released PyPy 5.0, about three months after PyPy 4.0.1. +We encourage all users of PyPy to update to this version. 
Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. The exact effects depend vastly on the program +you're running and can range from insignificant to warmup being up to 30% +faster and memory dropping by about 30%. + +We also merged a major upgrade to our C-API layer (cpyext), simplifying the +interaction between c-level objects and PyPy interpreter level objects. As a +result, lxml (prerelease) with its cython compiled component +`passes all tests`_ on PyPy. The new cpyext is also much faster. + +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to the cooperation with jetbrains, +vmprof now works on Linux, OS X and Windows on both PyPy and CPython. + +You can download the PyPy 5.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. PyPy 5.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy +.. _`passes all tests`: https://bitbucket.org/pypy/compatibility/wiki/lxml +.. 
_vmprof: http://vmprof.readthedocs.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **PPC64** running Linux. + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 4.0.1 released in November 2015) +========================================================= + +* New features: + + * Support embedding PyPy in a C-program via cffi and static callbacks in cffi. + This deprecates the old method of embedding PyPy + + * Refactor vmprof to work cross-operating-system, deprecate using buggy + libunwind on Linux platforms. Vmprof even works on Windows now. + + * Support more of the C-API type slots, like tp_getattro, and fix C-API + macros, functions, and structs such as _PyLong_FromByteArray(), + PyString_GET_SIZE, f_locals in PyFrameObject, Py_NAN, co_filename in + PyCodeObject + + * Use a more stable approach for allocating PyObjects in cpyext. (see + `blog post`_). Once the PyObject corresponding to a PyPy object is created, + it stays around at the same location until the death of the PyPy object. + Done with a little bit of custom GC support. 
It allows us to kill the + notion of "borrowing" inside cpyext, reduces 4 dictionaries down to 1, and + significantly simplifies the whole approach (which is why it is a new + feature while technically a refactoring) and allows PyPy to support the + popular lxml module (as of the *next* release) with no PyPy specific + patches needed + + * Make the default filesystem encoding ASCII, like CPython + + * Use `hypothesis`_ in test creation, which is great for randomizing tests + +* Bug Fixes + + * Backport always using os.urandom for uuid4 from cpython and fix the JIT as well + (issue #2202) + + * More completely support datetime, optimize timedelta creation + + * Fix for issue #2185 which caused an inconsistent list of operations to be + generated by the unroller, appeared in a complicated Django app + + * Fix an elusive issue with stacklets on shadowstack which showed up when + forgetting stacklets without resuming them + + * Fix entrypoint() which now acquires the GIL + + * Fix direct_ffi_call() so failure does not bail out before setting CALL_MAY_FORCE + + * Fix (de)pickling long values by simplifying the implementation + + * Fix RPython rthread so that objects stored as threadlocal do not force minor + GC collection and are kept alive automatically. 
This improves performance of + short-running Python callbacks and prevents resetting such object between + calls + + * Support floats as parameters to itertools.islice() + + * Check for the existence of CODESET, ignoring it should have prevented PyPy + from working on FreeBSD + + * Fix for corner case (likely shown by Krakatau) for consecutive guards with + interdependencies + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Updates to numpy 1.10.2 (incompatibilities and not-implemented features + still exist) + + * Support dtype=(('O', spec)) union while disallowing record arrays with + mixed object, non-object values + + * Remove all traces of micronumpy from cpyext if --withoutmod-micronumpy option used + + * Support indexing filtering with a boolean ndarray + + * Support partition() as an app-level function, together with a cffi wrapper + in pypy/numpy, this now provides partial support for partition() + +* Performance improvements: + + * Optimize global lookups + + * Improve the memory signature of numbering instances in the JIT. This should + massively decrease the amount of memory consumed by the JIT, which is + significant for most programs. Also compress the numberings using variable- + size encoding + + * Optimize string concatenation + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. 
+ + * Optimize two-tuple lookups in mapdict, which improves warmup of instance + variable access somewhat + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + + +* Internal refactorings: + + * Refactor and improve exception analysis in the annotator + + * Remove unnecessary special handling of space.wrap(). + + * Support list-resizing setslice operations in RPython + + * Tweak the trace-too-long heuristic for multiple jit drivers + + * Refactor bookkeeping (such a cool word - three double letters) in the + annotator + + * Refactor wrappers for OS functions from rtyper to rlib and simplify them + + * Simplify backend loading instructions to only use four variants + + * Simplify GIL handling in non-jitted code + + * Refactor naming in optimizeopt + + * Change GraphAnalyzer to use a more precise way to recognize external + functions and fix null pointer handling, generally clean up external + function handling + + * Remove pure variants of ``getfield_gc_*`` operations from the JIT by + determining purity while tracing + + * Refactor databasing + + * Simplify bootstrapping in cpyext + + * Refactor rtyper debug code into python.rtyper.debug + + * Separate structmember.h from Python.h Also enhance creating api functions + to specify which header file they appear in (previously only pypy_decl.h) + + * Fix tokenizer to enforce universal newlines, needed for Python 3 support + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`hypothesis`: http://hypothesis.readthedocs.org +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. 
+ +Cheers + +The PyPy Team + diff -Nru pypy-4.0.1+dfsg/pypy/doc/stm.rst pypy-5.0.1+dfsg/pypy/doc/stm.rst --- pypy-4.0.1+dfsg/pypy/doc/stm.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/stm.rst 2016-03-19 16:40:12.000000000 +0000 @@ -83,28 +83,27 @@ **pypy-stm requires 64-bit Linux for now.** -Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu binary here__ -(``pypy-stm-2.*.tar.bz2``, for Ubuntu 12.04-14.04). The current version -supports four "segments", which means that it will run up to four -threads in parallel. (Development recently switched to `stmgc-c8`_, -but that is not ready for trying out yet.) +Development is done in the branch `stmgc-c8`_. If you are only +interested in trying it out, please pester us until we upload a recent +prebuilt binary. The current version supports four "segments", which +means that it will run up to four threads in parallel. To build a version from sources, you first need to compile a custom -version of clang(!); we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 `` -for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for a clang-only feature that hasn't been used so heavily -in the past (without the patches, you get crashes of clang). Then get -the branch `stmgc-c7`_ of PyPy and run:: +version of gcc(!). See the instructions here: +https://bitbucket.org/pypy/stmgc/src/default/gcc-seg-gs/ +(Note that these patches are being incorporated into gcc. It is likely +that future versions of gcc will not need to be patched any more.) + +Then get the branch `stmgc-c8`_ of PyPy and run:: + + cd pypy/goal + ../../rpython/bin/rpython -Ojit --stm + +At the end, this will try to compile the generated C code by calling +``gcc-seg-gs``, which must be the script you installed in the +instructions above. 
- rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py - -.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ .. _`stmgc-c8`: https://bitbucket.org/pypy/pypy/src/stmgc-c8/ -.. __: https://bitbucket.org/pypy/pypy/downloads/ -.. __: http://clang.llvm.org/get_started.html -.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ .. _caveats: @@ -112,6 +111,12 @@ Current status (stmgc-c7) ------------------------- +.. warning:: + + THIS PAGE IS OLD, THE REST IS ABOUT STMGC-C7 WHEREAS THE CURRENT + DEVELOPMENT WORK IS DONE ON STMGC-C8 + + * **NEW:** It seems to work fine, without crashing any more. Please `report any crash`_ you find (or other bugs). diff -Nru pypy-4.0.1+dfsg/pypy/doc/tool/makecontributor.py pypy-5.0.1+dfsg/pypy/doc/tool/makecontributor.py --- pypy-4.0.1+dfsg/pypy/doc/tool/makecontributor.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/tool/makecontributor.py 2016-03-19 16:40:12.000000000 +0000 @@ -72,6 +72,7 @@ 'Anton Gulenko':['anton gulenko', 'anton_gulenko'], 'Richard Lancaster':['richardlancaster'], 'William Leslie':['William ML Leslie'], + 'Spenser Bauman':['Spenser Andrew Bauman'], } alias_map = {} diff -Nru pypy-4.0.1+dfsg/pypy/doc/whatsnew-5.0.0.rst pypy-5.0.1+dfsg/pypy/doc/whatsnew-5.0.0.rst --- pypy-4.0.1+dfsg/pypy/doc/whatsnew-5.0.0.rst 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/whatsnew-5.0.0.rst 2016-03-19 16:40:15.000000000 +0000 @@ -0,0 +1,197 @@ +====================== +What's new in PyPy 5.0 +====================== + +.. this is a revision shortly after release-4.0.1 +.. startrev: 4b5c840d0da2 + +Fixed ``_PyLong_FromByteArray()``, which was buggy. + +Fixed a crash with stacklets (or greenlets) on non-Linux machines +which showed up if you forget stacklets without resuming them. + +.. branch: numpy-1.10 + +Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy +which is now 1.10.2 + +.. 
branch: osx-flat-namespace + +Fix the cpyext tests on OSX by linking with -flat_namespace + +.. branch: anntype + +Refactor and improve exception analysis in the annotator. + +.. branch: posita/2193-datetime-timedelta-integrals + +Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` +to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) + +.. branch: faster-rstruct + +Improve the performance of struct.unpack, which now directly reads inside the +string buffer and directly casts the bytes to the appropriate type, when +allowed. Unpacking of floats and doubles is about 15 times faster now, while +for integer types it's up to ~50% faster for 64bit integers. + +.. branch: wrap-specialisation + +Remove unnecessary special handling of space.wrap(). + +.. branch: compress-numbering + +Improve the memory signature of numbering instances in the JIT. This should massively +decrease the amount of memory consumed by the JIT, which is significant for most programs. + +.. branch: fix-trace-too-long-heuristic + +Improve the heuristic when disable trace-too-long + +.. branch: fix-setslice-can-resize + +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + +.. branch: anntype2 + +A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: + +- Implement @doubledispatch decorator and use it for intersection() and difference(). + +- Turn isinstance into a SpaceOperation + +- Create a few direct tests of the fundamental annotation invariant in test_model.py + +- Remove bookkeeper attribute from DictDef and ListDef. + +.. branch: cffi-static-callback + +.. branch: vecopt-absvalue + +- Enhancement. Removed vector fields from AbstractValue. + +.. branch: memop-simplify2 + +Simplification. Backends implement too many loading instructions, only having a slightly different interface. 
+Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the +commonly known loading operations + +.. branch: more-rposix + +Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and +turn them into regular RPython functions. Most RPython-compatible `os.*` +functions are now directly accessible as `rpython.rposix.*`. + +.. branch: always-enable-gil + +Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. + +.. branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + +.. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. + +.. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + +.. branch: cpyext-slotdefs +.. branch: fix-missing-canraise +.. branch: whatsnew + +.. branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. + +.. branch: globals-quasiimmut + +Optimize global lookups. + +.. branch: cffi-static-callback-embedding + +Updated to CFFI 1.5, which supports a new way to do embedding. +Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. + +.. branch: fix-cpython-ssl-tests-2.7 + +Fix SSL tests by importing cpython's patch + + +.. branch: remove-getfield-pure + +Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant +optimizations instead consult the field descriptor to determine the purity of +the operation. 
Additionally, pure ``getfield`` operations are now handled +entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than +`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen +for traces containing a large number of pure getfield operations. + +.. branch: exctrans + +Try to ensure that no new functions get annotated during the 'source_c' phase. +Refactor sandboxing to operate at a higher level. + +.. branch: cpyext-bootstrap + +.. branch: vmprof-newstack + +Refactor vmprof to work cross-operating-system. + +.. branch: seperate-strucmember_h + +Seperate structmember.h from Python.h Also enhance creating api functions +to specify which header file they appear in (previously only pypy_decl.h) + +.. branch: llimpl + +Refactor register_external(), remove running_on_llinterp mechanism and +apply sandbox transform on externals at the end of annotation. + +.. branch: cffi-embedding-win32 + +.. branch: windows-vmprof-support + +vmprof should work on Windows. + + +.. branch: reorder-map-attributes + +When creating instances and adding attributes in several different orders +depending on some condition, the JIT would create too much code. This is now +fixed. + +.. branch: cpyext-gc-support-2 + +Improve CPython C API support, which means lxml now runs unmodified +(after removing pypy hacks, pending pull request) + +.. branch: look-inside-tuple-hash + +Look inside tuple hash, improving mdp benchmark + +.. branch: vlen-resume + +Compress resume data, saving 10-20% of memory consumed by the JIT + +.. branch: issue-2248 + +.. branch: ndarray-setitem-filtered + +Fix boolean-array indexing in micronumpy + +.. 
branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo + diff -Nru pypy-4.0.1+dfsg/pypy/doc/whatsnew-head.rst pypy-5.0.1+dfsg/pypy/doc/whatsnew-head.rst --- pypy-4.0.1+dfsg/pypy/doc/whatsnew-head.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/whatsnew-head.rst 2016-03-19 16:40:15.000000000 +0000 @@ -1,7 +1,7 @@ ========================= -What's new in PyPy 4.0.+ +What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-4.0.1 -.. startrev: 4b5c840d0da2 +.. this is a revision shortly after release-5.0 +.. startrev: 9c4299dc2d60 diff -Nru pypy-4.0.1+dfsg/pypy/doc/windows.rst pypy-5.0.1+dfsg/pypy/doc/windows.rst --- pypy-4.0.1+dfsg/pypy/doc/windows.rst 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/doc/windows.rst 2016-03-19 16:40:12.000000000 +0000 @@ -60,6 +60,7 @@ set PYPY_GC_MAX_DELTA=200MB pypy --jit loop_longevity=300 ../../rpython/bin/rpython -Ojit targetpypystandalone set PYPY_GC_MAX_DELTA= + PYTHONPATH=../.. ./pypy-c ../tool/build_cffi_imports.py .. _build instructions: http://pypy.org/download.html#building-from-source diff -Nru pypy-4.0.1+dfsg/pypy/goal/targetpypystandalone.py pypy-5.0.1+dfsg/pypy/goal/targetpypystandalone.py --- pypy-4.0.1+dfsg/pypy/goal/targetpypystandalone.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/goal/targetpypystandalone.py 2016-03-19 16:40:15.000000000 +0000 @@ -81,18 +81,11 @@ # register the minimal equivalent of running a small piece of code. 
This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint, RPython_StartupCode + from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype - from rpython.rtyper.lltypesystem.lloperation import llop - w_pathsetter = space.appexec([], """(): - def f(path): - import sys - sys.path[:] = path - return f - """) - - @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') + @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], + c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib verbose = rffi.cast(lltype.Signed, verbose) @@ -109,7 +102,10 @@ " not found in '%s' or in any parent directory" % home1) return rffi.cast(rffi.INT, 1) space.startup() - space.call_function(w_pathsetter, w_path) + space.appexec([w_path], """(path): + import sys + sys.path[:] = path + """) # import site try: space.setattr(space.getbuiltinmodule('sys'), @@ -126,40 +122,35 @@ debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) return rffi.cast(rffi.INT, -1) - @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') + @entrypoint_highlevel('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): return pypy_execute_source_ptr(ll_source, 0) - @entrypoint('main', [rffi.CCHARP, lltype.Signed], - c_name='pypy_execute_source_ptr') + @entrypoint_highlevel('main', [rffi.CCHARP, lltype.Signed], + c_name='pypy_execute_source_ptr') def pypy_execute_source_ptr(ll_source, ll_ptr): - after = rffi.aroundstate.after - if after: after() source = rffi.charp2str(ll_source) res = _pypy_execute_source(source, ll_ptr) - before = rffi.aroundstate.before - if before: before() return rffi.cast(rffi.INT, res) - @entrypoint('main', [], c_name='pypy_init_threads') + @entrypoint_highlevel('main', [], c_name='pypy_init_threads') def pypy_init_threads(): if not 
space.config.objspace.usemodules.thread: return os_thread.setup_threads(space) - before = rffi.aroundstate.before - if before: before() - @entrypoint('main', [], c_name='pypy_thread_attach') + @entrypoint_highlevel('main', [], c_name='pypy_thread_attach') def pypy_thread_attach(): if not space.config.objspace.usemodules.thread: return os_thread.setup_threads(space) os_thread.bootstrapper.acquire(space, None, None) + # XXX this doesn't really work. Don't use os.fork(), and + # if your embedder program uses fork(), don't use any PyPy + # code in the fork rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() - before = rffi.aroundstate.before - if before: before() def _pypy_execute_source(source, c_argument): try: @@ -248,6 +239,9 @@ raise Exception("Cannot use the --output option with PyPy " "when --shared is on (it is by default). " "See issue #1971.") + if sys.platform == 'win32': + config.translation.libname = '..\\..\\libs\\python27.lib' + thisdir.join('..', '..', 'libs').ensure(dir=1) if config.translation.thread: config.objspace.usemodules.thread = True @@ -283,7 +277,6 @@ if config.translation.sandbox: config.objspace.lonepycfiles = False - config.objspace.usepycfiles = False config.translating = True @@ -302,7 +295,7 @@ def hack_for_cffi_modules(self, driver): # HACKHACKHACK - # ugly hack to modify target goal from compile_c to build_cffi_imports + # ugly hack to modify target goal from compile_* to build_cffi_imports # this should probably get cleaned up and merged with driver.create_exe from rpython.translator.driver import taskdef import types @@ -316,7 +309,8 @@ name = name.new(ext='exe') return name - @taskdef(['compile_c'], "Create cffi bindings for modules") + compile_goal, = driver.backend_select_goals(['compile']) + @taskdef([compile_goal], "Create cffi bindings for modules") def task_build_cffi_imports(self): from pypy.tool.build_cffi_imports import create_cffi_import_libraries ''' Use cffi to compile cffi 
interfaces to modules''' @@ -335,7 +329,7 @@ # if failures, they were already printed print >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored' driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver) - driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, ['compile_c'] + driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, [compile_goal] driver.default_goal = 'build_cffi_imports' # HACKHACKHACK end diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/astcompiler/assemble.py pypy-5.0.1+dfsg/pypy/interpreter/astcompiler/assemble.py --- pypy-4.0.1+dfsg/pypy/interpreter/astcompiler/assemble.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/astcompiler/assemble.py 2016-03-19 16:40:12.000000000 +0000 @@ -289,6 +289,8 @@ for w_item in space.fixedview(obj): result_w.append(self._make_key(w_item)) w_key = space.newtuple(result_w[:]) + elif isinstance(obj, PyCode): + w_key = space.newtuple([obj, w_type, space.id(obj)]) else: w_key = space.newtuple([obj, w_type]) return w_key diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/astcompiler/test/test_compiler.py pypy-5.0.1+dfsg/pypy/interpreter/astcompiler/test/test_compiler.py --- pypy-4.0.1+dfsg/pypy/interpreter/astcompiler/test/test_compiler.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/astcompiler/test/test_compiler.py 2016-03-19 16:40:12.000000000 +0000 @@ -931,6 +931,11 @@ finally: space.call_function(w_set_debug, space.w_True) + def test_dont_fold_equal_code_objects(self): + yield self.st, "f=lambda:1;g=lambda:1.0;x=g()", 'type(x)', float + yield (self.st, "x=(lambda: (-0.0, 0.0), lambda: (0.0, -0.0))[1]()", + 'repr(x)', '(0.0, -0.0)') + class AppTestCompiler: diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/baseobjspace.py pypy-5.0.1+dfsg/pypy/interpreter/baseobjspace.py --- pypy-4.0.1+dfsg/pypy/interpreter/baseobjspace.py 2015-11-19 19:21:43.000000000 +0000 +++ 
pypy-5.0.1+dfsg/pypy/interpreter/baseobjspace.py 2016-03-19 16:40:15.000000000 +0000 @@ -27,7 +27,7 @@ class W_Root(object): """This is the abstract root class of all wrapped objects that live in a 'normal' object space like StdObjSpace.""" - __slots__ = () + __slots__ = ('__weakref__',) user_overridden_class = False def getdict(self, space): @@ -306,7 +306,7 @@ return None -class W_InterpIterable(W_Root): +class InterpIterable(object): def __init__(self, space, w_iterable): self.w_iter = space.iter(w_iterable) self.space = space @@ -391,7 +391,7 @@ self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) self._code_of_sys_exc_info = None - + # can be overridden to a subclass self.initialize() @@ -745,9 +745,13 @@ return self.int_w(self.hash(w_obj)) def len_w(self, w_obj): - """shotcut for space.int_w(space.len(w_obj))""" + """shortcut for space.int_w(space.len(w_obj))""" return self.int_w(self.len(w_obj)) + def contains_w(self, w_container, w_item): + """shortcut for space.is_true(space.contains(w_container, w_item))""" + return self.is_true(self.contains(w_container, w_item)) + def setitem_str(self, w_obj, key, w_value): return self.setitem(w_obj, self.wrap(key), w_value) @@ -846,7 +850,7 @@ return lst_w[:] # make the resulting list resizable def iteriterable(self, w_iterable): - return W_InterpIterable(self, w_iterable) + return InterpIterable(self, w_iterable) def _unpackiterable_unknown_length(self, w_iterator, w_iterable): """Unpack an iterable of unknown length into an interp-level @@ -1237,7 +1241,7 @@ if not isinstance(statement, PyCode): raise TypeError('space.exec_(): expected a string, code or PyCode object') w_key = self.wrap('__builtins__') - if not self.is_true(self.contains(w_globals, w_key)): + if not self.contains_w(w_globals, w_key): self.setitem(w_globals, w_key, self.wrap(self.builtin)) return statement.exec_code(self, w_globals, w_locals) diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/eval.py 
pypy-5.0.1+dfsg/pypy/interpreter/eval.py --- pypy-4.0.1+dfsg/pypy/interpreter/eval.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/eval.py 2016-03-19 16:40:12.000000000 +0000 @@ -9,8 +9,8 @@ class Code(W_Root): """A code is a compiled version of some source code. Abstract base class.""" - _immutable_ = True hidden_applevel = False + _immutable_fields_ = ['co_name', 'fast_natural_arity', 'hidden_applevel'] # n >= 0 : arity # FLATPYCALL = 0x100 diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/function.py pypy-5.0.1+dfsg/pypy/interpreter/function.py --- pypy-4.0.1+dfsg/pypy/interpreter/function.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/function.py 2016-03-19 16:40:15.000000000 +0000 @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -557,6 +560,26 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function and + self.w_class is other.w_class) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id = id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/pycode.py pypy-5.0.1+dfsg/pypy/interpreter/pycode.py --- 
pypy-4.0.1+dfsg/pypy/interpreter/pycode.py 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/pycode.py 2016-03-19 16:40:12.000000000 +0000 @@ -50,14 +50,19 @@ kwargname = varnames[argcount] if code.co_flags & CO_VARKEYWORDS else None return Signature(argnames, varargname, kwargname) +class CodeHookCache(object): + def __init__(self, space): + self._code_hook = None class PyCode(eval.Code): "CPython-style code objects." - _immutable_ = True - _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", - "co_freevars[*]", "co_cellvars[*]", - "_args_as_cellvars[*]"] - + _immutable_fields_ = ["_signature", "co_argcount", "co_cellvars[*]", + "co_code", "co_consts_w[*]", "co_filename", + "co_firstlineno", "co_flags", "co_freevars[*]", + "co_lnotab", "co_names_w[*]", "co_nlocals", + "co_stacksize", "co_varnames[*]", + "_args_as_cellvars[*]", "w_globals?"] + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -81,11 +86,32 @@ self.co_name = name self.co_firstlineno = firstlineno self.co_lnotab = lnotab + # store the first globals object that the code object is run in in + # here. 
if a frame is run in that globals object, it does not need to + # store it at all + self.w_globals = None self.hidden_applevel = hidden_applevel self.magic = magic self._signature = cpython_code_signature(self) self._initialize() self._init_ready() + self.new_code_hook() + + def frame_stores_global(self, w_globals): + if self.w_globals is None: + self.w_globals = w_globals + return False + if self.w_globals is w_globals: + return False + return True + + def new_code_hook(self): + code_hook = self.space.fromcache(CodeHookCache)._code_hook + if code_hook is not None: + try: + self.space.call_function(code_hook, self) + except OperationError, e: + e.write_unraisable(self.space, "new_code_hook()") def _initialize(self): if self.co_cellvars: diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/pyframe.py pypy-5.0.1+dfsg/pypy/interpreter/pyframe.py --- pypy-4.0.1+dfsg/pypy/interpreter/pyframe.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/pyframe.py 2016-03-19 16:40:12.000000000 +0000 @@ -36,6 +36,7 @@ def __init__(self, pycode): self.f_lineno = pycode.co_firstlineno + self.w_globals = pycode.w_globals class PyFrame(W_Root): """Represents a frame for a regular Python function @@ -67,7 +68,6 @@ escaped = False # see mark_as_escaped() debugdata = None - w_globals = None pycode = None # code object executed by that frame locals_cells_stack_w = None # the list of all locals, cells and the valuestack valuestackdepth = 0 # number of items on valuestack @@ -90,8 +90,9 @@ self = hint(self, access_directly=True, fresh_virtualizable=True) assert isinstance(code, pycode.PyCode) self.space = space - self.w_globals = w_globals self.pycode = code + if code.frame_stores_global(w_globals): + self.getorcreatedebug().w_globals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) size = code.co_nlocals + ncellvars + nfreevars + code.co_stacksize @@ -116,6 +117,12 @@ self.debugdata = FrameDebugData(self.pycode) return self.debugdata + def 
get_w_globals(self): + debugdata = self.getdebug() + if debugdata is not None: + return debugdata.w_globals + return jit.promote(self.pycode).w_globals + def get_w_f_trace(self): d = self.getdebug() if d is None: @@ -201,8 +208,9 @@ if flags & pycode.CO_NEWLOCALS: self.getorcreatedebug().w_locals = self.space.newdict(module=True) else: - assert self.w_globals is not None - self.getorcreatedebug().w_locals = self.w_globals + w_globals = self.get_w_globals() + assert w_globals is not None + self.getorcreatedebug().w_locals = w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) @@ -449,7 +457,7 @@ w_blockstack, w_exc_value, # last_exception w_tb, # - self.w_globals, + self.get_w_globals(), w(self.last_instr), w(self.frame_finished_execution), w(f_lineno), @@ -658,6 +666,11 @@ def fget_getdictscope(self, space): return self.getdictscope() + def fget_w_globals(self, space): + # bit silly, but GetSetProperty passes a space + return self.get_w_globals() + + ### line numbers ### def fget_f_lineno(self, space): diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/pyopcode.py pypy-5.0.1+dfsg/pypy/interpreter/pyopcode.py --- pypy-4.0.1+dfsg/pypy/interpreter/pyopcode.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/pyopcode.py 2016-03-19 16:40:12.000000000 +0000 @@ -837,7 +837,7 @@ w_bases = self.popvalue() w_name = self.popvalue() w_metaclass = find_metaclass(self.space, w_bases, - w_methodsdict, self.w_globals, + w_methodsdict, self.get_w_globals(), self.space.wrap(self.get_builtin())) w_newclass = self.space.call_function(w_metaclass, w_name, w_bases, w_methodsdict) @@ -881,14 +881,14 @@ def STORE_GLOBAL(self, nameindex, next_instr): varname = self.getname_u(nameindex) w_newvalue = self.popvalue() - self.space.setitem_str(self.w_globals, varname, w_newvalue) + self.space.setitem_str(self.get_w_globals(), varname, w_newvalue) def DELETE_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - 
self.space.delitem(self.w_globals, w_varname) + self.space.delitem(self.get_w_globals(), w_varname) def LOAD_NAME(self, nameindex, next_instr): - if self.getorcreatedebug().w_locals is not self.w_globals: + if self.getorcreatedebug().w_locals is not self.get_w_globals(): varname = self.getname_u(nameindex) w_value = self.space.finditem_str(self.getorcreatedebug().w_locals, varname) @@ -898,7 +898,7 @@ self.LOAD_GLOBAL(nameindex, next_instr) # fall-back def _load_global(self, varname): - w_value = self.space.finditem_str(self.w_globals, varname) + w_value = self.space.finditem_str(self.get_w_globals(), varname) if w_value is None: # not in the globals, now look in the built-ins w_value = self.get_builtin().getdictvalue(self.space, varname) @@ -1029,7 +1029,7 @@ if w_locals is None: # CPython does this w_locals = space.w_None w_modulename = space.wrap(modulename) - w_globals = self.w_globals + w_globals = self.get_w_globals() if w_flag is None: w_obj = space.call_function(w_import, w_modulename, w_globals, w_locals, w_fromlist) @@ -1237,7 +1237,7 @@ w_codeobj = self.popvalue() codeobj = self.space.interp_w(PyCode, w_codeobj) defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments) self.pushvalue(self.space.wrap(fn)) @@ -1249,7 +1249,7 @@ freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] defaultarguments = self.popvalues(numdefaults) - fn = function.Function(self.space, codeobj, self.w_globals, + fn = function.Function(self.space, codeobj, self.get_w_globals(), defaultarguments, freevars) self.pushvalue(self.space.wrap(fn)) diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/pyparser/pytokenizer.py pypy-5.0.1+dfsg/pypy/interpreter/pyparser/pytokenizer.py --- pypy-4.0.1+dfsg/pypy/interpreter/pyparser/pytokenizer.py 2015-11-19 19:21:40.000000000 +0000 +++ 
pypy-5.0.1+dfsg/pypy/interpreter/pyparser/pytokenizer.py 2016-03-19 16:40:12.000000000 +0000 @@ -91,6 +91,7 @@ strstart = (0, 0, "") for line in lines: lnum = lnum + 1 + line = universal_newline(line) pos, max = 0, len(line) if contstr: @@ -259,3 +260,14 @@ token_list.append((tokens.ENDMARKER, '', lnum, pos, line)) return token_list + + +def universal_newline(line): + # show annotator that indexes below are non-negative + line_len_m2 = len(line) - 2 + if line_len_m2 >= 0 and line[-2] == '\r' and line[-1] == '\n': + return line[:line_len_m2] + '\n' + line_len_m1 = len(line) - 1 + if line_len_m1 >= 0 and line[-1] == '\r': + return line[:line_len_m1] + '\n' + return line diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/pyparser/test/test_pyparse.py pypy-5.0.1+dfsg/pypy/interpreter/pyparser/test/test_pyparse.py --- pypy-4.0.1+dfsg/pypy/interpreter/pyparser/test/test_pyparse.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/pyparser/test/test_pyparse.py 2016-03-19 16:40:12.000000000 +0000 @@ -158,3 +158,10 @@ def test_print_function(self): self.parse("from __future__ import print_function\nx = print\n") + + def test_universal_newlines(self): + fmt = 'stuff = """hello%sworld"""' + expected_tree = self.parse(fmt % '\n') + for linefeed in ["\r\n","\r"]: + tree = self.parse(fmt % linefeed) + assert expected_tree == tree diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/test/test_app_main.py pypy-5.0.1+dfsg/pypy/interpreter/test/test_app_main.py --- pypy-4.0.1+dfsg/pypy/interpreter/test/test_app_main.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/test/test_app_main.py 2016-03-19 16:40:15.000000000 +0000 @@ -133,7 +133,7 @@ self.check(['-S', '-tO', '--info'], {}, output_contains='translation') self.check(['-S', '-tO', '--version'], {}, output_contains='Python') self.check(['-S', '-tOV'], {}, output_contains='Python') - self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], + self.check(['--jit', 'off', '-S'], {}, sys_argv=[''], 
run_stdin=True, no_site=1) self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/test/test_function.py pypy-5.0.1+dfsg/pypy/interpreter/test/test_function.py --- pypy-4.0.1+dfsg/pypy/interpreter/test/test_function.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/test/test_function.py 2016-03-19 16:40:15.000000000 +0000 @@ -1,5 +1,4 @@ - -import unittest +import pytest from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -187,6 +186,7 @@ raises( TypeError, func, 42, {'arg1': 23}) + @pytest.mark.skipif("config.option.runappdirect") def test_kwargs_nondict_mapping(self): class Mapping: def keys(self): @@ -257,6 +257,14 @@ meth = func.__get__(obj, object) assert meth() == obj + def test_none_get_interaction(self): + skip("XXX issue #2083") + assert type(None).__repr__(None) == 'None' + + def test_none_get_interaction_2(self): + f = None.__repr__ + assert f() == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): @@ -284,6 +292,7 @@ raises(TypeError, len, s, some_unknown_keyword=s) raises(TypeError, len, s, s, some_unknown_keyword=s) + @pytest.mark.skipif("config.option.runappdirect") def test_call_error_message(self): try: len() @@ -325,6 +334,7 @@ f = lambda: 42 assert f.func_doc is None + @pytest.mark.skipif("config.option.runappdirect") def test_setstate_called_with_wrong_args(self): f = lambda: 42 # not sure what it should raise, since CPython doesn't have setstate @@ -550,6 +560,37 @@ assert A().m == X() assert X() == A().m + @pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + class B(A): + pass + + class X(object): + def __eq__(self, other): + return True + + a = A() + a2 = A() + 
assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert id(A.m) != id(A.n) + assert A.m is not B.m + assert id(A.m) != id(B.m) + class TestMethod: def setup_method(self, method): diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/test/test_pyframe.py pypy-5.0.1+dfsg/pypy/interpreter/test/test_pyframe.py --- pypy-4.0.1+dfsg/pypy/interpreter/test/test_pyframe.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/test/test_pyframe.py 2016-03-19 16:40:12.000000000 +0000 @@ -34,6 +34,7 @@ import sys f = sys._getframe() assert f.f_globals is globals() + raises(TypeError, "f.f_globals = globals()") def test_f_builtins(self): import sys, __builtin__ diff -Nru pypy-4.0.1+dfsg/pypy/interpreter/typedef.py pypy-5.0.1+dfsg/pypy/interpreter/typedef.py --- pypy-4.0.1+dfsg/pypy/interpreter/typedef.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/interpreter/typedef.py 2016-03-19 16:40:12.000000000 +0000 @@ -156,20 +156,6 @@ get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def enum_interplevel_subclasses(config, cls): - """Return a list of all the extra interp-level subclasses of 'cls' that - can be built by get_unique_interplevel_subclass().""" - result = [] - for flag1 in (False, True): - for flag2 in (False, True): - for flag3 in (False, True): - for flag4 in (False, True): - result.append(get_unique_interplevel_subclass( - config, cls, flag1, flag2, flag3, flag4)) - result = dict.fromkeys(result) - assert len(result) <= 6 - return result.keys() - def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable): typedef = cls.typedef if wants_dict and typedef.hasdict: @@ -262,7 +248,7 @@ def user_setup(self, space, w_subtype): self.space = space self.w__class__ = w_subtype - self.user_setup_slots(w_subtype.nslots) + 
self.user_setup_slots(w_subtype.layout.nslots) def user_setup_slots(self, nslots): assert nslots == 0 @@ -772,7 +758,7 @@ f_restricted = GetSetProperty(PyFrame.fget_f_restricted), f_code = GetSetProperty(PyFrame.fget_code), f_locals = GetSetProperty(PyFrame.fget_getdictscope), - f_globals = interp_attrproperty_w('w_globals', cls=PyFrame), + f_globals = GetSetProperty(PyFrame.fget_w_globals), ) assert not PyFrame.typedef.acceptable_as_base_class # no __new__ diff -Nru pypy-4.0.1+dfsg/pypy/module/__builtin__/compiling.py pypy-5.0.1+dfsg/pypy/module/__builtin__/compiling.py --- pypy-4.0.1+dfsg/pypy/module/__builtin__/compiling.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/__builtin__/compiling.py 2016-03-19 16:40:12.000000000 +0000 @@ -93,7 +93,7 @@ if space.is_none(w_locals): w_locals = w_globals else: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() if space.is_none(w_locals): w_locals = caller.getdictscope() elif space.is_none(w_locals): diff -Nru pypy-4.0.1+dfsg/pypy/module/__builtin__/descriptor.py pypy-5.0.1+dfsg/pypy/module/__builtin__/descriptor.py --- pypy-4.0.1+dfsg/pypy/module/__builtin__/descriptor.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/__builtin__/descriptor.py 2016-03-19 16:40:12.000000000 +0000 @@ -79,6 +79,7 @@ W_Super.typedef = TypeDef( 'super', __new__ = interp2app(descr_new_super), + __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """super(type) -> unbound super object diff -Nru pypy-4.0.1+dfsg/pypy/module/__builtin__/interp_classobj.py pypy-5.0.1+dfsg/pypy/module/__builtin__/interp_classobj.py --- pypy-4.0.1+dfsg/pypy/module/__builtin__/interp_classobj.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/__builtin__/interp_classobj.py 2016-03-19 16:40:11.000000000 +0000 @@ -20,7 +20,7 @@ if not space.isinstance_w(w_dict, space.w_dict): 
raise_type_err(space, 'bases', 'tuple', w_bases) - if not space.is_true(space.contains(w_dict, space.wrap("__doc__"))): + if not space.contains_w(w_dict, space.wrap("__doc__")): space.setitem(w_dict, space.wrap("__doc__"), space.w_None) # XXX missing: lengthy and obscure logic about "__module__" diff -Nru pypy-4.0.1+dfsg/pypy/module/__builtin__/interp_inspect.py pypy-5.0.1+dfsg/pypy/module/__builtin__/interp_inspect.py --- pypy-4.0.1+dfsg/pypy/module/__builtin__/interp_inspect.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/__builtin__/interp_inspect.py 2016-03-19 16:40:11.000000000 +0000 @@ -2,7 +2,7 @@ def globals(space): "Return the dictionary containing the current scope's global variables." ec = space.getexecutioncontext() - return ec.gettopframe_nohidden().w_globals + return ec.gettopframe_nohidden().get_w_globals() def locals(space): """Return a dictionary containing the current scope's local variables. diff -Nru pypy-4.0.1+dfsg/pypy/module/__builtin__/test/test_classobj.py pypy-5.0.1+dfsg/pypy/module/__builtin__/test/test_classobj.py --- pypy-4.0.1+dfsg/pypy/module/__builtin__/test/test_classobj.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/__builtin__/test/test_classobj.py 2016-03-19 16:40:11.000000000 +0000 @@ -452,7 +452,6 @@ assert a + 1 == 2 assert a + 1.1 == 2 - def test_binaryop_calls_coerce_always(self): l = [] class A: @@ -1076,6 +1075,16 @@ assert (D() > A()) == 'D:A.gt' assert (D() >= A()) == 'D:A.ge' + def test_override___int__(self): + class F(float): + def __int__(self): + return 666 + f = F(-12.3) + assert int(f) == 666 + # on cpython, this calls float_trunc() in floatobject.c + # which ends up calling PyFloat_AS_DOUBLE((PyFloatObject*) f) + assert float.__int__(f) == -12 + class AppTestOldStyleClassBytesDict(object): def setup_class(cls): @@ -1084,7 +1093,7 @@ def is_strdict(space, w_class): from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return 
space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) + return space.wrap(isinstance(w_d.get_strategy(), BytesDictStrategy)) cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) diff -Nru pypy-4.0.1+dfsg/pypy/module/__builtin__/test/test_descriptor.py pypy-5.0.1+dfsg/pypy/module/__builtin__/test/test_descriptor.py --- pypy-4.0.1+dfsg/pypy/module/__builtin__/test/test_descriptor.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/__builtin__/test/test_descriptor.py 2016-03-19 16:40:11.000000000 +0000 @@ -214,7 +214,7 @@ c = C() assert C.goo(1) == (C, 1) assert c.goo(1) == (C, 1) - + assert c.foo(1) == (c, 1) class D(C): pass @@ -238,6 +238,17 @@ meth = classmethod(1).__get__(1) raises(TypeError, meth) + def test_super_thisclass(self): + class A(object): + pass + + assert super(A, A()).__thisclass__ is A + + class B(A): + pass + + assert super(B, B()).__thisclass__ is B + assert super(A, B()).__thisclass__ is A def test_property_docstring(self): assert property.__doc__.startswith('property') diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/call_python.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/call_python.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/call_python.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/call_python.py 2016-03-19 16:40:11.000000000 +0000 @@ -0,0 +1,133 @@ +import os +from rpython.rlib.objectmodel import specialize, instantiate +from rpython.rlib.rarithmetic import intmask +from rpython.rlib import jit +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.annlowlevel import llhelper + +from pypy.interpreter.error import oefmt +from pypy.interpreter.gateway import interp2app +from pypy.module._cffi_backend import parse_c_type +from pypy.module._cffi_backend import cerrno +from pypy.module._cffi_backend import cffi_opcode +from pypy.module._cffi_backend import realize_c_type +from 
pypy.module._cffi_backend.realize_c_type import getop, getarg + + +STDERR = 2 +EXTERNPY_FN = lltype.FuncType([parse_c_type.PEXTERNPY, rffi.CCHARP], + lltype.Void) + + +def _cffi_call_python(ll_externpy, ll_args): + """Invoked by the helpers generated from extern "Python" in the cdef. + + 'externpy' is a static structure that describes which of the + extern "Python" functions is called. It has got fields 'name' and + 'type_index' describing the function, and more reserved fields + that are initially zero. These reserved fields are set up by + ffi.def_extern(), which invokes externpy_deco() below. + + 'args' is a pointer to an array of 8-byte entries. Each entry + contains an argument. If an argument is less than 8 bytes, only + the part at the beginning of the entry is initialized. If an + argument is 'long double' or a struct/union, then it is passed + by reference. + + 'args' is also used as the place to write the result to + (directly, even if more than 8 bytes). In all cases, 'args' is + at least 8 bytes in size. + """ + from pypy.module._cffi_backend.ccallback import reveal_callback + from rpython.rlib import rgil + + rgil.acquire() + rffi.stackcounter.stacks_counter += 1 + llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py + + cerrno._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) + + if not ll_externpy.c_reserved1: + # Not initialized! We don't have a space at all. + # Write the error to the file descriptor stderr. + try: + funcname = rffi.charp2str(ll_externpy.c_name) + msg = ("extern \"Python\": function %s() called, but no code was " + "attached to it yet with @ffi.def_extern(). 
" + "Returning 0.\n" % (funcname,)) + os.write(STDERR, msg) + except: + pass + for i in range(intmask(ll_externpy.c_size_of_result)): + ll_args[i] = '\x00' + else: + externpython = reveal_callback(ll_externpy.c_reserved1) + # the same buffer is used both for passing arguments and + # the result value + externpython.invoke(ll_args, ll_args) + + cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) + + rffi.stackcounter.stacks_counter -= 1 + rgil.release() + + +def get_ll_cffi_call_python(): + return llhelper(lltype.Ptr(EXTERNPY_FN), _cffi_call_python) + + +class KeepaliveCache: + def __init__(self, space): + self.cache_dict = {} + + +@jit.dont_look_inside +def externpy_deco(space, w_ffi, w_python_callable, w_name, w_error, w_onerror): + from pypy.module._cffi_backend.ffi_obj import W_FFIObject + from pypy.module._cffi_backend.ccallback import W_ExternPython + + ffi = space.interp_w(W_FFIObject, w_ffi) + + if space.is_w(w_name, space.w_None): + w_name = space.getattr(w_python_callable, space.wrap('__name__')) + name = space.str_w(w_name) + + ctx = ffi.ctxobj.ctx + index = parse_c_type.search_in_globals(ctx, name) + if index < 0: + raise externpy_not_found(ffi, name) + + g = ctx.c_globals[index] + if getop(g.c_type_op) != cffi_opcode.OP_EXTERN_PYTHON: + raise externpy_not_found(ffi, name) + + w_ct = realize_c_type.realize_c_type(ffi, ctx.c_types, getarg(g.c_type_op)) + + # make a W_ExternPython instance, which is nonmovable; then cast it + # to a raw pointer and assign it to the field 'reserved1' of the + # externpy object from C. We must make sure to keep it alive forever, + # or at least until ffi.def_extern() is used again to change the + # binding. Note that the W_ExternPython is never exposed to the user. 
+ externpy = rffi.cast(parse_c_type.PEXTERNPY, g.c_address) + externpython = instantiate(W_ExternPython, nonmovable=True) + cdata = rffi.cast(rffi.CCHARP, externpy) + W_ExternPython.__init__(externpython, space, cdata, + w_ct, w_python_callable, w_error, w_onerror) + + key = rffi.cast(lltype.Signed, externpy) + space.fromcache(KeepaliveCache).cache_dict[key] = externpython + externpy.c_reserved1 = externpython.hide_object() + + # return the function object unmodified + return w_python_callable + + +def externpy_not_found(ffi, name): + raise oefmt(ffi.w_FFIError, + "ffi.def_extern('%s'): no 'extern \"Python\"' " + "function with this name", name) + +@specialize.memo() +def get_generic_decorator(space): + return space.wrap(interp2app(externpy_deco)) diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ccallback.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ccallback.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ccallback.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ccallback.py 2016-03-19 16:40:11.000000000 +0000 @@ -3,12 +3,12 @@ """ import sys, os, py -from rpython.rlib import clibffi, jit, jit_libffi, rgc, objectmodel +from rpython.rlib import clibffi, jit, rgc, objectmodel from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt -from pypy.module._cffi_backend import cerrno, misc +from pypy.module._cffi_backend import cerrno, misc, parse_c_type from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend.ctypefunc import SIZE_OF_FFI_ARG, W_CTypeFunc from pypy.module._cffi_backend.ctypeprim import W_CTypePrimitiveSigned @@ -25,46 +25,39 @@ # we can cast to a plain VOIDP. As long as the object is not freed, # we can cast the VOIDP back to a W_CDataCallback in reveal_callback(). 
cdata = objectmodel.instantiate(W_CDataCallback, nonmovable=True) - gcref = rgc.cast_instance_to_gcref(cdata) - raw_cdata = rgc.hide_nonmovable_gcref(gcref) - cdata.__init__(space, ctype, w_callable, w_error, w_onerror, raw_cdata) + W_CDataCallback.__init__(cdata, space, ctype, + w_callable, w_error, w_onerror) return cdata def reveal_callback(raw_ptr): addr = rffi.cast(llmemory.Address, raw_ptr) gcref = rgc.reveal_gcref(addr) - return rgc.try_cast_gcref_to_instance(W_CDataCallback, gcref) + return rgc.try_cast_gcref_to_instance(W_ExternPython, gcref) class Closure(object): """This small class is here to have a __del__ outside any cycle.""" - ll_error = lltype.nullptr(rffi.CCHARP.TO) # set manually - def __init__(self, ptr): self.ptr = ptr def __del__(self): clibffi.closureHeap.free(rffi.cast(clibffi.FFI_CLOSUREP, self.ptr)) - if self.ll_error: - lltype.free(self.ll_error, flavor='raw') -class W_CDataCallback(W_CData): - _immutable_fields_ = ['key_pycode'] +class W_ExternPython(W_CData): + """Base class for W_CDataCallback, also used from call_python.py. 
+ """ + decode_args_from_libffi = False w_onerror = None - def __init__(self, space, ctype, w_callable, w_error, w_onerror, - raw_cdata): - raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) - self._closure = Closure(raw_closure) - W_CData.__init__(self, space, raw_closure, ctype) + def __init__(self, space, cdata, ctype, w_callable, w_error, w_onerror): + W_CData.__init__(self, space, cdata, ctype) # if not space.is_true(space.callable(w_callable)): raise oefmt(space.w_TypeError, "expected a callable object, not %T", w_callable) self.w_callable = w_callable - self.key_pycode = space._try_fetch_pycode(w_callable) if not space.is_none(w_onerror): if not space.is_true(space.callable(w_onerror)): raise oefmt(space.w_TypeError, @@ -74,40 +67,22 @@ # fresult = self.getfunctype().ctitem size = fresult.size - if size > 0: - if fresult.is_primitive_integer and size < SIZE_OF_FFI_ARG: - size = SIZE_OF_FFI_ARG - self._closure.ll_error = lltype.malloc(rffi.CCHARP.TO, size, - flavor='raw', zero=True) - if not space.is_none(w_error): - convert_from_object_fficallback(fresult, self._closure.ll_error, - w_error) + if size < 0: + size = 0 + elif fresult.is_primitive_integer and size < SIZE_OF_FFI_ARG: + size = SIZE_OF_FFI_ARG + with lltype.scoped_alloc(rffi.CCHARP.TO, size, zero=True) as ll_error: + if not space.is_none(w_error): + convert_from_object_fficallback(fresult, ll_error, w_error, + self.decode_args_from_libffi) + self.error_string = rffi.charpsize2str(ll_error, size) # # We must setup the GIL here, in case the callback is invoked in # some other non-Pythonic thread. This is the same as cffi on - # CPython. + # CPython, or ctypes. 
if space.config.translation.thread: from pypy.module.thread.os_thread import setup_threads setup_threads(space) - # - cif_descr = self.getfunctype().cif_descr - if not cif_descr: - raise oefmt(space.w_NotImplementedError, - "%s: callback with unsupported argument or " - "return type or with '...'", self.getfunctype().name) - with self as ptr: - closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) - unique_id = rffi.cast(rffi.VOIDP, raw_cdata) - res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, - invoke_callback, - unique_id) - if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK: - raise OperationError(space.w_SystemError, - space.wrap("libffi failed to build this callback")) - - def _repr_extra(self): - space = self.space - return 'calling ' + space.str_w(space.repr(self.w_callable)) def getfunctype(self): ctype = self.ctype @@ -117,43 +92,154 @@ space.wrap("expected a function ctype")) return ctype + def hide_object(self): + gcref = rgc.cast_instance_to_gcref(self) + raw = rgc.hide_nonmovable_gcref(gcref) + return rffi.cast(rffi.VOIDP, raw) + + def _repr_extra(self): + space = self.space + return 'calling ' + space.str_w(space.repr(self.w_callable)) + + def write_error_return_value(self, ll_res): + error_string = self.error_string + for i in range(len(error_string)): + ll_res[i] = error_string[i] + + def invoke(self, ll_res, ll_args): + space = self.space + must_leave = False + try: + must_leave = space.threadlocals.try_enter_thread(space) + self.py_invoke(ll_res, ll_args) + # + except Exception, e: + # oups! last-level attempt to recover. + try: + os.write(STDERR, "SystemError: callback raised ") + os.write(STDERR, str(e)) + os.write(STDERR, "\n") + except: + pass + self.write_error_return_value(ll_res) + if must_leave: + space.threadlocals.leave_thread(space) + + def py_invoke(self, ll_res, ll_args): + # For W_ExternPython only; overridden in W_CDataCallback. Note + # that the details of the two jitdrivers differ. 
For + # W_ExternPython, it depends on the identity of 'self', which + # means every @ffi.def_extern() gets its own machine code, + # which sounds reasonable here. Moreover, 'll_res' is ignored + # as it is always equal to 'll_args'. + jitdriver2.jit_merge_point(externpython=self, ll_args=ll_args) + self.do_invoke(ll_args, ll_args) + + def do_invoke(self, ll_res, ll_args): + space = self.space + extra_line = '' + try: + w_args = self.prepare_args_tuple(ll_args) + w_res = space.call(self.w_callable, w_args) + extra_line = "Trying to convert the result back to C:\n" + self.convert_result(ll_res, w_res) + except OperationError, e: + self.handle_applevel_exception(e, ll_res, extra_line) + @jit.unroll_safe - def invoke(self, ll_args): + def prepare_args_tuple(self, ll_args): space = self.space ctype = self.getfunctype() ctype = jit.promote(ctype) args_w = [] + decode_args_from_libffi = self.decode_args_from_libffi for i, farg in enumerate(ctype.fargs): - ll_arg = rffi.cast(rffi.CCHARP, ll_args[i]) + if decode_args_from_libffi: + ll_arg = rffi.cast(rffi.CCHARPP, ll_args)[i] + else: + ll_arg = rffi.ptradd(ll_args, 8 * i) + if farg.is_indirect_arg_for_call_python: + ll_arg = rffi.cast(rffi.CCHARPP, ll_arg)[0] args_w.append(farg.convert_to_object(ll_arg)) - return space.call(self.w_callable, space.newtuple(args_w)) + return space.newtuple(args_w) def convert_result(self, ll_res, w_res): fresult = self.getfunctype().ctitem - convert_from_object_fficallback(fresult, ll_res, w_res) + convert_from_object_fficallback(fresult, ll_res, w_res, + self.decode_args_from_libffi) def print_error(self, operr, extra_line): space = self.space operr.write_unraisable(space, "cffi callback ", self.w_callable, with_traceback=True, extra_line=extra_line) - def write_error_return_value(self, ll_res): - fresult = self.getfunctype().ctitem - if fresult.size > 0: - misc._raw_memcopy(self._closure.ll_error, ll_res, fresult.size) - keepalive_until_here(self) # to keep self._closure.ll_error alive + 
@jit.dont_look_inside + def handle_applevel_exception(self, e, ll_res, extra_line): + space = self.space + self.write_error_return_value(ll_res) + if self.w_onerror is None: + self.print_error(e, extra_line) + else: + try: + e.normalize_exception(space) + w_t = e.w_type + w_v = e.get_w_value(space) + w_tb = space.wrap(e.get_traceback()) + w_res = space.call_function(self.w_onerror, w_t, w_v, w_tb) + if not space.is_none(w_res): + self.convert_result(ll_res, w_res) + except OperationError, e2: + # double exception! print a double-traceback... + self.print_error(e, extra_line) # original traceback + e2.write_unraisable(space, '', with_traceback=True, + extra_line="\nDuring the call to 'onerror', " + "another exception occurred:\n\n") + + +class W_CDataCallback(W_ExternPython): + _immutable_fields_ = ['key_pycode'] + decode_args_from_libffi = True + + def __init__(self, space, ctype, w_callable, w_error, w_onerror): + raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) + self._closure = Closure(raw_closure) + W_ExternPython.__init__(self, space, raw_closure, ctype, + w_callable, w_error, w_onerror) + self.key_pycode = space._try_fetch_pycode(w_callable) + # + cif_descr = self.getfunctype().cif_descr + if not cif_descr: + raise oefmt(space.w_NotImplementedError, + "%s: callback with unsupported argument or " + "return type or with '...'", self.getfunctype().name) + with self as ptr: + closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) + unique_id = self.hide_object() + res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, + invoke_callback, + unique_id) + if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK: + raise OperationError(space.w_SystemError, + space.wrap("libffi failed to build this callback")) + def py_invoke(self, ll_res, ll_args): + jitdriver1.jit_merge_point(callback=self, + ll_res=ll_res, + ll_args=ll_args) + self.do_invoke(ll_res, ll_args) -def convert_from_object_fficallback(fresult, ll_res, w_res): + +def 
convert_from_object_fficallback(fresult, ll_res, w_res, + encode_result_for_libffi): space = fresult.space - small_result = fresult.size < SIZE_OF_FFI_ARG - if small_result and isinstance(fresult, W_CTypeVoid): + if isinstance(fresult, W_CTypeVoid): if not space.is_w(w_res, space.w_None): raise OperationError(space.w_TypeError, space.wrap("callback with the return type 'void'" " must return None")) return # + small_result = encode_result_for_libffi and fresult.size < SIZE_OF_FFI_ARG if small_result and fresult.is_primitive_integer: # work work work around a libffi irregularity: for integer return # types we have to fill at least a complete 'ffi_arg'-sized result @@ -191,50 +277,30 @@ STDERR = 2 -@jit.dont_look_inside -def _handle_applevel_exception(callback, e, ll_res, extra_line): - space = callback.space - callback.write_error_return_value(ll_res) - if callback.w_onerror is None: - callback.print_error(e, extra_line) - else: - try: - e.normalize_exception(space) - w_t = e.w_type - w_v = e.get_w_value(space) - w_tb = space.wrap(e.get_traceback()) - w_res = space.call_function(callback.w_onerror, - w_t, w_v, w_tb) - if not space.is_none(w_res): - callback.convert_result(ll_res, w_res) - except OperationError, e2: - # double exception! print a double-traceback... 
- callback.print_error(e, extra_line) # original traceback - e2.write_unraisable(space, '', with_traceback=True, - extra_line="\nDuring the call to 'onerror', " - "another exception occurred:\n\n") +# jitdrivers, for both W_CDataCallback and W_ExternPython -def get_printable_location(key_pycode): +def get_printable_location1(key_pycode): if key_pycode is None: return 'cffi_callback ' return 'cffi_callback ' + key_pycode.get_repr() -jitdriver = jit.JitDriver(name='cffi_callback', - greens=['callback.key_pycode'], - reds=['ll_res', 'll_args', 'callback'], - get_printable_location=get_printable_location) - -def py_invoke_callback(callback, ll_res, ll_args): - jitdriver.jit_merge_point(callback=callback, ll_res=ll_res, ll_args=ll_args) - extra_line = '' - try: - w_res = callback.invoke(ll_args) - extra_line = "Trying to convert the result back to C:\n" - callback.convert_result(ll_res, w_res) - except OperationError, e: - _handle_applevel_exception(callback, e, ll_res, extra_line) +jitdriver1 = jit.JitDriver(name='cffi_callback', + greens=['callback.key_pycode'], + reds=['ll_res', 'll_args', 'callback'], + get_printable_location=get_printable_location1) + +def get_printable_location2(externpython): + with externpython as ptr: + externpy = rffi.cast(parse_c_type.PEXTERNPY, ptr) + return 'cffi_call_python ' + rffi.charp2str(externpy.c_name) + +jitdriver2 = jit.JitDriver(name='cffi_call_python', + greens=['externpython'], + reds=['ll_args'], + get_printable_location=get_printable_location2) + -def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): +def invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. 
ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args @@ -242,6 +308,7 @@ ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ + cerrno._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) ll_res = rffi.cast(rffi.CCHARP, ll_res) callback = reveal_callback(ll_userdata) if callback is None: @@ -254,27 +321,6 @@ # In this case, we don't even know how big ll_res is. Let's assume # it is just a 'ffi_arg', and store 0 there. misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) - return - # - space = callback.space - must_leave = False - try: - must_leave = space.threadlocals.try_enter_thread(space) - py_invoke_callback(callback, ll_res, ll_args) - # - except Exception, e: - # oups! last-level attempt to recover. - try: - os.write(STDERR, "SystemError: callback raised ") - os.write(STDERR, str(e)) - os.write(STDERR, "\n") - except: - pass - callback.write_error_return_value(ll_res) - if must_leave: - space.threadlocals.leave_thread(space) - -def invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): - cerrno._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) - _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata) + else: + callback.invoke(ll_res, rffi.cast(rffi.CCHARP, ll_args)) cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/cffi1_module.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/cffi1_module.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/cffi1_module.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/cffi1_module.py 2016-03-19 16:40:11.000000000 +0000 @@ -2,30 +2,36 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.module import Module +from pypy.module import _cffi_backend from pypy.module._cffi_backend import parse_c_type from pypy.module._cffi_backend.ffi_obj import W_FFIObject from pypy.module._cffi_backend.lib_obj import 
W_LibObject VERSION_MIN = 0x2601 -VERSION_MAX = 0x26FF +VERSION_MAX = 0x27FF -VERSION_EXPORT = 0x0A02 +VERSION_EXPORT = 0x0A03 -initfunctype = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) +INITFUNCPTR = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) def load_cffi1_module(space, name, path, initptr): # This is called from pypy.module.cpyext.api.load_extension_module() - initfunc = rffi.cast(initfunctype, initptr) - with lltype.scoped_alloc(rffi.VOIDPP.TO, 2) as p: + from pypy.module._cffi_backend.call_python import get_ll_cffi_call_python + + initfunc = rffi.cast(INITFUNCPTR, initptr) + with lltype.scoped_alloc(rffi.VOIDPP.TO, 16, zero=True) as p: p[0] = rffi.cast(rffi.VOIDP, VERSION_EXPORT) + p[1] = rffi.cast(rffi.VOIDP, get_ll_cffi_call_python()) initfunc(p) version = rffi.cast(lltype.Signed, p[0]) if not (VERSION_MIN <= version <= VERSION_MAX): raise oefmt(space.w_ImportError, - "cffi extension module '%s' has unknown version %s", - name, hex(version)) + "cffi extension module '%s' uses an unknown version tag %s. " + "This module might need a more recent version of PyPy. 
" + "The current PyPy provides CFFI %s.", + name, hex(version), _cffi_backend.VERSION) src_ctx = rffi.cast(parse_c_type.PCTX, p[1]) ffi = W_FFIObject(space, src_ctx) @@ -35,7 +41,8 @@ w_name = space.wrap(name) module = Module(space, w_name) - module.setdictvalue(space, '__file__', space.wrap(path)) + if path is not None: + module.setdictvalue(space, '__file__', space.wrap(path)) module.setdictvalue(space, 'ffi', space.wrap(ffi)) module.setdictvalue(space, 'lib', space.wrap(lib)) w_modules_dict = space.sys.get('modules') diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/cffi_opcode.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/cffi_opcode.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/cffi_opcode.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/cffi_opcode.py 2016-03-19 16:40:11.000000000 +0000 @@ -54,6 +54,7 @@ OP_DLOPEN_FUNC = 35 OP_DLOPEN_CONST = 37 OP_GLOBAL_VAR_F = 39 +OP_EXTERN_PYTHON = 41 PRIM_VOID = 0 PRIM_BOOL = 1 diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/cglob.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/cglob.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/cglob.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/cglob.py 2016-03-19 16:40:11.000000000 +0000 @@ -3,6 +3,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend import newtype +from rpython.rlib import rgil from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -26,7 +27,9 @@ if not we_are_translated(): FNPTR = rffi.CCallback([], rffi.VOIDP) fetch_addr = rffi.cast(FNPTR, self.fetch_addr) + rgil.release() result = fetch_addr() + rgil.acquire() else: # careful in translated versions: we need to call fetch_addr, # but in a GIL-releasing way. 
The easiest is to invoke a diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ctypefunc.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ctypefunc.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ctypefunc.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ctypefunc.py 2016-03-19 16:40:11.000000000 +0000 @@ -63,7 +63,8 @@ space = self.space nargs_declared = len(self.fargs) fvarargs = [None] * len(args_w) - fvarargs[:nargs_declared] = self.fargs + for i in range(nargs_declared): + fvarargs[i] = self.fargs[i] for i in range(nargs_declared, len(args_w)): w_obj = args_w[i] if isinstance(w_obj, cdataobj.W_CData): @@ -422,7 +423,9 @@ exchange_offset += rffi.getintfield(self.atypes[i], 'c_size') # store the exchange data size - cif_descr.exchange_size = exchange_offset + # we also align it to the next multiple of 8, in an attempt to + # work around bugs(?) of libffi (see cffi issue #241) + cif_descr.exchange_size = self.align_arg(exchange_offset) def fb_extra_fields(self, cif_descr): cif_descr.abi = self.fabi diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ctypeobj.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ctypeobj.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ctypeobj.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ctypeobj.py 2016-03-19 16:40:11.000000000 +0000 @@ -22,6 +22,7 @@ cast_anything = False is_primitive_integer = False is_nonfunc_pointer_or_array = False + is_indirect_arg_for_call_python = False kind = "?" 
def __init__(self, space, size, name, name_position): diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ctypeprim.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ctypeprim.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ctypeprim.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ctypeprim.py 2016-03-19 16:40:11.000000000 +0000 @@ -424,6 +424,7 @@ class W_CTypePrimitiveLongDouble(W_CTypePrimitiveFloat): _attrs_ = [] + is_indirect_arg_for_call_python = True @jit.dont_look_inside def extra_repr(self, cdata): diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ctypestruct.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ctypestruct.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ctypestruct.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ctypestruct.py 2016-03-19 16:40:11.000000000 +0000 @@ -18,6 +18,7 @@ class W_CTypeStructOrUnion(W_CType): _immutable_fields_ = ['alignment?', '_fields_list?[*]', '_fields_dict?', '_custom_field_pos?', '_with_var_array?'] + is_indirect_arg_for_call_python = True # three possible states: # - "opaque": for opaque C structs; self.size < 0. 
diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/embedding.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/embedding.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/embedding.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/embedding.py 2016-03-19 16:40:11.000000000 +0000 @@ -0,0 +1,234 @@ +import os +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +from pypy.interpreter.error import OperationError, oefmt + +# ____________________________________________________________ + + +EMBED_VERSION_MIN = 0xB011 +EMBED_VERSION_MAX = 0xB0FF + +STDERR = 2 +INITSTRUCTPTR = lltype.Ptr(lltype.Struct('CFFI_INIT', + ('name', rffi.CCHARP), + ('func', rffi.VOIDP), + ('code', rffi.CCHARP))) + +def load_embedded_cffi_module(space, version, init_struct): + from pypy.module._cffi_backend.cffi1_module import load_cffi1_module + declare_c_function() # translation-time hint only: + # declare _cffi_carefully_make_gil() + # + version = rffi.cast(lltype.Signed, version) + if not (EMBED_VERSION_MIN <= version <= EMBED_VERSION_MAX): + raise oefmt(space.w_ImportError, + "cffi embedded module has got unknown version tag %s", + hex(version)) + # + if space.config.objspace.usemodules.thread: + from pypy.module.thread import os_thread + os_thread.setup_threads(space) + # + name = rffi.charp2str(init_struct.name) + load_cffi1_module(space, name, None, init_struct.func) + code = rffi.charp2str(init_struct.code) + compiler = space.createcompiler() + pycode = compiler.compile(code, "" % name, 'exec', 0) + w_globals = space.newdict(module=True) + space.setitem_str(w_globals, "__builtins__", space.wrap(space.builtin)) + pycode.exec_code(space, w_globals, w_globals) + + +class Global: + pass +glob = Global() + +def patch_sys(space): + # Annoying: CPython would just use the C-level std{in,out,err} as + # configured by the main application, for example in binary mode + # on Windows or with buffering 
turned off. We can't easily do the + # same. Instead, go for the safest bet (but possibly bad for + # performance) and open sys.std{in,out,err} unbuffered. On + # Windows I guess binary mode is a better default choice. + # + # XXX if needed, we could add support for a flag passed to + # pypy_init_embedded_cffi_module(). + if not glob.patched_sys: + space.appexec([], """(): + import os, sys + sys.stdin = sys.__stdin__ = os.fdopen(0, 'rb', 0) + sys.stdout = sys.__stdout__ = os.fdopen(1, 'wb', 0) + sys.stderr = sys.__stderr__ = os.fdopen(2, 'wb', 0) + """) + glob.patched_sys = True + + +def pypy_init_embedded_cffi_module(version, init_struct): + # called from __init__.py + name = "?" + try: + init_struct = rffi.cast(INITSTRUCTPTR, init_struct) + name = rffi.charp2str(init_struct.name) + # + space = glob.space + must_leave = False + try: + must_leave = space.threadlocals.try_enter_thread(space) + patch_sys(space) + load_embedded_cffi_module(space, version, init_struct) + res = 0 + except OperationError, operr: + operr.write_unraisable(space, "initialization of '%s'" % name, + with_traceback=True) + space.appexec([], r"""(): + import sys + sys.stderr.write('pypy version: %s.%s.%s\n' % + sys.pypy_version_info[:3]) + sys.stderr.write('sys.path: %r\n' % (sys.path,)) + """) + res = -1 + if must_leave: + space.threadlocals.leave_thread(space) + except Exception, e: + # oups! last-level attempt to recover. 
+ try: + os.write(STDERR, "From initialization of '") + os.write(STDERR, name) + os.write(STDERR, "':\n") + os.write(STDERR, str(e)) + os.write(STDERR, "\n") + except: + pass + res = -1 + return rffi.cast(rffi.INT, res) + +# ____________________________________________________________ + +if os.name == 'nt': + + do_includes = r""" +#define _WIN32_WINNT 0x0501 +#include + +#define CFFI_INIT_HOME_PATH_MAX _MAX_PATH +static void _cffi_init(void); +static void _cffi_init_error(const char *msg, const char *extra); + +static int _cffi_init_home(char *output_home_path) +{ + HMODULE hModule = 0; + DWORD res; + + GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | + GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, + (LPCTSTR)&_cffi_init, &hModule); + + if (hModule == 0 ) { + _cffi_init_error("GetModuleHandleEx() failed", ""); + return -1; + } + res = GetModuleFileName(hModule, output_home_path, CFFI_INIT_HOME_PATH_MAX); + if (res >= CFFI_INIT_HOME_PATH_MAX) { + return -1; + } + return 0; +} + +static void _cffi_init_once(void) +{ + static LONG volatile lock = 0; + static int _init_called = 0; + + while (InterlockedCompareExchange(&lock, 1, 0) != 0) { + SwitchToThread(); /* spin loop */ + } + if (!_init_called) { + _cffi_init(); + _init_called = 1; + } + InterlockedCompareExchange(&lock, 0, 1); +} +""" + +else: + + do_includes = r""" +#include +#include + +#define CFFI_INIT_HOME_PATH_MAX PATH_MAX +static void _cffi_init(void); +static void _cffi_init_error(const char *msg, const char *extra); + +static int _cffi_init_home(char *output_home_path) +{ + Dl_info info; + dlerror(); /* reset */ + if (dladdr(&_cffi_init, &info) == 0) { + _cffi_init_error("dladdr() failed: ", dlerror()); + return -1; + } + if (realpath(info.dli_fname, output_home_path) == NULL) { + perror("realpath() failed"); + _cffi_init_error("realpath() failed", ""); + return -1; + } + return 0; +} + +static void _cffi_init_once(void) +{ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + 
pthread_once(&once_control, _cffi_init); +} +""" + +do_startup = do_includes + r""" +RPY_EXPORTED void rpython_startup_code(void); +RPY_EXPORTED int pypy_setup_home(char *, int); + +static unsigned char _cffi_ready = 0; +static const char *volatile _cffi_module_name; + +static void _cffi_init_error(const char *msg, const char *extra) +{ + fprintf(stderr, + "\nPyPy initialization failure when loading module '%s':\n%s%s\n", + _cffi_module_name, msg, extra); +} + +static void _cffi_init(void) +{ + char home[CFFI_INIT_HOME_PATH_MAX + 1]; + + rpython_startup_code(); + RPyGilAllocate(); + + if (_cffi_init_home(home) != 0) + return; + if (pypy_setup_home(home, 1) != 0) { + _cffi_init_error("pypy_setup_home() failed", ""); + return; + } + _cffi_ready = 1; +} + +RPY_EXPORTED +int pypy_carefully_make_gil(const char *name) +{ + /* For CFFI: this initializes the GIL and loads the home path. + It can be called completely concurrently from unrelated threads. + It assumes that we don't hold the GIL before (if it exists), and we + don't hold it afterwards. 
+ */ + _cffi_module_name = name; /* not really thread-safe, but better than + nothing */ + _cffi_init_once(); + return (int)_cffi_ready - 1; +} +""" +eci = ExternalCompilationInfo(separate_module_sources=[do_startup]) + +declare_c_function = rffi.llexternal_use_eci(eci) diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ffi_obj.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ffi_obj.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/ffi_obj.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/ffi_obj.py 2016-03-19 16:40:11.000000000 +0000 @@ -10,7 +10,7 @@ from pypy.module._cffi_backend import parse_c_type, realize_c_type from pypy.module._cffi_backend import newtype, cerrno, ccallback, ctypearray from pypy.module._cffi_backend import ctypestruct, ctypeptr, handle -from pypy.module._cffi_backend import cbuffer, func, wrapper +from pypy.module._cffi_backend import cbuffer, func, wrapper, call_python from pypy.module._cffi_backend import cffi_opcode, allocator from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.cdataobj import W_CData @@ -49,6 +49,8 @@ ACCEPT_CDATA = ACCEPT_CDATA w_gc_wref_remove = None + w_init_once_cache = None + jit_init_once_cache = None @jit.dont_look_inside def __init__(self, space, src_ctx): @@ -279,6 +281,30 @@ return cbuffer.buffer(self.space, w_cdata, size) + @unwrap_spec(w_name=WrappedDefault(None), + w_error=WrappedDefault(None), + w_onerror=WrappedDefault(None)) + def descr_def_extern(self, w_name, w_error, w_onerror): + """\ +A decorator. Attaches the decorated Python function to the C code +generated for the 'extern "Python" function of the same name. +Calling the C function will then invoke the Python function. 
+ +Optional arguments: 'name' is the name of the C function, if +different from the Python function; and 'error' and 'onerror' +handle what occurs if the Python function raises an exception +(see the docs for details).""" + # + # returns a single-argument function + space = self.space + w_ffi = space.wrap(self) + w_decorator = call_python.get_generic_decorator(space) + return space.appexec([w_decorator, w_ffi, w_name, w_error, w_onerror], + """(decorator, ffi, name, error, onerror): + return lambda python_callable: decorator(ffi, python_callable, + name, error, onerror)""") + + @unwrap_spec(w_python_callable=WrappedDefault(None), w_error=WrappedDefault(None), w_onerror=WrappedDefault(None)) @@ -585,6 +611,67 @@ return w_result + def descr_init_once(self, w_func, w_tag): + """\ +init_once(function, tag): run function() once. More precisely, +'function()' is called the first time we see a given 'tag'. + +The return value of function() is remembered and returned by the current +and all future init_once() with the same tag. If init_once() is called +from multiple threads in parallel, all calls block until the execution +of function() is done. 
If function() raises an exception, it is +propagated and nothing is cached.""" + # + # first, a fast-path for the JIT which only works if the very + # same w_tag object is passed; then it turns into no code at all + try: + return self._init_once_elidable(w_tag) + except KeyError: + return self._init_once_slowpath(w_func, w_tag) + + @jit.elidable + def _init_once_elidable(self, w_tag): + jit_cache = self.jit_init_once_cache + if jit_cache is not None: + return jit_cache[w_tag] + else: + raise KeyError + + @jit.dont_look_inside + def _init_once_slowpath(self, w_func, w_tag): + space = self.space + w_cache = self.w_init_once_cache + if w_cache is None: + w_cache = self.space.newdict() + jit_cache = {} + self.w_init_once_cache = w_cache + self.jit_init_once_cache = jit_cache + # + # get the lock or result from cache[tag] + w_res = space.finditem(w_cache, w_tag) + if w_res is None: + w_res = W_InitOnceLock(space) + w_res = space.call_method(w_cache, 'setdefault', w_tag, w_res) + if not isinstance(w_res, W_InitOnceLock): + return w_res + with w_res.lock: + w_res = space.finditem(w_cache, w_tag) + if w_res is None or isinstance(w_res, W_InitOnceLock): + w_res = space.call_function(w_func) + self.jit_init_once_cache[w_tag] = w_res + space.setitem(w_cache, w_tag, w_res) + else: + # the real result was put in the dict while we were + # waiting for lock.__enter__() above + pass + return w_res + + +class W_InitOnceLock(W_Root): + def __init__(self, space): + self.lock = space.allocate_lock() + + @jit.dont_look_inside def make_plain_ffi_object(space, w_ffitype=None): if w_ffitype is None: @@ -635,12 +722,14 @@ buffer = interp2app(W_FFIObject.descr_buffer), callback = interp2app(W_FFIObject.descr_callback), cast = interp2app(W_FFIObject.descr_cast), + def_extern = interp2app(W_FFIObject.descr_def_extern), dlclose = interp2app(W_FFIObject.descr_dlclose), dlopen = interp2app(W_FFIObject.descr_dlopen), from_buffer = interp2app(W_FFIObject.descr_from_buffer), from_handle = 
interp2app(W_FFIObject.descr_from_handle), gc = interp2app(W_FFIObject.descr_gc), getctype = interp2app(W_FFIObject.descr_getctype), + init_once = interp2app(W_FFIObject.descr_init_once), integer_const = interp2app(W_FFIObject.descr_integer_const), memmove = interp2app(W_FFIObject.descr_memmove), new = interp2app(W_FFIObject.descr_new), diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/__init__.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/__init__.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/__init__.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/__init__.py 2016-03-19 16:40:11.000000000 +0000 @@ -1,8 +1,9 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi +from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rtyper.lltypesystem import rffi -VERSION = "1.3.1" +VERSION = "1.5.2" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: @@ -65,6 +66,11 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def startup(self, space): + from pypy.module._cffi_backend import embedding + embedding.glob.space = space + embedding.glob.patched_sys = False + def get_dict_rtld_constants(): found = {} @@ -78,3 +84,11 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % _value + + +# write this entrypoint() here, to make sure it is registered early enough +@entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') +def pypy_init_embedded_cffi_module(version, init_struct): + from pypy.module._cffi_backend import embedding + return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/lib_obj.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/lib_obj.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/lib_obj.py 2015-11-19 19:21:39.000000000 +0000 +++ 
pypy-5.0.1+dfsg/pypy/module/_cffi_backend/lib_obj.py 2016-03-19 16:40:11.000000000 +0000 @@ -167,6 +167,14 @@ w_ctfnptr = w_ct.unwrap_as_fnptr(self.ffi) w_result = W_CData(self.space, ptr, w_ctfnptr) # + # + elif op == cffi_opcode.OP_EXTERN_PYTHON: + # for reading 'lib.bar' where bar is declared + # as an extern "Python" + w_ct = realize_c_type.realize_c_type( + self.ffi, self.ctx.c_types, getarg(g.c_type_op)) + ptr = lltype.direct_fieldptr(g, 'c_size_or_direct_fn') + w_result = w_ct.convert_to_object(rffi.cast(rffi.CCHARP, ptr)) else: raise oefmt(space.w_NotImplementedError, "in lib_build_attr: op=%d", op) @@ -186,6 +194,8 @@ return self.dir1(ignore_global_vars=True) if is_getattr and attr == '__dict__': return self.full_dict_copy() + if is_getattr and attr == '__class__': + return self.space.type(self) if is_getattr and attr == '__name__': return self.descr_repr() raise oefmt(self.space.w_AttributeError, diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/newtype.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/newtype.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/newtype.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/newtype.py 2016-03-19 16:40:11.000000000 +0000 @@ -34,9 +34,11 @@ def _clean_cache(space): "NOT_RPYTHON" from pypy.module._cffi_backend.realize_c_type import RealizeCache + from pypy.module._cffi_backend.call_python import KeepaliveCache if hasattr(space, 'fromcache'): # not with the TinyObjSpace space.fromcache(UniqueCache).__init__(space) space.fromcache(RealizeCache).__init__(space) + space.fromcache(KeepaliveCache).__init__(space) # ____________________________________________________________ diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/parse_c_type.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/parse_c_type.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/parse_c_type.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/parse_c_type.py 2016-03-19 16:40:11.000000000 +0000 
@@ -71,6 +71,12 @@ ('error_location', rffi.SIZE_T), ('error_message', rffi.CCHARP)) +PEXTERNPY = rffi.CStructPtr('_cffi_externpy_s', + ('name', rffi.CCHARP), + ('size_of_result', rffi.SIZE_T), + ('reserved1', rffi.VOIDP), + ('reserved2', rffi.VOIDP)) + GETCONST_S = rffi.CStruct('_cffi_getconst_s', ('value', rffi.ULONGLONG), ('ctx', PCTX), diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/src/parse_c_type.h pypy-5.0.1+dfsg/pypy/module/_cffi_backend/src/parse_c_type.h --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/src/parse_c_type.h 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/src/parse_c_type.h 2016-03-19 16:40:11.000000000 +0000 @@ -27,6 +27,7 @@ #define _CFFI_OP_DLOPEN_FUNC 35 #define _CFFI_OP_DLOPEN_CONST 37 #define _CFFI_OP_GLOBAL_VAR_F 39 +#define _CFFI_OP_EXTERN_PYTHON 41 #define _CFFI_PRIM_VOID 0 #define _CFFI_PRIM_BOOL 1 @@ -160,6 +161,12 @@ const char *error_message; }; +struct _cffi_externpy_s { + const char *name; + size_t size_of_result; + void *reserved1, *reserved2; +}; + #ifdef _CFFI_INTERNAL RPY_EXTERN int pypy_parse_c_type(struct _cffi_parse_info_s *info, const char *input); diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/test/_backend_test_c.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/test/_backend_test_c.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/test/_backend_test_c.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/test/_backend_test_c.py 2016-03-19 16:40:11.000000000 +0000 @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.3.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.5.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/test/test_ffi_obj.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/test/test_ffi_obj.py --- 
pypy-4.0.1+dfsg/pypy/module/_cffi_backend/test/test_ffi_obj.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/test/test_ffi_obj.py 2016-03-19 16:40:11.000000000 +0000 @@ -12,6 +12,7 @@ p = ffi.new("int *") p[0] = -42 assert p[0] == -42 + assert type(ffi) is ffi.__class__ is _cffi1_backend.FFI def test_ffi_subclass(self): import _cffi_backend as _cffi1_backend @@ -22,6 +23,7 @@ assert foo.x == 42 p = foo.new("int *") assert p[0] == 0 + assert type(foo) is foo.__class__ is FOO def test_ffi_no_argument(self): import _cffi_backend as _cffi1_backend @@ -447,3 +449,30 @@ assert int(ffi.cast("_Bool", ffi.cast(type, 42))) == 1 assert int(ffi.cast("bool", ffi.cast(type, 42))) == 1 assert int(ffi.cast("_Bool", ffi.cast(type, 0))) == 0 + + def test_init_once(self): + import _cffi_backend as _cffi1_backend + def do_init(): + seen.append(1) + return 42 + ffi = _cffi1_backend.FFI() + seen = [] + for i in range(3): + res = ffi.init_once(do_init, "tag1") + assert res == 42 + assert seen == [1] + for i in range(3): + res = ffi.init_once(do_init, "tag2") + assert res == 42 + assert seen == [1, 1] + + def test_init_once_failure(self): + import _cffi_backend as _cffi1_backend + def do_init(): + seen.append(1) + raise ValueError + ffi = _cffi1_backend.FFI() + seen = [] + for i in range(5): + raises(ValueError, ffi.init_once, do_init, "tag") + assert seen == [1] * (i + 1) diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/test/test_recompiler.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/test/test_recompiler.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/test/test_recompiler.py 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/test/test_recompiler.py 2016-03-19 16:40:11.000000000 +0000 @@ -16,8 +16,8 @@ from cffi import ffiplatform except ImportError: py.test.skip("system cffi module not found or older than 1.0.0") - if cffi.__version_info__ < (1, 2, 0): - py.test.skip("system cffi module needs to be at least 1.2.0") + 
if cffi.__version_info__ < (1, 4, 0): + py.test.skip("system cffi module needs to be at least 1.4.0") space.appexec([], """(): import _cffi_backend # force it to be initialized """) @@ -1029,6 +1029,7 @@ assert hasattr(lib, '__dict__') assert lib.__all__ == ['MYFOO', 'mybar'] # but not 'myvar' assert lib.__name__ == repr(lib) + assert lib.__class__ is type(lib) def test_macro_var_callback(self): ffi, lib = self.prepare( @@ -1376,3 +1377,252 @@ 'test_share_FILE_b', "FILE *barize(void) { return NULL; }") lib1.do_stuff(lib2.barize()) + + def w_StdErrCapture(self, fd=False): + if fd: + # note: this is for a case where CPython prints to sys.stderr + # too, but not PyPy + import os + class MiniStringIO(object): + def __init__(self): + self._rd, self._wr = os.pipe() + self._result = None + def getvalue(self): + if self._result is None: + os.close(self._wr) + self._result = os.read(self._rd, 4096) + os.close(self._rd) + # xxx hack away these lines + while self._result.startswith('[platform:execute]'): + self._result = ''.join( + self._result.splitlines(True)[1:]) + return self._result + class StdErrCapture(object): + def __enter__(self): + f = MiniStringIO() + self.old_fd2 = os.dup(2) + os.dup2(f._wr, 2) + return f + def __exit__(self, *args): + os.dup2(self.old_fd2, 2) + os.close(self.old_fd2) + return StdErrCapture() + else: + import sys + class MiniStringIO(object): + def __init__(self): + self._lst = [] + self.write = self._lst.append + def getvalue(self): + return ''.join(self._lst) + class StdErrCapture(object): + def __enter__(self): + self.old_stderr = sys.stderr + sys.stderr = f = MiniStringIO() + return f + def __exit__(self, *args): + sys.stderr = self.old_stderr + return StdErrCapture() + + def test_extern_python_1(self): + ffi, lib = self.prepare(""" + extern "Python" { + int bar(int, int); + void baz(int, int); + int bok(void); + void boz(void); + } + """, 'test_extern_python_1', "") + assert ffi.typeof(lib.bar) == ffi.typeof("int(*)(int, int)") + with 
self.StdErrCapture(fd=True) as f: + res = lib.bar(4, 5) + assert res == 0 + assert f.getvalue() == ( + "extern \"Python\": function bar() called, but no code was attached " + "to it yet with @ffi.def_extern(). Returning 0.\n") + + @ffi.def_extern("bar") + def my_bar(x, y): + seen.append(("Bar", x, y)) + return x * y + assert my_bar != lib.bar + seen = [] + res = lib.bar(6, 7) + assert seen == [("Bar", 6, 7)] + assert res == 42 + + def baz(x, y): + seen.append(("Baz", x, y)) + baz1 = ffi.def_extern()(baz) + assert baz1 is baz + seen = [] + baz(40L, 4L) + res = lib.baz(50L, 8L) + assert res is None + assert seen == [("Baz", 40L, 4L), ("Baz", 50, 8)] + assert type(seen[0][1]) is type(seen[0][2]) is long + assert type(seen[1][1]) is type(seen[1][2]) is int + + @ffi.def_extern(name="bok") + def bokk(): + seen.append("Bok") + return 42 + seen = [] + assert lib.bok() == 42 + assert seen == ["Bok"] + + @ffi.def_extern() + def boz(): + seen.append("Boz") + seen = [] + assert lib.boz() is None + assert seen == ["Boz"] + + def test_extern_python_bogus_name(self): + ffi, lib = self.prepare("int abc;", + 'test_extern_python_bogus_name', + "int abc;") + def fn(): + pass + raises(ffi.error, ffi.def_extern("unknown_name"), fn) + raises(ffi.error, ffi.def_extern("abc"), fn) + assert lib.abc == 0 + e = raises(ffi.error, ffi.def_extern("abc"), fn) + assert str(e.value) == ("ffi.def_extern('abc'): no 'extern \"Python\"' " + "function with this name") + e = raises(ffi.error, ffi.def_extern(), fn) + assert str(e.value) == ("ffi.def_extern('fn'): no 'extern \"Python\"' " + "function with this name") + # + raises(TypeError, ffi.def_extern(42), fn) + raises((TypeError, AttributeError), ffi.def_extern(), "foo") + class X: + pass + x = X() + x.__name__ = x + raises(TypeError, ffi.def_extern(), x) + + def test_extern_python_bogus_result_type(self): + ffi, lib = self.prepare("""extern "Python" void bar(int);""", + 'test_extern_python_bogus_result_type', + "") + @ffi.def_extern() + def bar(n): 
+ return n * 10 + with self.StdErrCapture() as f: + res = lib.bar(321) + assert res is None + assert f.getvalue() == ( + "From cffi callback %r:\n" % (bar,) + + "Trying to convert the result back to C:\n" + "TypeError: callback with the return type 'void' must return None\n") + + def test_extern_python_redefine(self): + ffi, lib = self.prepare("""extern "Python" int bar(int);""", + 'test_extern_python_redefine', + "") + @ffi.def_extern() + def bar(n): + return n * 10 + assert lib.bar(42) == 420 + # + @ffi.def_extern() + def bar(n): + return -n + assert lib.bar(42) == -42 + + def test_extern_python_struct(self): + ffi, lib = self.prepare(""" + struct foo_s { int a, b, c; }; + extern "Python" int bar(int, struct foo_s, int); + extern "Python" { struct foo_s baz(int, int); + struct foo_s bok(void); } + """, 'test_extern_python_struct', + "struct foo_s { int a, b, c; };") + # + @ffi.def_extern() + def bar(x, s, z): + return x + s.a + s.b + s.c + z + res = lib.bar(1000, [1001, 1002, 1004], 1008) + assert res == 5015 + # + @ffi.def_extern() + def baz(x, y): + return [x + y, x - y, x * y] + res = lib.baz(1000, 42) + assert res.a == 1042 + assert res.b == 958 + assert res.c == 42000 + # + @ffi.def_extern() + def bok(): + return [10, 20, 30] + res = lib.bok() + assert [res.a, res.b, res.c] == [10, 20, 30] + + def test_extern_python_long_double(self): + ffi, lib = self.prepare(""" + extern "Python" int bar(int, long double, int); + extern "Python" long double baz(int, int); + extern "Python" long double bok(void); + """, 'test_extern_python_long_double', "") + # + @ffi.def_extern() + def bar(x, l, z): + seen.append((x, l, z)) + return 6 + seen = [] + lib.bar(10, 3.5, 20) + expected = ffi.cast("long double", 3.5) + assert repr(seen) == repr([(10, expected, 20)]) + # + @ffi.def_extern() + def baz(x, z): + assert x == 10 and z == 20 + return expected + res = lib.baz(10, 20) + assert repr(res) == repr(expected) + # + @ffi.def_extern() + def bok(): + return expected + res = 
lib.bok() + assert repr(res) == repr(expected) + + def test_extern_python_signature(self): + ffi, lib = self.prepare("", 'test_extern_python_signature', "") + raises(TypeError, ffi.def_extern(425), None) + raises(TypeError, ffi.def_extern, 'a', 'b', 'c', 'd') + + def test_extern_python_errors(self): + ffi, lib = self.prepare(""" + extern "Python" int bar(int); + """, 'test_extern_python_errors', "") + + seen = [] + def oops(*args): + seen.append(args) + + @ffi.def_extern(onerror=oops) + def bar(x): + return x + "" + assert lib.bar(10) == 0 + + @ffi.def_extern(name="bar", onerror=oops, error=-66) + def bar2(x): + return x + "" + assert lib.bar(10) == -66 + + assert len(seen) == 2 + exc, val, tb = seen[0] + assert exc is TypeError + assert isinstance(val, TypeError) + assert tb.tb_frame.f_code.co_name == "bar" + exc, val, tb = seen[1] + assert exc is TypeError + assert isinstance(val, TypeError) + assert tb.tb_frame.f_code.co_name == "bar2" + # + # a case where 'onerror' is not callable + raises(TypeError, ffi.def_extern(name='bar', onerror=42), + lambda x: x) diff -Nru pypy-4.0.1+dfsg/pypy/module/_cffi_backend/test/test_ztranslation.py pypy-5.0.1+dfsg/pypy/module/_cffi_backend/test/test_ztranslation.py --- pypy-4.0.1+dfsg/pypy/module/_cffi_backend/test/test_ztranslation.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_cffi_backend/test/test_ztranslation.py 2016-03-19 16:40:11.000000000 +0000 @@ -4,15 +4,18 @@ # side-effect: FORMAT_LONGDOUBLE must be built before test_checkmodule() from pypy.module._cffi_backend import misc -from pypy.module._cffi_backend import cffi1_module +from pypy.module._cffi_backend import embedding def test_checkmodule(): # prepare_file_argument() is not working without translating the _file # module too def dummy_prepare_file_argument(space, fileobj): - # call load_cffi1_module() too, from a random place like here - cffi1_module.load_cffi1_module(space, "foo", "foo", 42) + # call pypy_init_embedded_cffi_module() from a 
random place like here + # --- this calls load_cffi1_module(), too + embedding.pypy_init_embedded_cffi_module( + rffi.cast(rffi.INT, embedding.EMBED_VERSION_MIN), + 42) return lltype.nullptr(rffi.CCHARP.TO) old = ctypeptr.prepare_file_argument try: diff -Nru pypy-4.0.1+dfsg/pypy/module/_collections/app_defaultdict.py pypy-5.0.1+dfsg/pypy/module/_collections/app_defaultdict.py --- pypy-4.0.1+dfsg/pypy/module/_collections/app_defaultdict.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_collections/app_defaultdict.py 2016-03-19 16:40:11.000000000 +0000 @@ -12,6 +12,7 @@ class defaultdict(dict): __slots__ = ['default_factory'] + __module__ = 'collections' def __init__(self, *args, **kwds): if len(args) > 0: diff -Nru pypy-4.0.1+dfsg/pypy/module/_collections/test/test_defaultdict.py pypy-5.0.1+dfsg/pypy/module/_collections/test/test_defaultdict.py --- pypy-4.0.1+dfsg/pypy/module/_collections/test/test_defaultdict.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_collections/test/test_defaultdict.py 2016-03-19 16:40:11.000000000 +0000 @@ -14,6 +14,12 @@ d[5].append(44) assert l == [42, 43] and l2 == [44] + def test_module(self): + from _collections import defaultdict + assert repr(defaultdict) in ( + "", # on PyPy + "") # on CPython + def test_keyerror_without_factory(self): from _collections import defaultdict for d1 in [defaultdict(), defaultdict(None)]: diff -Nru pypy-4.0.1+dfsg/pypy/module/_continuation/interp_continuation.py pypy-5.0.1+dfsg/pypy/module/_continuation/interp_continuation.py --- pypy-4.0.1+dfsg/pypy/module/_continuation/interp_continuation.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_continuation/interp_continuation.py 2016-03-19 16:40:11.000000000 +0000 @@ -195,7 +195,7 @@ class SThread(StackletThread): def __init__(self, space, ec): - StackletThread.__init__(self, space.config) + StackletThread.__init__(self) self.space = space self.ec = ec # for unpickling diff -Nru 
pypy-4.0.1+dfsg/pypy/module/cpyext/api.py pypy-5.0.1+dfsg/pypy/module/cpyext/api.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/api.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/api.py 2016-03-19 16:40:12.000000000 +0000 @@ -9,7 +9,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.annlowlevel import llhelper -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.gensupp import NameManager @@ -30,13 +30,13 @@ from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize -from rpython.rlib.exports import export_struct from pypy.module import exceptions from pypy.module.exceptions import interp_exceptions # CPython 2.4 compatibility from py.builtin import BaseException from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib import rawrefcount DEBUG_WRAPPER = True @@ -59,7 +59,7 @@ class CConfig: _compilation_info_ = ExternalCompilationInfo( include_dirs=include_dirs, - includes=['Python.h', 'stdarg.h'], + includes=['Python.h', 'stdarg.h', 'structmember.h'], compile_extra=['-DPy_BUILD_CORE'], ) @@ -124,11 +124,12 @@ METH_COEXIST METH_STATIC METH_CLASS METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) udir.join('pypy_decl.h').write("/* Will be filled later */\n") +udir.join('pypy_structmember_decl.h').write("/* Will be filled later */\n") 
udir.join('pypy_macros.h').write("/* Will be filled later */\n") globals().update(rffi_platform.configure(CConfig_constants)) @@ -143,23 +144,24 @@ target.chmod(0444) # make the file read-only, to make sure that nobody # edits it by mistake -def copy_header_files(dstdir): +def copy_header_files(dstdir, copy_numpy_headers): # XXX: 20 lines of code to recursively copy a directory, really?? assert dstdir.check(dir=True) headers = include_dir.listdir('*.h') + include_dir.listdir('*.inl') - for name in ("pypy_decl.h", "pypy_macros.h"): + for name in ("pypy_decl.h", "pypy_macros.h", "pypy_structmember_decl.h"): headers.append(udir.join(name)) _copy_header_files(headers, dstdir) - try: - dstdir.mkdir('numpy') - except py.error.EEXIST: - pass - numpy_dstdir = dstdir / 'numpy' - - numpy_include_dir = include_dir / 'numpy' - numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') - _copy_header_files(numpy_headers, numpy_dstdir) + if copy_numpy_headers: + try: + dstdir.mkdir('numpy') + except py.error.EEXIST: + pass + numpy_dstdir = dstdir / 'numpy' + + numpy_include_dir = include_dir / 'numpy' + numpy_headers = numpy_include_dir.listdir('*.h') + numpy_include_dir.listdir('*.inl') + _copy_header_files(numpy_headers, numpy_dstdir) class NotSpecified(object): @@ -192,7 +194,7 @@ class ApiFunction: def __init__(self, argtypes, restype, callable, error=_NOT_SPECIFIED, - c_name=None, gil=None): + c_name=None, gil=None, result_borrowed=False): self.argtypes = argtypes self.restype = restype self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype)) @@ -209,17 +211,15 @@ self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil + self.result_borrowed = result_borrowed + # + def get_llhelper(space): + return llhelper(self.functype, self.get_wrapper(space)) + self.get_llhelper = get_llhelper def _freeze_(self): return True - def get_llhelper(self, space): - llh = getattr(self, '_llhelper', None) - if llh is None: - 
llh = llhelper(self.functype, self.get_wrapper(space)) - self._llhelper = llh - return llh - @specialize.memo() def get_wrapper(self, space): wrapper = getattr(self, '_wrapper', None) @@ -231,8 +231,8 @@ wrapper.c_name = cpyext_namespace.uniquename(self.c_name) return wrapper -def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, external=True, - gil=None): +def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header='pypy_decl.h', + gil=None, result_borrowed=False): """ Declares a function to be exported. - `argtypes`, `restype` are lltypes and describe the function signature. @@ -240,8 +240,8 @@ special value 'CANNOT_FAIL' (also when restype is Void) turns an eventual exception into a wrapped SystemError. Unwrapped exceptions also cause a SytemError. - - set `external` to False to get a C function pointer, but not exported by - the API headers. + - `header` is the header file to export the function in, Set to None to get + a C function pointer, but not exported by the API headers. - set `gil` to "acquire", "release" or "around" to acquire the GIL, release the GIL, or both """ @@ -261,16 +261,18 @@ rffi.cast(restype, 0) == 0) def decorate(func): + func._always_inline_ = 'try' func_name = func.func_name - if external: + if header is not None: c_name = None else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, - c_name=c_name, gil=gil) + c_name=c_name, gil=gil, + result_borrowed=result_borrowed) func.api_func = api_function - if external: + if header is not None: assert func_name not in FUNCTIONS, ( "%s already registered" % func_name) @@ -278,6 +280,10 @@ raise ValueError("function %s has no return value for exceptions" % func) def make_unwrapper(catch_exception): + # ZZZ is this whole logic really needed??? It seems to be only + # for RPython code calling PyXxx() functions directly. 
I would + # think that usually directly calling the function is clean + # enough now names = api_function.argnames types_names_enum_ui = unrolling_iterable(enumerate( zip(api_function.argtypes, @@ -285,56 +291,58 @@ @specialize.ll() def unwrapper(space, *args): - from pypy.module.cpyext.pyobject import Py_DecRef - from pypy.module.cpyext.pyobject import make_ref, from_ref - from pypy.module.cpyext.pyobject import Reference + from pypy.module.cpyext.pyobject import Py_DecRef, is_pyobj + from pypy.module.cpyext.pyobject import from_ref, as_pyobj newargs = () - to_decref = [] + keepalives = () assert len(args) == len(api_function.argtypes) for i, (ARG, is_wrapped) in types_names_enum_ui: input_arg = args[i] if is_PyObject(ARG) and not is_wrapped: - # build a reference - if input_arg is None: - arg = lltype.nullptr(PyObject.TO) - elif isinstance(input_arg, W_Root): - ref = make_ref(space, input_arg) - to_decref.append(ref) - arg = rffi.cast(ARG, ref) + # build a 'PyObject *' (not holding a reference) + if not is_pyobj(input_arg): + keepalives += (input_arg,) + arg = rffi.cast(ARG, as_pyobj(space, input_arg)) else: - arg = input_arg + arg = rffi.cast(ARG, input_arg) elif is_PyObject(ARG) and is_wrapped: - # convert to a wrapped object - if input_arg is None: - arg = input_arg - elif isinstance(input_arg, W_Root): - arg = input_arg + # build a W_Root, possibly from a 'PyObject *' + if is_pyobj(input_arg): + arg = from_ref(space, input_arg) else: - try: - arg = from_ref(space, - rffi.cast(PyObject, input_arg)) - except TypeError, e: - err = OperationError(space.w_TypeError, - space.wrap( - "could not cast arg to PyObject")) - if not catch_exception: - raise err - state = space.fromcache(State) - state.set_exception(err) - if is_PyObject(restype): - return None - else: - return api_function.error_value + arg = input_arg + + ## ZZZ: for is_pyobj: + ## try: + ## arg = from_ref(space, + ## rffi.cast(PyObject, input_arg)) + ## except TypeError, e: + ## err = 
OperationError(space.w_TypeError, + ## space.wrap( + ## "could not cast arg to PyObject")) + ## if not catch_exception: + ## raise err + ## state = space.fromcache(State) + ## state.set_exception(err) + ## if is_PyObject(restype): + ## return None + ## else: + ## return api_function.error_value else: - # convert to a wrapped object + # arg is not declared as PyObject, no magic arg = input_arg newargs += (arg, ) - try: + if not catch_exception: + try: + res = func(space, *newargs) + finally: + keepalive_until_here(*keepalives) + else: + # non-rpython variant + assert not we_are_translated() try: res = func(space, *newargs) except OperationError, e: - if not catch_exception: - raise if not hasattr(api_function, "error_value"): raise state = space.fromcache(State) @@ -343,27 +351,20 @@ return None else: return api_function.error_value - if not we_are_translated(): - got_integer = isinstance(res, (int, long, float)) - assert got_integer == expect_integer,'got %r not integer' % res - if res is None: - return None - elif isinstance(res, Reference): - return res.get_wrapped(space) - else: - return res - finally: - for arg in to_decref: - Py_DecRef(space, arg) + # 'keepalives' is alive here (it's not rpython) + got_integer = isinstance(res, (int, long, float)) + assert got_integer == expect_integer, ( + 'got %r not integer' % (res,)) + return res unwrapper.func = func unwrapper.api_func = api_function - unwrapper._always_inline_ = 'try' return unwrapper unwrapper_catch = make_unwrapper(True) unwrapper_raise = make_unwrapper(False) - if external: + if header is not None: FUNCTIONS[func_name] = api_function + FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests return unwrapper_raise # used in 'normal' RPython code. 
return decorate @@ -382,6 +383,7 @@ INTERPLEVEL_API = {} FUNCTIONS = {} +FUNCTIONS_BY_HEADER = {} # These are C symbols which cpyext will export, but which are defined in .c # files somewhere in the implementation of cpyext (rather than being defined in @@ -442,8 +444,8 @@ TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur '_Py_NoneStruct#': ('PyObject*', 'space.w_None'), - '_Py_TrueStruct#': ('PyObject*', 'space.w_True'), - '_Py_ZeroStruct#': ('PyObject*', 'space.w_False'), + '_Py_TrueStruct#': ('PyIntObject*', 'space.w_True'), + '_Py_ZeroStruct#': ('PyIntObject*', 'space.w_False'), '_Py_NotImplementedStruct#': ('PyObject*', 'space.w_NotImplemented'), '_Py_EllipsisObject#': ('PyObject*', 'space.w_Ellipsis'), 'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'), @@ -482,7 +484,6 @@ "PyComplex_Type": "space.w_complex", "PyByteArray_Type": "space.w_bytearray", "PyMemoryView_Type": "space.w_memoryview", - "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)", "PyBaseObject_Type": "space.w_object", 'PyNone_Type': 'space.type(space.w_None)', 'PyNotImplemented_Type': 'space.type(space.w_NotImplemented)', @@ -498,7 +499,7 @@ GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject PyListObject PyLongObject - PyDictObject PyTupleObject PyClassObject'''.split(): + PyDictObject PyClassObject'''.split(): FORWARD_DECLS.append('typedef struct { PyObject_HEAD } %s' % (cpyname, )) build_exported_objects() @@ -506,17 +507,21 @@ def get_structtype_for_ctype(ctype): from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr from pypy.module.cpyext.cdatetime import PyDateTime_CAPI + from pypy.module.cpyext.intobject import PyIntObject return {"PyObject*": PyObject, "PyTypeObject*": PyTypeObjectPtr, + "PyIntObject*": PyIntObject, "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)}[ctype] +# Note: as a special case, "PyObject" is the pointer type in RPython, +# corresponding to "PyObject *" in C. 
We do that only for PyObject. +# For example, "PyTypeObject" is the struct type even in RPython. PyTypeObject = lltype.ForwardReference() PyTypeObjectPtr = lltype.Ptr(PyTypeObject) -# It is important that these PyObjects are allocated in a raw fashion -# Thus we cannot save a forward pointer to the wrapped object -# So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) +PyObjectFields = (("ob_refcnt", lltype.Signed), + ("ob_pypy_link", lltype.Signed), + ("ob_type", PyTypeObjectPtr)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) @@ -602,6 +607,7 @@ # Make the wrapper for the cases (1) and (2) def make_wrapper(space, callable, gil=None): "NOT_RPYTHON" + from rpython.rlib import rgil names = callable.api_func.argnames argtypes_enum_ui = unrolling_iterable(enumerate(zip(callable.api_func.argtypes, [name.startswith("w_") for name in names]))) @@ -612,18 +618,17 @@ @specialize.ll() def wrapper(*args): - from pypy.module.cpyext.pyobject import make_ref, from_ref - from pypy.module.cpyext.pyobject import Reference + from pypy.module.cpyext.pyobject import make_ref, from_ref, is_pyobj + from pypy.module.cpyext.pyobject import as_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer if gil_acquire: - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value boxed_args = () + tb = None try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, @@ -631,10 +636,8 @@ for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and 
is_wrapped: - if arg: - arg_conv = from_ref(space, rffi.cast(PyObject, arg)) - else: - arg_conv = None + assert is_pyobj(arg) + arg_conv = from_ref(space, rffi.cast(PyObject, arg)) else: arg_conv = arg boxed_args += (arg_conv, ) @@ -649,6 +652,7 @@ except BaseException, e: failed = True if not we_are_translated(): + tb = sys.exc_info()[2] message = repr(e) import traceback traceback.print_exc() @@ -667,34 +671,37 @@ retval = error_value elif is_PyObject(callable.api_func.restype): - if result is None: - retval = rffi.cast(callable.api_func.restype, - make_ref(space, None)) - elif isinstance(result, Reference): - retval = result.get_ref(space) - elif not rffi._isllptr(result): - retval = rffi.cast(callable.api_func.restype, - make_ref(space, result)) - else: + if is_pyobj(result): retval = result + else: + if result is not None: + if callable.api_func.result_borrowed: + retval = as_pyobj(space, result) + else: + retval = make_ref(space, result) + retval = rffi.cast(callable.api_func.restype, retval) + else: + retval = lltype.nullptr(PyObject.TO) elif callable.api_func.restype is not lltype.Void: retval = rffi.cast(callable.api_func.restype, result) except Exception, e: print 'Fatal error in cpyext, CPython compatibility layer, calling', callable.__name__ print 'Either report a bug or consider not using this particular extension' if not we_are_translated(): + if tb is None: + tb = sys.exc_info()[2] import traceback traceback.print_exc() - print str(e) + if sys.stdout == sys.__stdout__: + import pdb; pdb.post_mortem(tb) # we can't do much here, since we're in ctypes, swallow else: print str(e) pypy_debug_catch_fatal_exception() + assert False rffi.stackcounter.stacks_counter -= 1 if gil_release: - before = rffi.aroundstate.before - if before: - before() + rgil.release() return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) @@ -774,6 +781,8 @@ "NOT_RPYTHON" from pypy.module.cpyext.pyobject import make_ref + use_micronumpy = 
setup_micronumpy(space) + export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() @@ -810,6 +819,7 @@ global_code = '\n'.join(global_objects) prologue = ("#include \n" + "#include \n" "#include \n") code = (prologue + struct_declaration_code + @@ -822,6 +832,19 @@ outputfilename=str(udir / "module_cache" / "pypyapi")) modulename = py.path.local(eci.libraries[-1]) + def dealloc_trigger(): + from pypy.module.cpyext.pyobject import decref + print 'dealloc_trigger...' + while True: + ob = rawrefcount.next_dead(PyObject) + if not ob: + break + print ob + decref(space, ob) + print 'dealloc_trigger DONE' + return "RETRY" + rawrefcount.init(dealloc_trigger) + run_bootstrap_functions(space) # load the bridge, and init structure @@ -831,8 +854,9 @@ space.fromcache(State).install_dll(eci) # populate static data + builder = space.fromcache(StaticObjectBuilder) for name, (typ, expr) in GLOBALS.iteritems(): - from pypy.module import cpyext + from pypy.module import cpyext # for the eval() below w_obj = eval(expr) if name.endswith('#'): name = name[:-1] @@ -855,7 +879,7 @@ assert False, "Unknown static pointer: %s %s" % (typ, name) ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value - elif typ in ('PyObject*', 'PyTypeObject*'): + elif typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'): if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) @@ -864,17 +888,10 @@ # we have a structure, get its address in_dll = ll2ctypes.get_ctypes_type(PyObject.TO).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, ctypes.pointer(in_dll)) - from pypy.module.cpyext.pyobject import ( - track_reference, get_typedescr) - w_type = space.type(w_obj) - typedescr = get_typedescr(w_type.instancetypedef) - py_obj.c_ob_refcnt = 1 - py_obj.c_ob_type = 
rffi.cast(PyTypeObjectPtr, - make_ref(space, w_type)) - typedescr.attach(space, py_obj, w_obj) - track_reference(space, py_obj, w_obj) + builder.prepare(py_obj, w_obj) else: assert False, "Unknown static object: %s %s" % (typ, name) + builder.attach_all() pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') @@ -891,6 +908,53 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') + +class StaticObjectBuilder: + def __init__(self, space): + self.space = space + self.static_pyobjs = [] + self.static_objs_w = [] + self.cpyext_type_init = None + # + # add a "method" that is overridden in setup_library() + # ('self.static_pyobjs' is completely ignored in that case) + self.get_static_pyobjs = lambda: self.static_pyobjs + + def prepare(self, py_obj, w_obj): + "NOT_RPYTHON" + if py_obj: + py_obj.c_ob_refcnt = 1 # 1 for kept immortal + self.static_pyobjs.append(py_obj) + self.static_objs_w.append(w_obj) + + def attach_all(self): + # this is RPython, called once in pypy-c when it imports cpyext + from pypy.module.cpyext.pyobject import get_typedescr, make_ref + from pypy.module.cpyext.typeobject import finish_type_1, finish_type_2 + from pypy.module.cpyext.pyobject import track_reference + # + space = self.space + static_pyobjs = self.get_static_pyobjs() + static_objs_w = self.static_objs_w + for i in range(len(static_objs_w)): + track_reference(space, static_pyobjs[i], static_objs_w[i]) + # + self.cpyext_type_init = [] + for i in range(len(static_objs_w)): + py_obj = static_pyobjs[i] + w_obj = static_objs_w[i] + w_type = space.type(w_obj) + typedescr = get_typedescr(w_type.layout.typedef) + py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, + make_ref(space, w_type)) + typedescr.attach(space, py_obj, w_obj) + cpyext_type_init = self.cpyext_type_init + self.cpyext_type_init = None + for pto, w_type in cpyext_type_init: + finish_type_1(space, pto) + finish_type_2(space, pto, w_type) + + def mangle_name(prefix, name): if name.startswith('Py'): 
return prefix + name[2:] @@ -935,7 +999,8 @@ "NOT_RPYTHON" # implement function callbacks and generate function decls functions = [] - pypy_decls = [] + decls = {} + pypy_decls = decls['pypy_decl.h'] = [] pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") pypy_decls.append("#ifndef PYPY_STANDALONE\n") @@ -948,17 +1013,23 @@ for decl in FORWARD_DECLS: pypy_decls.append("%s;" % (decl,)) - for name, func in sorted(FUNCTIONS.iteritems()): - restype, args = c_function_signature(db, func) - pypy_decls.append("PyAPI_FUNC(%s) %s(%s);" % (restype, name, args)) - if api_struct: - callargs = ', '.join('arg%d' % (i,) - for i in range(len(func.argtypes))) - if func.restype is lltype.Void: - body = "{ _pypyAPI.%s(%s); }" % (name, callargs) - else: - body = "{ return _pypyAPI.%s(%s); }" % (name, callargs) - functions.append('%s %s(%s)\n%s' % (restype, name, args, body)) + for header_name, header_functions in FUNCTIONS_BY_HEADER.iteritems(): + if header_name not in decls: + header = decls[header_name] = [] + else: + header = decls[header_name] + + for name, func in sorted(header_functions.iteritems()): + restype, args = c_function_signature(db, func) + header.append("PyAPI_FUNC(%s) %s(%s);" % (restype, name, args)) + if api_struct: + callargs = ', '.join('arg%d' % (i,) + for i in range(len(func.argtypes))) + if func.restype is lltype.Void: + body = "{ _pypyAPI.%s(%s); }" % (name, callargs) + else: + body = "{ return _pypyAPI.%s(%s); }" % (name, callargs) + functions.append('%s %s(%s)\n%s' % (restype, name, args, body)) for name in VA_TP_LIST: name_no_star = process_va_name(name) header = ('%s pypy_va_get_%s(va_list* vp)' % @@ -982,10 +1053,29 @@ pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") - pypy_decl_h = udir.join('pypy_decl.h') - pypy_decl_h.write('\n'.join(pypy_decls)) + for header_name, header_decls in decls.iteritems(): + decl_h = udir.join(header_name) + 
decl_h.write('\n'.join(header_decls)) return functions +separate_module_files = [source_dir / "varargwrapper.c", + source_dir / "pyerrors.c", + source_dir / "modsupport.c", + source_dir / "getargs.c", + source_dir / "abstract.c", + source_dir / "stringobject.c", + source_dir / "mysnprintf.c", + source_dir / "pythonrun.c", + source_dir / "sysmodule.c", + source_dir / "bufferobject.c", + source_dir / "cobject.c", + source_dir / "structseq.c", + source_dir / "capsule.c", + source_dir / "pysignals.c", + source_dir / "pythread.c", + source_dir / "missing.c", + ] + def build_eci(building_bridge, export_symbols, code): "NOT_RPYTHON" # Build code and get pointer to the structure @@ -1012,7 +1102,7 @@ if name.endswith('#'): structs.append('%s %s;' % (typ[:-1], name[:-1])) elif name.startswith('PyExc_'): - structs.append('extern PyTypeObject _%s;' % (name,)) + structs.append('PyTypeObject _%s;' % (name,)) structs.append('PyObject* %s = (PyObject*)&_%s;' % (name, name)) elif typ == 'PyDateTime_CAPI*': structs.append('%s %s = NULL;' % (typ, name)) @@ -1039,24 +1129,7 @@ eci = ExternalCompilationInfo( include_dirs=include_dirs, - separate_module_files=[source_dir / "varargwrapper.c", - source_dir / "pyerrors.c", - source_dir / "modsupport.c", - source_dir / "getargs.c", - source_dir / "abstract.c", - source_dir / "stringobject.c", - source_dir / "mysnprintf.c", - source_dir / "pythonrun.c", - source_dir / "sysmodule.c", - source_dir / "bufferobject.c", - source_dir / "cobject.c", - source_dir / "structseq.c", - source_dir / "capsule.c", - source_dir / "pysignals.c", - source_dir / "pythread.c", - source_dir / "ndarrayobject.c", - source_dir / "missing.c", - ], + separate_module_files= separate_module_files, separate_module_sources=separate_module_sources, compile_extra=compile_extra, **kwds @@ -1064,12 +1137,22 @@ return eci +def setup_micronumpy(space): + use_micronumpy = space.config.objspace.usemodules.micronumpy + if not use_micronumpy: + return use_micronumpy + # import to 
register api functions by side-effect + import pypy.module.cpyext.ndarrayobject + global GLOBALS, SYMBOLS_C, separate_module_files + GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") + SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS'] + separate_module_files.append(source_dir / "ndarrayobject.c") + return use_micronumpy def setup_library(space): "NOT_RPYTHON" - from pypy.module.cpyext.pyobject import make_ref - - export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS) + use_micronumpy = setup_micronumpy(space) + export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() @@ -1085,22 +1168,37 @@ run_bootstrap_functions(space) setup_va_functions(eci) - # populate static data - for name, (typ, expr) in GLOBALS.iteritems(): - name = name.replace("#", "") - if name.startswith('PyExc_'): + # emit uninitialized static data + builder = space.fromcache(StaticObjectBuilder) + lines = ['PyObject *pypy_static_pyobjs[] = {\n'] + include_lines = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'] + for name, (typ, expr) in sorted(GLOBALS.items()): + if name.endswith('#'): + assert typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*') + typ, name = typ[:-1], name[:-1] + elif name.startswith('PyExc_'): + typ = 'PyTypeObject' name = '_' + name - from pypy.module import cpyext - w_obj = eval(expr) - if typ in ('PyObject*', 'PyTypeObject*'): - struct_ptr = make_ref(space, w_obj) elif typ == 'PyDateTime_CAPI*': continue else: assert False, "Unknown static data: %s %s" % (typ, name) - struct = rffi.cast(get_structtype_for_ctype(typ), struct_ptr)._obj - struct._compilation_info = eci - export_struct(name, struct) + + from pypy.module import cpyext # for the eval() below + w_obj = eval(expr) + builder.prepare(None, w_obj) + lines.append('\t(PyObject *)&%s,\n' % (name,)) + include_lines.append('RPY_EXPORTED %s %s;\n' % (typ, name)) + + 
lines.append('};\n') + eci2 = CConfig._compilation_info_.merge(ExternalCompilationInfo( + separate_module_sources = [''.join(lines)], + post_include_bits = [''.join(include_lines)], + )) + # override this method to return a pointer to this C array directly + builder.get_static_pyobjs = rffi.CExternVariable( + PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', + getter_only=True, declare_as_extern=False) for name, func in FUNCTIONS.iteritems(): newname = mangle_name('PyPy', name) or name @@ -1109,7 +1207,11 @@ setup_init_functions(eci, translating=True) trunk_include = pypydir.dirpath() / 'include' - copy_header_files(trunk_include) + copy_header_files(trunk_include, use_micronumpy) + +def init_static_data_translated(space): + builder = space.fromcache(StaticObjectBuilder) + builder.attach_all() def _load_from_cffi(space, name, path, initptr): from pypy.module._cffi_backend import cffi1_module @@ -1193,22 +1295,18 @@ @specialize.ll() def generic_cpy_call(space, func, *args): FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, True, False)(space, func, *args) - -@specialize.ll() -def generic_cpy_call_dont_decref(space, func, *args): - FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, False, False)(space, func, *args) + return make_generic_cpy_call(FT, False)(space, func, *args) @specialize.ll() def generic_cpy_call_expect_null(space, func, *args): FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, True, True)(space, func, *args) + return make_generic_cpy_call(FT, True)(space, func, *args) @specialize.memo() -def make_generic_cpy_call(FT, decref_args, expect_null): +def make_generic_cpy_call(FT, expect_null): from pypy.module.cpyext.pyobject import make_ref, from_ref, Py_DecRef - from pypy.module.cpyext.pyobject import RefcountState + from pypy.module.cpyext.pyobject import is_pyobj, as_pyobj + from pypy.module.cpyext.pyobject import get_w_obj_and_decref from pypy.module.cpyext.pyerrors import PyErr_Occurred 
unrolling_arg_types = unrolling_iterable(enumerate(FT.ARGS)) RESULT_TYPE = FT.RESULT @@ -1236,65 +1334,49 @@ @specialize.ll() def generic_cpy_call(space, func, *args): boxed_args = () - to_decref = [] + keepalives = () assert len(args) == len(FT.ARGS) for i, ARG in unrolling_arg_types: arg = args[i] if is_PyObject(ARG): - if arg is None: - boxed_args += (lltype.nullptr(PyObject.TO),) - elif isinstance(arg, W_Root): - ref = make_ref(space, arg) - boxed_args += (ref,) - if decref_args: - to_decref.append(ref) - else: - boxed_args += (arg,) - else: - boxed_args += (arg,) + if not is_pyobj(arg): + keepalives += (arg,) + arg = as_pyobj(space, arg) + boxed_args += (arg,) try: - # create a new container for borrowed references - state = space.fromcache(RefcountState) - old_container = state.swap_borrow_container(None) - try: - # Call the function - result = call_external_function(func, *boxed_args) - finally: - state.swap_borrow_container(old_container) - - if is_PyObject(RESULT_TYPE): - if result is None: - ret = result - elif isinstance(result, W_Root): - ret = result + # Call the function + result = call_external_function(func, *boxed_args) + finally: + keepalive_until_here(*keepalives) + + if is_PyObject(RESULT_TYPE): + if not is_pyobj(result): + ret = result + else: + # The object reference returned from a C function + # that is called from Python must be an owned reference + # - ownership is transferred from the function to its caller. + if result: + ret = get_w_obj_and_decref(space, result) else: - ret = from_ref(space, result) - # The object reference returned from a C function - # that is called from Python must be an owned reference - # - ownership is transferred from the function to its caller. 
- if result: - Py_DecRef(space, result) - - # Check for exception consistency - has_error = PyErr_Occurred(space) is not None - has_result = ret is not None - if has_error and has_result: - raise OperationError(space.w_SystemError, space.wrap( - "An exception was set, but function returned a value")) - elif not expect_null and not has_error and not has_result: - raise OperationError(space.w_SystemError, space.wrap( - "Function returned a NULL result without setting an exception")) - - if has_error: - state = space.fromcache(State) - state.check_and_raise_exception() + ret = None - return ret - return result - finally: - if decref_args: - for ref in to_decref: - Py_DecRef(space, ref) - return generic_cpy_call + # Check for exception consistency + has_error = PyErr_Occurred(space) is not None + has_result = ret is not None + if has_error and has_result: + raise OperationError(space.w_SystemError, space.wrap( + "An exception was set, but function returned a value")) + elif not expect_null and not has_error and not has_result: + raise OperationError(space.w_SystemError, space.wrap( + "Function returned a NULL result without setting an exception")) + + if has_error: + state = space.fromcache(State) + state.check_and_raise_exception() + return ret + return result + + return generic_cpy_call diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/bufferobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/bufferobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/bufferobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/bufferobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -25,7 +25,7 @@ @bootstrap_function def init_bufferobject(space): "Type description of PyBufferObject" - make_typedescr(space.w_buffer.instancetypedef, + make_typedescr(space.w_buffer.layout.typedef, basestruct=PyBufferObject.TO, attach=buffer_attach, dealloc=buffer_dealloc, @@ -73,7 +73,7 @@ "Don't know how to realize a buffer")) -@cpython_api([PyObject], lltype.Void, external=False) 
+@cpython_api([PyObject], lltype.Void, header=None) def buffer_dealloc(space, py_obj): py_buf = rffi.cast(PyBufferObject, py_obj) if py_buf.c_b_base: diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/bytesobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/bytesobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/bytesobject.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/bytesobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,320 @@ +from pypy.interpreter.error import OperationError +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, cpython_struct, bootstrap_function, build_type_checkers, + PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) +from pypy.module.cpyext.pyerrors import PyErr_BadArgument +from pypy.module.cpyext.pyobject import ( + PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) + +## +## Implementation of PyStringObject +## ================================ +## +## The problem +## ----------- +## +## PyString_AsString() must return a (non-movable) pointer to the underlying +## buffer, whereas pypy strings are movable. C code may temporarily store +## this address and use it, as long as it owns a reference to the PyObject. +## There is no "release" function to specify that the pointer is not needed +## any more. +## +## Also, the pointer may be used to fill the initial value of string. This is +## valid only when the string was just allocated, and is not used elsewhere. +## +## Solution +## -------- +## +## PyStringObject contains two additional members: the size and a pointer to a +## char buffer; it may be NULL. +## +## - A string allocated by pypy will be converted into a PyStringObject with a +## NULL buffer. The first time PyString_AsString() is called, memory is +## allocated (with flavor='raw') and content is copied. 
+## +## - A string allocated with PyString_FromStringAndSize(NULL, size) will +## allocate a PyStringObject structure, and a buffer with the specified +## size, but the reference won't be stored in the global map; there is no +## corresponding object in pypy. When from_ref() or Py_INCREF() is called, +## the pypy string is created, and added to the global map of tracked +## objects. The buffer is then supposed to be immutable. +## +## - _PyString_Resize() works only on not-yet-pypy'd strings, and returns a +## similar object. +## +## - PyString_Size() doesn't need to force the object. +## +## - There could be an (expensive!) check in from_ref() that the buffer still +## corresponds to the pypy gc-managed string. +## + +PyStringObjectStruct = lltype.ForwardReference() +PyStringObject = lltype.Ptr(PyStringObjectStruct) +PyStringObjectFields = PyObjectFields + \ + (("buffer", rffi.CCHARP), ("size", Py_ssize_t)) +cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) + +@bootstrap_function +def init_stringobject(space): + "Type description of PyStringObject" + make_typedescr(space.w_str.layout.typedef, + basestruct=PyStringObject.TO, + attach=string_attach, + dealloc=string_dealloc, + realize=string_realize) + +PyString_Check, PyString_CheckExact = build_type_checkers("String", "w_str") + +def new_empty_str(space, length): + """ + Allocate a PyStringObject and its buffer, but without a corresponding + interpreter object. The buffer may be mutated, until string_realize() is + called. Refcount of the result is 1. 
+ """ + typedescr = get_typedescr(space.w_str.layout.typedef) + py_obj = typedescr.allocate(space, space.w_str) + py_str = rffi.cast(PyStringObject, py_obj) + + buflen = length + 1 + py_str.c_size = length + py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, + flavor='raw', zero=True, + add_memory_pressure=True) + return py_str + +def string_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyStringObject with the given string object. The + buffer must not be modified. + """ + py_str = rffi.cast(PyStringObject, py_obj) + py_str.c_size = len(space.str_w(w_obj)) + py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO) + +def string_realize(space, py_obj): + """ + Creates the string in the interpreter. The PyStringObject buffer must not + be modified after this call. + """ + py_str = rffi.cast(PyStringObject, py_obj) + s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size) + w_obj = space.wrap(s) + track_reference(space, py_obj, w_obj) + return w_obj + +@cpython_api([PyObject], lltype.Void, header=None) +def string_dealloc(space, py_obj): + """Frees allocated PyStringObject resources. 
+ """ + py_str = rffi.cast(PyStringObject, py_obj) + if py_str.c_buffer: + lltype.free(py_str.c_buffer, flavor="raw") + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +#_______________________________________________________________________ + +@cpython_api([CONST_STRING, Py_ssize_t], PyObject) +def PyString_FromStringAndSize(space, char_p, length): + if char_p: + s = rffi.charpsize2str(char_p, length) + return make_ref(space, space.wrap(s)) + else: + return rffi.cast(PyObject, new_empty_str(space, length)) + +@cpython_api([CONST_STRING], PyObject) +def PyString_FromString(space, char_p): + s = rffi.charp2str(char_p) + return space.wrap(s) + +@cpython_api([PyObject], rffi.CCHARP, error=0) +def PyString_AsString(space, ref): + if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + pass # typecheck returned "ok" without forcing 'ref' at all + elif not PyString_Check(space, ref): # otherwise, use the alternate way + raise OperationError(space.w_TypeError, space.wrap( + "PyString_AsString only support strings")) + ref_str = rffi.cast(PyStringObject, ref) + if not ref_str.c_buffer: + # copy string buffer + w_str = from_ref(space, ref) + s = space.str_w(w_str) + ref_str.c_buffer = rffi.str2charp(s) + return ref_str.c_buffer + +@cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) +def PyString_AsStringAndSize(space, ref, buffer, length): + if not PyString_Check(space, ref): + raise OperationError(space.w_TypeError, space.wrap( + "PyString_AsStringAndSize only support strings")) + ref_str = rffi.cast(PyStringObject, ref) + if not ref_str.c_buffer: + # copy string buffer + w_str = from_ref(space, ref) + s = space.str_w(w_str) + ref_str.c_buffer = rffi.str2charp(s) + buffer[0] = ref_str.c_buffer + if length: + length[0] = ref_str.c_size + else: + i = 0 + while ref_str.c_buffer[i] != '\0': + i += 1 + if i != ref_str.c_size: + raise OperationError(space.w_TypeError, space.wrap( 
+ "expected string without null bytes")) + return 0 + +@cpython_api([PyObject], Py_ssize_t, error=-1) +def PyString_Size(space, ref): + if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + ref = rffi.cast(PyStringObject, ref) + return ref.c_size + else: + w_obj = from_ref(space, ref) + return space.len_w(w_obj) + +@cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) +def _PyString_Resize(space, ref, newsize): + """A way to resize a string object even though it is "immutable". Only use this to + build up a brand new string object; don't use this if the string may already be + known in other parts of the code. It is an error to call this function if the + refcount on the input string object is not one. Pass the address of an existing + string object as an lvalue (it may be written into), and the new size desired. + On success, *string holds the resized string object and 0 is returned; + the address in *string may differ from its input value. If the reallocation + fails, the original string object at *string is deallocated, *string is + set to NULL, a memory exception is set, and -1 is returned. 
+ """ + # XXX always create a new string so far + py_str = rffi.cast(PyStringObject, ref[0]) + if not py_str.c_buffer: + raise OperationError(space.w_SystemError, space.wrap( + "_PyString_Resize called on already created string")) + try: + py_newstr = new_empty_str(space, newsize) + except MemoryError: + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + raise + to_cp = newsize + oldsize = py_str.c_size + if oldsize < newsize: + to_cp = oldsize + for i in range(to_cp): + py_newstr.c_buffer[i] = py_str.c_buffer[i] + Py_DecRef(space, ref[0]) + ref[0] = rffi.cast(PyObject, py_newstr) + return 0 + +@cpython_api([PyObject, PyObject], rffi.INT, error=CANNOT_FAIL) +def _PyString_Eq(space, w_str1, w_str2): + return space.eq_w(w_str1, w_str2) + +@cpython_api([PyObjectP, PyObject], lltype.Void) +def PyString_Concat(space, ref, w_newpart): + """Create a new string object in *string containing the contents of newpart + appended to string; the caller will own the new reference. The reference to + the old value of string will be stolen. If the new string cannot be created, + the old reference to string will still be discarded and the value of + *string will be set to NULL; the appropriate exception will be set.""" + + if not ref[0]: + return + + if w_newpart is None or not PyString_Check(space, ref[0]) or \ + not PyString_Check(space, w_newpart): + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + return + w_str = from_ref(space, ref[0]) + w_newstr = space.add(w_str, w_newpart) + Py_DecRef(space, ref[0]) + ref[0] = make_ref(space, w_newstr) + +@cpython_api([PyObjectP, PyObject], lltype.Void) +def PyString_ConcatAndDel(space, ref, newpart): + """Create a new string object in *string containing the contents of newpart + appended to string. 
This version decrements the reference count of newpart.""" + PyString_Concat(space, ref, newpart) + Py_DecRef(space, newpart) + +@cpython_api([PyObject, PyObject], PyObject) +def PyString_Format(space, w_format, w_args): + """Return a new string object from format and args. Analogous to format % + args. The args argument must be a tuple.""" + return space.mod(w_format, w_args) + +@cpython_api([CONST_STRING], PyObject) +def PyString_InternFromString(space, string): + """A combination of PyString_FromString() and + PyString_InternInPlace(), returning either a new string object that has + been interned, or a new ("owned") reference to an earlier interned string + object with the same value.""" + s = rffi.charp2str(string) + return space.new_interned_str(s) + +@cpython_api([PyObjectP], lltype.Void) +def PyString_InternInPlace(space, string): + """Intern the argument *string in place. The argument must be the + address of a pointer variable pointing to a Python string object. + If there is an existing interned string that is the same as + *string, it sets *string to it (decrementing the reference count + of the old string object and incrementing the reference count of + the interned string object), otherwise it leaves *string alone and + interns it (incrementing its reference count). (Clarification: + even though there is a lot of talk about reference counts, think + of this function as reference-count-neutral; you own the object + after the call if and only if you owned it before the call.) + + This function is not available in 3.x and does not have a PyBytes + alias.""" + w_str = from_ref(space, string[0]) + w_str = space.new_interned_w_str(w_str) + Py_DecRef(space, string[0]) + string[0] = make_ref(space, w_str) + +@cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) +def PyString_AsEncodedObject(space, w_str, encoding, errors): + """Encode a string object using the codec registered for encoding and return + the result as Python object. 
encoding and errors have the same meaning as + the parameters of the same name in the string encode() method. The codec to + be used is looked up using the Python codec registry. Return NULL if an + exception was raised by the codec. + + This function is not available in 3.x and does not have a PyBytes alias.""" + if not PyString_Check(space, w_str): + PyErr_BadArgument(space) + + w_encoding = w_errors = None + if encoding: + w_encoding = space.wrap(rffi.charp2str(encoding)) + if errors: + w_errors = space.wrap(rffi.charp2str(errors)) + return space.call_method(w_str, 'encode', w_encoding, w_errors) + +@cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) +def PyString_AsDecodedObject(space, w_str, encoding, errors): + """Decode a string object by passing it to the codec registered + for encoding and return the result as Python object. encoding and + errors have the same meaning as the parameters of the same name in + the string encode() method. The codec to be used is looked up + using the Python codec registry. Return NULL if an exception was + raised by the codec. + + This function is not available in 3.x and does not have a PyBytes alias.""" + if not PyString_Check(space, w_str): + PyErr_BadArgument(space) + + w_encoding = w_errors = None + if encoding: + w_encoding = space.wrap(rffi.charp2str(encoding)) + if errors: + w_errors = space.wrap(rffi.charp2str(errors)) + return space.call_method(w_str, "decode", w_encoding, w_errors) + +@cpython_api([PyObject, PyObject], PyObject) +def _PyString_Join(space, w_sep, w_seq): + return space.call_method(w_sep, 'join', w_seq) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/complexobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/complexobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/complexobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/complexobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -43,7 +43,7 @@ # lltype does not handle functions returning a structure. 
This implements a # helper function, which takes as argument a reference to the return value. -@cpython_api([PyObject, Py_complex_ptr], lltype.Void) +@cpython_api([PyObject, Py_complex_ptr], rffi.INT_real, error=-1) def _PyComplex_AsCComplex(space, w_obj, result): """Return the Py_complex value of the complex number op. @@ -60,7 +60,7 @@ # if the above did not work, interpret obj as a float giving the # real part of the result, and fill in the imaginary part as 0. result.c_real = PyFloat_AsDouble(space, w_obj) # -1 on failure - return + return 0 if not PyComplex_Check(space, w_obj): raise OperationError(space.w_TypeError, space.wrap( @@ -69,3 +69,4 @@ assert isinstance(w_obj, W_ComplexObject) result.c_real = w_obj.realval result.c_imag = w_obj.imagval + return 0 diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/dictobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/dictobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/dictobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/dictobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -2,8 +2,7 @@ from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t, Py_ssize_tP, CONST_STRING) -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, borrow_from -from pypy.module.cpyext.pyobject import RefcountState +from pypy.module.cpyext.pyobject import PyObject, PyObjectP, as_pyobj from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.interpreter.error import OperationError from rpython.rlib.objectmodel import specialize @@ -14,13 +13,17 @@ PyDict_Check, PyDict_CheckExact = build_type_checkers("Dict") -@cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL) +@cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL, + result_borrowed=True) def PyDict_GetItem(space, w_dict, w_key): try: w_res = space.getitem(w_dict, w_key) except: return None - return borrow_from(w_dict, w_res) + # NOTE: this works so far because all our dict strategies store 
+ # *values* as full objects, which stay alive as long as the dict is + # alive and not modified. So we can return a borrowed ref. + return w_res @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1) def PyDict_SetItem(space, w_dict, w_key, w_obj): @@ -47,7 +50,8 @@ else: PyErr_BadInternalCall(space) -@cpython_api([PyObject, CONST_STRING], PyObject, error=CANNOT_FAIL) +@cpython_api([PyObject, CONST_STRING], PyObject, error=CANNOT_FAIL, + result_borrowed=True) def PyDict_GetItemString(space, w_dict, key): """This is the same as PyDict_GetItem(), but key is specified as a char*, rather than a PyObject*.""" @@ -55,11 +59,12 @@ w_res = space.finditem_str(w_dict, rffi.charp2str(key)) except: w_res = None - if w_res is None: - return None - return borrow_from(w_dict, w_res) + # NOTE: this works so far because all our dict strategies store + # *values* as full objects, which stay alive as long as the dict is + # alive and not modified. So we can return a borrowed ref. + return w_res -@cpython_api([PyObject, rffi.CCHARP], rffi.INT_real, error=-1) +@cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1) def PyDict_DelItemString(space, w_dict, key_ptr): """Remove the entry in dictionary p which has a key specified by the string key. Return 0 on success or -1 on failure.""" @@ -170,10 +175,13 @@ if w_dict is None: return 0 - # Note: this is not efficient. Storing an iterator would probably + # XXX XXX PyDict_Next is not efficient. Storing an iterator would probably # work, but we can't work out how to not leak it if iteration does - # not complete. + # not complete. Alternatively, we could add some RPython-only + # dict-iterator method to move forward by N steps. 
+ w_dict.ensure_object_strategy() # make sure both keys and values can + # be borrwed try: w_iter = space.call_method(space.w_dict, "iteritems", w_dict) pos = ppos[0] @@ -183,11 +191,10 @@ w_item = space.call_method(w_iter, "next") w_key, w_value = space.fixedview(w_item, 2) - state = space.fromcache(RefcountState) if pkey: - pkey[0] = state.make_borrowed(w_dict, w_key) + pkey[0] = as_pyobj(space, w_key) if pvalue: - pvalue[0] = state.make_borrowed(w_dict, w_value) + pvalue[0] = as_pyobj(space, w_value) ppos[0] += 1 except OperationError, e: if not e.match(space, space.w_StopIteration): diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/Doc_stubgen_enable.patch pypy-5.0.1+dfsg/pypy/module/cpyext/Doc_stubgen_enable.patch --- pypy-4.0.1+dfsg/pypy/module/cpyext/Doc_stubgen_enable.patch 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/Doc_stubgen_enable.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -Index: Doc/tools/sphinx/ext/refcounting.py -=================================================================== ---- Doc/tools/sphinx/ext/refcounting.py (Revision 79453) -+++ Doc/tools/sphinx/ext/refcounting.py (Arbeitskopie) -@@ -91,6 +91,7 @@ - if app.config.refcount_file: - refcounts = Refcounts.fromfile( - path.join(app.srcdir, app.config.refcount_file)) -+ app._refcounts = refcounts - app.connect('doctree-read', refcounts.add_refcount_annotations) - - -Index: Doc/conf.py -=================================================================== ---- Doc/conf.py (Revision 79421) -+++ Doc/conf.py (Arbeitskopie) -@@ -13,8 +13,8 @@ - # General configuration - # --------------------- - --extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage', -- 'sphinx.ext.doctest', 'pyspecific'] -+extensions = ['pypy.module.cpyext.stubgen', 'sphinx.ext.refcounting', 'sphinx.ext.coverage', -+ 'sphinx.ext.doctest', 'pyspecific', ] - templates_path = ['tools/sphinxext'] - - # General substitutions. 
diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/eval.py pypy-5.0.1+dfsg/pypy/module/cpyext/eval.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/eval.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/eval.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,7 +4,7 @@ from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, cpython_struct, is_valid_fp) -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno from pypy.module.cpyext.funcobject import PyCodeObject from pypy.module.__builtin__ import compiling @@ -23,38 +23,38 @@ def PyEval_CallObjectWithKeywords(space, w_obj, w_arg, w_kwds): return space.call(w_obj, w_arg, w_kwds) -@cpython_api([], PyObject) +@cpython_api([], PyObject, result_borrowed=True) def PyEval_GetBuiltins(space): """Return a dictionary of the builtins in the current execution frame, or the interpreter of the thread state if no frame is currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtins = space.getitem(w_globals, space.wrap('__builtins__')) if not space.isinstance_w(w_builtins, space.w_dict): w_builtins = w_builtins.getdict(space) else: w_builtins = space.builtin.getdict(space) - return borrow_from(None, w_builtins) + return w_builtins # borrowed ref in all cases -@cpython_api([], PyObject, error=CANNOT_FAIL) +@cpython_api([], PyObject, error=CANNOT_FAIL, result_borrowed=True) def PyEval_GetLocals(space): """Return a dictionary of the local variables in the current execution frame, or NULL if no frame is currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.getdictscope()) + return caller.getdictscope() # borrowed ref -@cpython_api([], 
PyObject, error=CANNOT_FAIL) +@cpython_api([], PyObject, error=CANNOT_FAIL, result_borrowed=True) def PyEval_GetGlobals(space): """Return a dictionary of the global variables in the current execution frame, or NULL if no frame is currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.w_globals) + return caller.get_w_globals() # borrowed ref @cpython_api([PyCodeObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): @@ -128,7 +128,7 @@ filename = "" return run_string(space, source, filename, start, w_globals, w_locals) -@cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, +@cpython_api([CONST_STRING, rffi.INT_real, PyObject, PyObject, PyCompilerFlagsPtr], PyObject) def PyRun_StringFlags(space, source, start, w_globals, w_locals, flagsptr): """Execute Python source code from str in the context specified by the @@ -189,7 +189,7 @@ pi[0] = space.getindex_w(w_obj, None) return 1 -@cpython_api([rffi.CCHARP, rffi.CCHARP, rffi.INT_real, PyCompilerFlagsPtr], +@cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real, PyCompilerFlagsPtr], PyObject) def Py_CompileStringFlags(space, source, filename, start, flagsptr): """Parse and compile the Python source code in str, returning the diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/frameobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/frameobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/frameobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/frameobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -17,6 +17,7 @@ PyFrameObjectFields = (PyObjectFields + (("f_code", PyCodeObject), ("f_globals", PyObject), + ("f_locals", PyObject), ("f_lineno", rffi.INT), )) cpython_struct("PyFrameObject", PyFrameObjectFields, PyFrameObjectStruct) @@ -34,15 +35,17 @@ frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) py_frame.c_f_code = 
rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) - py_frame.c_f_globals = make_ref(space, frame.w_globals) + py_frame.c_f_globals = make_ref(space, frame.get_w_globals()) + py_frame.c_f_locals = make_ref(space, frame.get_w_locals()) rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno) -@cpython_api([PyObject], lltype.Void, external=False) +@cpython_api([PyObject], lltype.Void, header=None) def frame_dealloc(space, py_obj): py_frame = rffi.cast(PyFrameObject, py_obj) py_code = rffi.cast(PyObject, py_frame.c_f_code) Py_DecRef(space, py_code) Py_DecRef(space, py_frame.c_f_globals) + Py_DecRef(space, py_frame.c_f_locals) from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) @@ -72,6 +75,7 @@ space.interp_w(PyCode, w_code) # sanity check py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) py_frame.c_f_globals = make_ref(space, w_globals) + py_frame.c_f_locals = make_ref(space, w_locals) return py_frame @cpython_api([PyFrameObject], rffi.INT_real, error=-1) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/funcobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/funcobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/funcobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/funcobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,7 +3,7 @@ PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, Py_ssize_t, cpython_api, bootstrap_function, cpython_struct, build_type_checkers) from pypy.module.cpyext.pyobject import ( - PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) + PyObject, make_ref, from_ref, Py_DecRef, make_typedescr) from rpython.rlib.unroll import unrolling_iterable from pypy.interpreter.error import OperationError from pypy.interpreter.function import Function, Method @@ -30,6 +30,7 @@ PyCodeObject = lltype.Ptr(PyCodeObjectStruct) PyCodeObjectFields = PyObjectFields + \ (("co_name", PyObject), + ("co_filename", PyObject), ("co_flags", rffi.INT), 
("co_argcount", rffi.INT), ) @@ -55,7 +56,7 @@ assert isinstance(w_obj, Function) py_func.c_func_name = make_ref(space, space.wrap(w_obj.name)) -@cpython_api([PyObject], lltype.Void, external=False) +@cpython_api([PyObject], lltype.Void, header=None) def function_dealloc(space, py_obj): py_func = rffi.cast(PyFunctionObject, py_obj) Py_DecRef(space, py_func.c_func_name) @@ -66,6 +67,7 @@ py_code = rffi.cast(PyCodeObject, py_obj) assert isinstance(w_obj, PyCode) py_code.c_co_name = make_ref(space, space.wrap(w_obj.co_name)) + py_code.c_co_filename = make_ref(space, space.wrap(w_obj.co_filename)) co_flags = 0 for name, value in ALL_CODE_FLAGS: if w_obj.co_flags & getattr(pycode, name): @@ -73,19 +75,20 @@ rffi.setintfield(py_code, 'c_co_flags', co_flags) rffi.setintfield(py_code, 'c_co_argcount', w_obj.co_argcount) -@cpython_api([PyObject], lltype.Void, external=False) +@cpython_api([PyObject], lltype.Void, header=None) def code_dealloc(space, py_obj): py_code = rffi.cast(PyCodeObject, py_obj) Py_DecRef(space, py_code.c_co_name) + Py_DecRef(space, py_code.c_co_filename) from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) -@cpython_api([PyObject], PyObject) +@cpython_api([PyObject], PyObject, result_borrowed=True) def PyFunction_GetCode(space, w_func): """Return the code object associated with the function object op.""" func = space.interp_w(Function, w_func) w_code = space.wrap(func.code) - return borrow_from(w_func, w_code) + return w_code # borrowed ref @cpython_api([PyObject, PyObject, PyObject], PyObject) def PyMethod_New(space, w_func, w_self, w_cls): @@ -96,25 +99,25 @@ class which provides the unbound method.""" return Method(space, w_func, w_self, w_cls) -@cpython_api([PyObject], PyObject) +@cpython_api([PyObject], PyObject, result_borrowed=True) def PyMethod_Function(space, w_method): """Return the function object associated with the method meth.""" assert isinstance(w_method, Method) - return borrow_from(w_method, 
w_method.w_function) + return w_method.w_function # borrowed ref -@cpython_api([PyObject], PyObject) +@cpython_api([PyObject], PyObject, result_borrowed=True) def PyMethod_Self(space, w_method): """Return the instance associated with the method meth if it is bound, otherwise return NULL.""" assert isinstance(w_method, Method) - return borrow_from(w_method, w_method.w_instance) + return w_method.w_instance # borrowed ref -@cpython_api([PyObject], PyObject) +@cpython_api([PyObject], PyObject, result_borrowed=True) def PyMethod_Class(space, w_method): """Return the class object from which the method meth was created; if this was created from an instance, it will be the class of the instance.""" assert isinstance(w_method, Method) - return borrow_from(w_method, w_method.w_class) + return w_method.w_class # borrowed ref def unwrap_list_of_strings(space, w_list): return [space.str_w(w_item) for w_item in space.fixedview(w_list)] diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/import_.py pypy-5.0.1+dfsg/pypy/module/cpyext/import_.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/import_.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/import_.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,6 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( generic_cpy_call, cpython_api, PyObject, CONST_STRING) -from pypy.module.cpyext.pyobject import borrow_from from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError from pypy.interpreter.module import Module @@ -20,7 +19,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() # Get the builtins from current globals if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) else: # No globals -- use standard builtins, and fake globals @@ -56,7 +55,7 @@ from pypy.module.imp.importing import reload return reload(space, w_mod) 
-@cpython_api([CONST_STRING], PyObject) +@cpython_api([CONST_STRING], PyObject, result_borrowed=True) def PyImport_AddModule(space, name): """Return the module object corresponding to a module name. The name argument may be of the form package.module. First check the modules @@ -74,14 +73,16 @@ w_mod = check_sys_modules_w(space, modulename) if not w_mod or space.is_w(w_mod, space.w_None): w_mod = Module(space, space.wrap(modulename)) - return borrow_from(None, w_mod) + space.setitem(space.sys.get('modules'), space.wrap(modulename), w_mod) + # return a borrowed ref --- assumes one copy in sys.modules + return w_mod -@cpython_api([], PyObject) +@cpython_api([], PyObject, result_borrowed=True) def PyImport_GetModuleDict(space): """Return the dictionary used for the module administration (a.k.a. sys.modules). Note that this is a per-interpreter variable.""" w_modulesDict = space.sys.get('modules') - return borrow_from(None, w_modulesDict) + return w_modulesDict # borrowed ref @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, w_code): diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/code.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/code.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/code.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/code.h 2016-03-19 16:40:12.000000000 +0000 @@ -7,6 +7,7 @@ typedef struct { PyObject_HEAD PyObject *co_name; + PyObject *co_filename; int co_argcount; int co_flags; } PyCodeObject; diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/complexobject.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/complexobject.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/complexobject.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/complexobject.h 2016-03-19 16:40:12.000000000 +0000 @@ -15,7 +15,7 @@ } Py_complex; /* generated function */ -PyAPI_FUNC(void) _PyComplex_AsCComplex(PyObject *, Py_complex *); +PyAPI_FUNC(int) 
_PyComplex_AsCComplex(PyObject *, Py_complex *); PyAPI_FUNC(PyObject *) _PyComplex_FromCComplex(Py_complex *); Py_LOCAL_INLINE(Py_complex) PyComplex_AsCComplex(PyObject *obj) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/floatobject.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/floatobject.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/floatobject.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/floatobject.h 2016-03-19 16:40:12.000000000 +0000 @@ -7,6 +7,18 @@ extern "C" { #endif +#define PyFloat_STR_PRECISION 12 + +#ifdef Py_NAN +#define Py_RETURN_NAN return PyFloat_FromDouble(Py_NAN) +#endif + +#define Py_RETURN_INF(sign) do \ + if (copysign(1., sign) == 1.) { \ + return PyFloat_FromDouble(Py_HUGE_VAL); \ + } else { \ + return PyFloat_FromDouble(-Py_HUGE_VAL); \ + } while(0) #ifdef __cplusplus } diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/frameobject.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/frameobject.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/frameobject.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/frameobject.h 2016-03-19 16:40:12.000000000 +0000 @@ -8,6 +8,7 @@ PyObject_HEAD PyCodeObject *f_code; PyObject *f_globals; + PyObject *f_locals; int f_lineno; } PyFrameObject; diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/object.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/object.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/object.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/object.h 2016-03-19 16:40:12.000000000 +0000 @@ -17,7 +17,8 @@ #define staticforward static #define PyObject_HEAD \ - long ob_refcnt; \ + Py_ssize_t ob_refcnt; \ + Py_ssize_t ob_pypy_link; \ struct _typeobject *ob_type; #define PyObject_VAR_HEAD \ @@ -25,7 +26,7 @@ Py_ssize_t ob_size; /* Number of items in variable part */ #define PyObject_HEAD_INIT(type) \ - 1, type, + 1, 0, type, #define PyVarObject_HEAD_INIT(type, size) \ 
PyObject_HEAD_INIT(type) size, @@ -40,19 +41,19 @@ #ifdef PYPY_DEBUG_REFCOUNT /* Slow version, but useful for debugging */ -#define Py_INCREF(ob) (Py_IncRef((PyObject *)ob)) -#define Py_DECREF(ob) (Py_DecRef((PyObject *)ob)) -#define Py_XINCREF(ob) (Py_IncRef((PyObject *)ob)) -#define Py_XDECREF(ob) (Py_DecRef((PyObject *)ob)) +#define Py_INCREF(ob) (Py_IncRef((PyObject *)(ob))) +#define Py_DECREF(ob) (Py_DecRef((PyObject *)(ob))) +#define Py_XINCREF(ob) (Py_IncRef((PyObject *)(ob))) +#define Py_XDECREF(ob) (Py_DecRef((PyObject *)(ob))) #else /* Fast version */ -#define Py_INCREF(ob) (((PyObject *)ob)->ob_refcnt++) -#define Py_DECREF(ob) \ +#define Py_INCREF(ob) (((PyObject *)(ob))->ob_refcnt++) +#define Py_DECREF(op) \ do { \ - if (((PyObject *)ob)->ob_refcnt > 1) \ - ((PyObject *)ob)->ob_refcnt--; \ + if (--((PyObject *)(op))->ob_refcnt != 0) \ + ; \ else \ - Py_DecRef((PyObject *)ob); \ + _Py_Dealloc((PyObject *)(op)); \ } while (0) #define Py_XINCREF(op) do { if ((op) == NULL) ; else Py_INCREF(op); } while (0) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/patchlevel.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/patchlevel.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/patchlevel.h 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/patchlevel.h 2016-03-19 16:40:15.000000000 +0000 @@ -29,7 +29,14 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "4.0.1" +#define PYPY_VERSION "5.0.1" +#define PYPY_VERSION_NUM 0x05000100 + +/* Defined to mean a PyPy where cpyext holds more regular references + to PyObjects, e.g. staying alive as long as the internal PyPy object + stays alive. */ +#define PYPY_CPYEXT_GC 1 +#define PyPy_Borrow(a, b) ((void) 0) /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. 
*/ diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/pymath.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/pymath.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/pymath.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/pymath.h 2016-03-19 16:40:12.000000000 +0000 @@ -17,4 +17,35 @@ #define Py_HUGE_VAL HUGE_VAL #endif +/* Py_NAN + * A value that evaluates to a NaN. On IEEE 754 platforms INF*0 or + * INF/INF works. Define Py_NO_NAN in pyconfig.h if your platform + * doesn't support NaNs. + */ +#if !defined(Py_NAN) && !defined(Py_NO_NAN) +#if !defined(__INTEL_COMPILER) + #define Py_NAN (Py_HUGE_VAL * 0.) +#else /* __INTEL_COMPILER */ + #if defined(ICC_NAN_STRICT) + #pragma float_control(push) + #pragma float_control(precise, on) + #pragma float_control(except, on) + #if defined(_MSC_VER) + __declspec(noinline) + #else /* Linux */ + __attribute__((noinline)) + #endif /* _MSC_VER */ + static double __icc_nan() + { + return sqrt(-1.0); + } + #pragma float_control (pop) + #define Py_NAN __icc_nan() + #else /* ICC_NAN_RELAXED as default for Intel Compiler */ + static union { unsigned char buf[8]; double __icc_nan; } __nan_store = {0,0,0,0,0,0,0xf8,0x7f}; + #define Py_NAN (__nan_store.__icc_nan) + #endif /* ICC_NAN_STRICT */ +#endif /* __INTEL_COMPILER */ +#endif + #endif /* Py_PYMATH_H */ diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/Python.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/Python.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/Python.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/Python.h 2016-03-19 16:40:12.000000000 +0000 @@ -84,6 +84,7 @@ #include "pyconfig.h" #include "object.h" +#include "pymath.h" #include "pyport.h" #include "warnings.h" @@ -99,6 +100,7 @@ #include "complexobject.h" #include "methodobject.h" #include "funcobject.h" +#include "code.h" #include "modsupport.h" #include "pythonrun.h" @@ -114,7 +116,6 @@ #include "compile.h" #include "frameobject.h" #include 
"eval.h" -#include "pymath.h" #include "pymem.h" #include "pycobject.h" #include "pycapsule.h" @@ -131,9 +132,6 @@ /* Missing definitions */ #include "missing.h" -// XXX This shouldn't be included here -#include "structmember.h" - #include /* Define macros for inline documentation. */ diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/stringobject.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/stringobject.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/stringobject.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/stringobject.h 2016-03-19 16:40:12.000000000 +0000 @@ -7,8 +7,8 @@ extern "C" { #endif -#define PyString_GET_SIZE(op) PyString_Size(op) -#define PyString_AS_STRING(op) PyString_AsString(op) +#define PyString_GET_SIZE(op) PyString_Size((PyObject*)(op)) +#define PyString_AS_STRING(op) PyString_AsString((PyObject*)(op)) typedef struct { PyObject_HEAD diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/structmember.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/structmember.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/structmember.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/structmember.h 2016-03-19 16:40:12.000000000 +0000 @@ -4,54 +4,85 @@ extern "C" { #endif + +/* Interface to map C struct members to Python object attributes */ + #include /* For offsetof */ + +/* The offsetof() macro calculates the offset of a structure member + in its structure. Unfortunately this cannot be written down + portably, hence it is provided by a Standard C header file. + For pre-Standard C compilers, here is a version that usually works + (but watch out!): */ + #ifndef offsetof #define offsetof(type, member) ( (int) & ((type*)0) -> member ) #endif +/* An array of memberlist structures defines the name, type and offset + of selected members of a C structure. These can be read by + PyMember_Get() and set by PyMember_Set() (except if their READONLY flag + is set). 
The array must be terminated with an entry whose name + pointer is NULL. */ + + typedef struct PyMemberDef { - /* Current version, use this */ - char *name; - int type; - Py_ssize_t offset; - int flags; - char *doc; + /* Current version, use this */ + char *name; + int type; + Py_ssize_t offset; + int flags; + char *doc; } PyMemberDef; +/* Types */ +#define T_SHORT 0 +#define T_INT 1 +#define T_LONG 2 +#define T_FLOAT 3 +#define T_DOUBLE 4 +#define T_STRING 5 +#define T_OBJECT 6 +/* XXX the ordering here is weird for binary compatibility */ +#define T_CHAR 7 /* 1-character string */ +#define T_BYTE 8 /* 8-bit signed int */ +/* unsigned variants: */ +#define T_UBYTE 9 +#define T_USHORT 10 +#define T_UINT 11 +#define T_ULONG 12 + +/* Added by Jack: strings contained in the structure */ +#define T_STRING_INPLACE 13 + +/* Added by Lillo: bools contained in the structure (assumed char) */ +#define T_BOOL 14 + +#define T_OBJECT_EX 16 /* Like T_OBJECT, but raises AttributeError + when the value is NULL, instead of + converting to None. */ +#ifdef HAVE_LONG_LONG +#define T_LONGLONG 17 +#define T_ULONGLONG 18 +#endif /* HAVE_LONG_LONG */ -/* Types. These constants are also in structmemberdefs.py. */ -#define T_SHORT 0 -#define T_INT 1 -#define T_LONG 2 -#define T_FLOAT 3 -#define T_DOUBLE 4 -#define T_STRING 5 -#define T_OBJECT 6 -#define T_CHAR 7 /* 1-character string */ -#define T_BYTE 8 /* 8-bit signed int */ -#define T_UBYTE 9 -#define T_USHORT 10 -#define T_UINT 11 -#define T_ULONG 12 -#define T_STRING_INPLACE 13 /* Strings contained in the structure */ -#define T_BOOL 14 -#define T_OBJECT_EX 16 /* Like T_OBJECT, but raises AttributeError - when the value is NULL, instead of - converting to None. */ -#define T_LONGLONG 17 -#define T_ULONGLONG 18 -#define T_PYSSIZET 19 +#define T_PYSSIZET 19 /* Py_ssize_t */ /* Flags. These constants are also in structmemberdefs.py. 
*/ -#define READONLY 1 -#define RO READONLY /* Shorthand */ +#define READONLY 1 +#define RO READONLY /* Shorthand */ #define READ_RESTRICTED 2 #define PY_WRITE_RESTRICTED 4 -#define RESTRICTED (READ_RESTRICTED | PY_WRITE_RESTRICTED) +#define RESTRICTED (READ_RESTRICTED | PY_WRITE_RESTRICTED) + + +/* API functions. */ +#include "pypy_structmember_decl.h" #ifdef __cplusplus } #endif #endif /* !Py_STRUCTMEMBER_H */ + diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/include/tupleobject.h pypy-5.0.1+dfsg/pypy/module/cpyext/include/tupleobject.h --- pypy-4.0.1+dfsg/pypy/module/cpyext/include/tupleobject.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/include/tupleobject.h 2016-03-19 16:40:12.000000000 +0000 @@ -7,11 +7,21 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + Py_ssize_t ob_size; + PyObject **ob_item; /* XXX optimize to ob_item[] */ +} PyTupleObject; + /* defined in varargswrapper.c */ PyAPI_FUNC(PyObject *) PyTuple_Pack(Py_ssize_t, ...); -#define PyTuple_SET_ITEM PyTuple_SetItem -#define PyTuple_GET_ITEM PyTuple_GetItem +/* Macro, trading safety for speed */ +#define PyTuple_GET_ITEM(op, i) (((PyTupleObject *)(op))->ob_item[i]) +#define PyTuple_GET_SIZE(op) Py_SIZE(op) + +/* Macro, *only* to be used to fill in brand new tuples */ +#define PyTuple_SET_ITEM(op, i, v) (((PyTupleObject *)(op))->ob_item[i] = v) #ifdef __cplusplus diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/__init__.py pypy-5.0.1+dfsg/pypy/module/cpyext/__init__.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/__init__.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/__init__.py 2016-03-19 16:40:12.000000000 +0000 @@ -34,9 +34,8 @@ import pypy.module.cpyext.pyerrors import pypy.module.cpyext.typeobject import pypy.module.cpyext.object -import pypy.module.cpyext.stringobject +import pypy.module.cpyext.bytesobject import pypy.module.cpyext.tupleobject -import pypy.module.cpyext.ndarrayobject import pypy.module.cpyext.setobject import 
pypy.module.cpyext.dictobject import pypy.module.cpyext.intobject @@ -61,7 +60,6 @@ import pypy.module.cpyext.funcobject import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject -import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/intobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/intobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/intobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/intobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -5,7 +5,7 @@ cpython_api, cpython_struct, build_type_checkers, bootstrap_function, PyObject, PyObjectFields, CONST_STRING, CANNOT_FAIL, Py_ssize_t) from pypy.module.cpyext.pyobject import ( - make_typedescr, track_reference, RefcountState, from_ref) + make_typedescr, track_reference, from_ref) from rpython.rlib.rarithmetic import r_uint, intmask, LONG_TEST, r_ulonglong from pypy.objspace.std.intobject import W_IntObject import sys @@ -19,7 +19,7 @@ @bootstrap_function def init_intobject(space): "Type description of PyIntObject" - make_typedescr(space.w_int.instancetypedef, + make_typedescr(space.w_int.layout.typedef, basestruct=PyIntObject.TO, attach=int_attach, realize=int_realize) @@ -38,8 +38,6 @@ w_obj = space.allocate_instance(W_IntObject, w_type) w_obj.__init__(intval) track_reference(space, obj, w_obj) - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, obj) return w_obj PyInt_Check, PyInt_CheckExact = build_type_checkers("Int") @@ -53,7 +51,7 @@ @cpython_api([lltype.Signed], PyObject) def PyInt_FromLong(space, ival): """Create a new integer object with a value of ival. 
- + """ return space.wrap(ival) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/listobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/listobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/listobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/listobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,7 +3,7 @@ from pypy.module.cpyext.api import (cpython_api, CANNOT_FAIL, Py_ssize_t, build_type_checkers) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, borrow_from +from pypy.module.cpyext.pyobject import Py_DecRef, PyObject from pypy.objspace.std.listobject import W_ListObject from pypy.interpreter.error import OperationError @@ -38,7 +38,7 @@ w_list.setitem(index, w_item) return 0 -@cpython_api([PyObject, Py_ssize_t], PyObject) +@cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True) def PyList_GetItem(space, w_list, index): """Return the object at position pos in the list pointed to by p. The position must be positive, indexing from the end of the list is not @@ -49,8 +49,10 @@ if index < 0 or index >= w_list.length(): raise OperationError(space.w_IndexError, space.wrap( "list index out of range")) - w_item = w_list.getitem(index) - return borrow_from(w_list, w_item) + w_list.ensure_object_strategy() # make sure we can return a borrowed obj + # XXX ^^^ how does this interact with CPyListStrategy? 
+ w_res = w_list.getitem(index) + return w_res # borrowed ref @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/longobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/longobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/longobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/longobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -228,26 +228,11 @@ def _PyLong_FromByteArray(space, bytes, n, little_endian, signed): little_endian = rffi.cast(lltype.Signed, little_endian) signed = rffi.cast(lltype.Signed, signed) - - result = rbigint() - negative = False - - for i in range(0, n): - if little_endian: - c = intmask(bytes[i]) - else: - c = intmask(bytes[n - i - 1]) - if i == 0 and signed and c & 0x80: - negative = True - if negative: - c = c ^ 0xFF - digit = rbigint.fromint(c) - - result = result.lshift(8) - result = result.add(digit) - - if negative: - result = result.neg() - + s = rffi.charpsize2str(rffi.cast(rffi.CCHARP, bytes), + rffi.cast(lltype.Signed, n)) + if little_endian: + byteorder = 'little' + else: + byteorder = 'big' + result = rbigint.frombytes(s, byteorder, signed != 0) return space.newlong_from_rbigint(result) - diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/methodobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/methodobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/methodobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/methodobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -50,7 +50,7 @@ py_func.c_m_self = make_ref(space, w_obj.w_self) py_func.c_m_module = make_ref(space, w_obj.w_module) -@cpython_api([PyObject], lltype.Void, external=False) +@cpython_api([PyObject], lltype.Void, header=None) def cfunction_dealloc(space, py_obj): py_func = rffi.cast(PyCFunctionObject, py_obj) Py_DecRef(space, py_func.c_m_self) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/modsupport.py pypy-5.0.1+dfsg/pypy/module/cpyext/modsupport.py --- 
pypy-4.0.1+dfsg/pypy/module/cpyext/modsupport.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/modsupport.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, cpython_struct, \ METH_STATIC, METH_CLASS, METH_COEXIST, CANNOT_FAIL, CONST_STRING -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject from pypy.interpreter.module import Module from pypy.module.cpyext.methodobject import ( W_PyCFunctionObject, PyCFunction_NewEx, PyDescr_NewMethod, @@ -34,7 +34,7 @@ # This is actually the Py_InitModule4 function, # renamed to refuse modules built against CPython headers. @cpython_api([CONST_STRING, lltype.Ptr(PyMethodDef), CONST_STRING, - PyObject, rffi.INT_real], PyObject) + PyObject, rffi.INT_real], PyObject, result_borrowed=True) def _Py_InitPyPyModule(space, name, methods, doc, w_self, apiver): """ Create a new module object based on a name and table of functions, returning @@ -69,7 +69,7 @@ if doc: space.setattr(w_mod, space.wrap("__doc__"), space.wrap(rffi.charp2str(doc))) - return borrow_from(None, w_mod) + return w_mod # borrowed result kept alive in PyImport_AddModule() def convert_method_defs(space, dict_w, methods, w_type, w_self=None, name=None): @@ -114,12 +114,12 @@ return int(space.is_w(w_type, w_obj_type) or space.is_true(space.issubtype(w_obj_type, w_type))) -@cpython_api([PyObject], PyObject) +@cpython_api([PyObject], PyObject, result_borrowed=True) def PyModule_GetDict(space, w_mod): if PyModule_Check(space, w_mod): assert isinstance(w_mod, Module) w_dict = w_mod.getdict(space) - return borrow_from(w_mod, w_dict) + return w_dict # borrowed reference, likely from w_mod.w_dict else: PyErr_BadInternalCall(space) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/object.py pypy-5.0.1+dfsg/pypy/module/cpyext/object.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/object.py 2015-11-19 
19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/object.py 2016-03-19 16:40:12.000000000 +0000 @@ -6,7 +6,7 @@ Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, create_ref, from_ref, Py_IncRef, Py_DecRef, - track_reference, get_typedescr, _Py_NewReference, RefcountState) + get_typedescr, _Py_NewReference) from pypy.module.cpyext.typeobject import PyTypeObjectPtr from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall from pypy.objspace.std.typeobject import W_TypeObject @@ -17,7 +17,8 @@ @cpython_api([Py_ssize_t], rffi.VOIDP) def PyObject_MALLOC(space, size): return lltype.malloc(rffi.VOIDP.TO, size, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) @cpython_api([rffi.VOIDP], lltype.Void) def PyObject_FREE(space, ptr): @@ -31,9 +32,9 @@ def _PyObject_NewVar(space, type, itemcount): w_type = from_ref(space, rffi.cast(PyObject, type)) assert isinstance(w_type, W_TypeObject) - typedescr = get_typedescr(w_type.instancetypedef) + typedescr = get_typedescr(w_type.layout.typedef) py_obj = typedescr.allocate(space, w_type, itemcount=itemcount) - py_obj.c_ob_refcnt = 0 + #py_obj.c_ob_refcnt = 0 --- will be set to 1 again by PyObject_Init{Var} if type.c_tp_itemsize == 0: w_obj = PyObject_Init(space, py_obj, type) else: diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/patches/Doc_stubgen_enable.patch pypy-5.0.1+dfsg/pypy/module/cpyext/patches/Doc_stubgen_enable.patch --- pypy-4.0.1+dfsg/pypy/module/cpyext/patches/Doc_stubgen_enable.patch 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/patches/Doc_stubgen_enable.patch 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,27 @@ +Index: Doc/tools/sphinx/ext/refcounting.py +=================================================================== +--- Doc/tools/sphinx/ext/refcounting.py (Revision 79453) ++++ Doc/tools/sphinx/ext/refcounting.py (Arbeitskopie) +@@ -91,6 +91,7 @@ + if 
app.config.refcount_file: + refcounts = Refcounts.fromfile( + path.join(app.srcdir, app.config.refcount_file)) ++ app._refcounts = refcounts + app.connect('doctree-read', refcounts.add_refcount_annotations) + + +Index: Doc/conf.py +=================================================================== +--- Doc/conf.py (Revision 79421) ++++ Doc/conf.py (Arbeitskopie) +@@ -13,8 +13,8 @@ + # General configuration + # --------------------- + +-extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage', +- 'sphinx.ext.doctest', 'pyspecific'] ++extensions = ['pypy.module.cpyext.stubgen', 'sphinx.ext.refcounting', 'sphinx.ext.coverage', ++ 'sphinx.ext.doctest', 'pyspecific', ] + templates_path = ['tools/sphinxext'] + + # General substitutions. diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/pyerrors.py pypy-5.0.1+dfsg/pypy/module/cpyext/pyerrors.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/pyerrors.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/pyerrors.py 2016-03-19 16:40:12.000000000 +0000 @@ -6,7 +6,7 @@ from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, CONST_STRING from pypy.module.exceptions.interp_exceptions import W_RuntimeWarning from pypy.module.cpyext.pyobject import ( - PyObject, PyObjectP, make_ref, from_ref, Py_DecRef, borrow_from) + PyObject, PyObjectP, make_ref, from_ref, Py_DecRef) from pypy.module.cpyext.state import State from pypy.module.cpyext.import_ import PyImport_Import from rpython.rlib import rposix, jit @@ -28,12 +28,12 @@ """This is a shorthand for PyErr_SetObject(type, Py_None).""" PyErr_SetObject(space, w_type, space.w_None) -@cpython_api([], PyObject) +@cpython_api([], PyObject, result_borrowed=True) def PyErr_Occurred(space): state = space.fromcache(State) if state.operror is None: return None - return borrow_from(None, state.operror.w_type) + return state.operror.w_type # borrowed ref @cpython_api([], lltype.Void) def PyErr_Clear(space): diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/pyfile.py 
pypy-5.0.1+dfsg/pypy/module/cpyext/pyfile.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/pyfile.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/pyfile.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers) -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject from pypy.module.cpyext.object import Py_PRINT_RAW from pypy.interpreter.error import OperationError from pypy.module._file.interp_file import W_File @@ -83,7 +83,8 @@ @cpython_api([PyObject], PyObject) def PyFile_Name(space, w_p): """Return the name of the file specified by p as a string object.""" - return borrow_from(w_p, space.getattr(w_p, space.wrap("name"))) + w_name = space.getattr(w_p, space.wrap("name")) + return w_name # borrowed ref, should be a W_StringObject from the file @cpython_api([PyObject, rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL) def PyFile_SoftSpace(space, w_p, newflag): diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/pyobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/pyobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/pyobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/pyobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -2,15 +2,19 @@ from pypy.interpreter.baseobjspace import W_Root, SpaceCache from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.extregistry import ExtRegistryEntry from pypy.module.cpyext.api import ( cpython_api, bootstrap_function, PyObject, PyObjectP, ADDR, - CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr) + CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr, is_PyObject, + INTERPLEVEL_API) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objectobject import W_ObjectObject from rpython.rlib.objectmodel 
import specialize, we_are_translated -from rpython.rlib.rweakref import RWeakKeyDictionary +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.annlowlevel import llhelper +from rpython.rlib import rawrefcount + #________________________________________________________ # type description @@ -28,13 +32,15 @@ def allocate(self, space, w_type, itemcount=0): # similar to PyType_GenericAlloc? # except that it's not related to any pypy object. + # this returns a PyObject with ob_refcnt == 1. - pytype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_type)) + pytype = as_pyobj(space, w_type) + pytype = rffi.cast(PyTypeObjectPtr, pytype) + assert pytype # Don't increase refcount for non-heaptypes - if pytype: - flags = rffi.cast(lltype.Signed, pytype.c_tp_flags) - if not flags & Py_TPFLAGS_HEAPTYPE: - Py_DecRef(space, w_type) + flags = rffi.cast(lltype.Signed, pytype.c_tp_flags) + if flags & Py_TPFLAGS_HEAPTYPE: + Py_IncRef(space, w_type) if pytype: size = pytype.c_tp_basicsize @@ -42,8 +48,10 @@ size = rffi.sizeof(self.basestruct) if itemcount: size += itemcount * pytype.c_tp_itemsize + assert size >= rffi.sizeof(PyObject.TO) buf = lltype.malloc(rffi.VOIDP.TO, size, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) pyobj = rffi.cast(PyObject, buf) pyobj.c_ob_refcnt = 1 pyobj.c_ob_type = pytype @@ -56,9 +64,6 @@ w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) w_obj = space.allocate_instance(self.W_BaseObject, w_type) track_reference(space, obj, w_obj) - if w_type is not space.gettypefor(self.W_BaseObject): - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, obj) return w_obj typedescr_cache = {} @@ -70,7 +75,7 @@ alloc : allocate and basic initialization of a raw PyObject attach : Function called to tie a raw structure to a pypy object realize : Function called to create a pypy object from a raw struct - dealloc : a cpython_api(external=False), similar to PyObject_dealloc + dealloc : a 
cpython_api(header=None), similar to PyObject_dealloc """ tp_basestruct = kw.pop('basestruct', PyObject.TO) @@ -111,7 +116,7 @@ def init_pyobject(space): from pypy.module.cpyext.object import PyObject_dealloc # typedescr for the 'object' type - make_typedescr(space.w_object.instancetypedef, + make_typedescr(space.w_object.layout.typedef, dealloc=PyObject_dealloc) # almost all types, which should better inherit from object. make_typedescr(None) @@ -134,104 +139,6 @@ #________________________________________________________ # refcounted object support -class RefcountState: - def __init__(self, space): - self.space = space - self.py_objects_w2r = {} # { w_obj -> raw PyObject } - self.py_objects_r2w = {} # { addr of raw PyObject -> w_obj } - - self.lifeline_dict = RWeakKeyDictionary(W_Root, PyOLifeline) - - self.borrow_mapping = {None: {}} - # { w_container -> { w_containee -> None } } - # the None entry manages references borrowed during a call to - # generic_cpy_call() - - # For tests - self.non_heaptypes_w = [] - - def _cleanup_(self): - assert self.borrow_mapping == {None: {}} - self.py_objects_r2w.clear() # is not valid anymore after translation - - def init_r2w_from_w2r(self): - """Rebuilds the dict py_objects_r2w on startup""" - for w_obj, obj in self.py_objects_w2r.items(): - ptr = rffi.cast(ADDR, obj) - self.py_objects_r2w[ptr] = w_obj - - def print_refcounts(self): - print "REFCOUNTS" - for w_obj, obj in self.py_objects_w2r.items(): - print "%r: %i" % (w_obj, obj.c_ob_refcnt) - - def get_from_lifeline(self, w_obj): - lifeline = self.lifeline_dict.get(w_obj) - if lifeline is not None: # make old PyObject ready for use in C code - py_obj = lifeline.pyo - assert py_obj.c_ob_refcnt == 0 - return py_obj - else: - return lltype.nullptr(PyObject.TO) - - def set_lifeline(self, w_obj, py_obj): - self.lifeline_dict.set(w_obj, - PyOLifeline(self.space, py_obj)) - - def make_borrowed(self, w_container, w_borrowed): - """ - Create a borrowed reference, which will live as 
long as the container - has a living reference (as a PyObject!) - """ - ref = make_ref(self.space, w_borrowed) - obj_ptr = rffi.cast(ADDR, ref) - - borrowees = self.borrow_mapping.setdefault(w_container, {}) - if w_borrowed in borrowees: - Py_DecRef(self.space, w_borrowed) # cancel incref from make_ref() - else: - borrowees[w_borrowed] = None - - return ref - - def reset_borrowed_references(self): - "Used in tests" - for w_container, w_borrowed in self.borrow_mapping.items(): - Py_DecRef(self.space, w_borrowed) - self.borrow_mapping = {None: {}} - - def delete_borrower(self, w_obj): - """ - Called when a potential container for borrowed references has lost its - last reference. Removes the borrowed references it contains. - """ - if w_obj in self.borrow_mapping: # move to lifeline __del__ - for w_containee in self.borrow_mapping[w_obj]: - self.forget_borrowee(w_containee) - del self.borrow_mapping[w_obj] - - def swap_borrow_container(self, container): - """switch the current default contained with the given one.""" - if container is None: - old_container = self.borrow_mapping[None] - self.borrow_mapping[None] = {} - return old_container - else: - old_container = self.borrow_mapping[None] - self.borrow_mapping[None] = container - for w_containee in old_container: - self.forget_borrowee(w_containee) - - def forget_borrowee(self, w_obj): - "De-register an object from the list of borrowed references" - ref = self.py_objects_w2r.get(w_obj, lltype.nullptr(PyObject.TO)) - if not ref: - if DEBUG_REFCOUNT: - print >>sys.stderr, "Borrowed object is already gone!" - return - - Py_DecRef(self.space, ref) - class InvalidPointerException(Exception): pass @@ -249,72 +156,50 @@ def create_ref(space, w_obj, itemcount=0): """ Allocates a PyObject, and fills its fields with info from the given - intepreter object. + interpreter object. 
""" - state = space.fromcache(RefcountState) w_type = space.type(w_obj) - if w_type.is_cpytype(): - py_obj = state.get_from_lifeline(w_obj) - if py_obj: - Py_IncRef(space, py_obj) - return py_obj - typedescr = get_typedescr(w_obj.typedef) py_obj = typedescr.allocate(space, w_type, itemcount=itemcount) - if w_type.is_cpytype(): - state.set_lifeline(w_obj, py_obj) + track_reference(space, py_obj, w_obj) + # + # py_obj.c_ob_refcnt should be exactly REFCNT_FROM_PYPY + 1 here, + # and we want only REFCNT_FROM_PYPY, i.e. only count as attached + # to the W_Root but not with any reference from the py_obj side. + assert py_obj.c_ob_refcnt > rawrefcount.REFCNT_FROM_PYPY + py_obj.c_ob_refcnt -= 1 + # typedescr.attach(space, py_obj, w_obj) return py_obj -def track_reference(space, py_obj, w_obj, replace=False): +def track_reference(space, py_obj, w_obj): """ Ties together a PyObject and an interpreter object. + The PyObject's refcnt is increased by REFCNT_FROM_PYPY. + The reference in 'py_obj' is not stolen! Remember to Py_DecRef() + it is you need to. """ # XXX looks like a PyObject_GC_TRACK - ptr = rffi.cast(ADDR, py_obj) - state = space.fromcache(RefcountState) + assert py_obj.c_ob_refcnt < rawrefcount.REFCNT_FROM_PYPY + py_obj.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY if DEBUG_REFCOUNT: debug_refcount("MAKREF", py_obj, w_obj) - if not replace: - assert w_obj not in state.py_objects_w2r - assert ptr not in state.py_objects_r2w - state.py_objects_w2r[w_obj] = py_obj - if ptr: # init_typeobject() bootstraps with NULL references - state.py_objects_r2w[ptr] = w_obj - -def make_ref(space, w_obj): - """ - Returns a new reference to an intepreter object. 
- """ - if w_obj is None: - return lltype.nullptr(PyObject.TO) - assert isinstance(w_obj, W_Root) - state = space.fromcache(RefcountState) - try: - py_obj = state.py_objects_w2r[w_obj] - except KeyError: - py_obj = create_ref(space, w_obj) - track_reference(space, py_obj, w_obj) - else: - Py_IncRef(space, py_obj) - return py_obj + assert w_obj + assert py_obj + rawrefcount.create_link_pypy(w_obj, py_obj) def from_ref(space, ref): """ Finds the interpreter object corresponding to the given reference. If the - object is not yet realized (see stringobject.py), creates it. + object is not yet realized (see bytesobject.py), creates it. """ - assert lltype.typeOf(ref) == PyObject + assert is_pyobj(ref) if not ref: return None - state = space.fromcache(RefcountState) - ptr = rffi.cast(ADDR, ref) - - try: - return state.py_objects_r2w[ptr] - except KeyError: - pass + w_obj = rawrefcount.to_obj(W_Root, ref) + if w_obj is not None: + return w_obj # This reference is not yet a real interpreter object. # Realize it. 
@@ -323,126 +208,135 @@ raise InvalidPointerException(str(ref)) w_type = from_ref(space, ref_type) assert isinstance(w_type, W_TypeObject) - return get_typedescr(w_type.instancetypedef).realize(space, ref) + return get_typedescr(w_type.layout.typedef).realize(space, ref) -# XXX Optimize these functions and put them into macro definitions -@cpython_api([PyObject], lltype.Void) -def Py_DecRef(space, obj): - if not obj: - return - assert lltype.typeOf(obj) == PyObject +def debug_collect(): + rawrefcount._collect() - obj.c_ob_refcnt -= 1 - if DEBUG_REFCOUNT: - debug_refcount("DECREF", obj, obj.c_ob_refcnt, frame_stackdepth=3) - if obj.c_ob_refcnt == 0: - state = space.fromcache(RefcountState) - ptr = rffi.cast(ADDR, obj) - if ptr not in state.py_objects_r2w: - # this is a half-allocated object, lets call the deallocator - # without modifying the r2w/w2r dicts - _Py_Dealloc(space, obj) - else: - w_obj = state.py_objects_r2w[ptr] - del state.py_objects_r2w[ptr] - w_type = space.type(w_obj) - if not w_type.is_cpytype(): + +def as_pyobj(space, w_obj): + """ + Returns a 'PyObject *' representing the given intepreter object. + This doesn't give a new reference, but the returned 'PyObject *' + is valid at least as long as 'w_obj' is. **To be safe, you should + use keepalive_until_here(w_obj) some time later.** In case of + doubt, use the safer make_ref(). 
+ """ + if w_obj is not None: + assert not is_pyobj(w_obj) + py_obj = rawrefcount.from_obj(PyObject, w_obj) + if not py_obj: + py_obj = create_ref(space, w_obj) + return py_obj + else: + return lltype.nullptr(PyObject.TO) +as_pyobj._always_inline_ = 'try' +INTERPLEVEL_API['as_pyobj'] = as_pyobj + +def pyobj_has_w_obj(pyobj): + return rawrefcount.to_obj(W_Root, pyobj) is not None +INTERPLEVEL_API['pyobj_has_w_obj'] = staticmethod(pyobj_has_w_obj) + + +def is_pyobj(x): + if x is None or isinstance(x, W_Root): + return False + elif is_PyObject(lltype.typeOf(x)): + return True + else: + raise TypeError(repr(type(x))) +INTERPLEVEL_API['is_pyobj'] = staticmethod(is_pyobj) + +class Entry(ExtRegistryEntry): + _about_ = is_pyobj + def compute_result_annotation(self, s_x): + from rpython.rtyper.llannotation import SomePtr + return self.bookkeeper.immutablevalue(isinstance(s_x, SomePtr)) + def specialize_call(self, hop): + hop.exception_cannot_occur() + return hop.inputconst(lltype.Bool, hop.s_result.const) + +@specialize.ll() +def make_ref(space, obj): + """Increment the reference counter of the PyObject and return it. + Can be called with either a PyObject or a W_Root. + """ + if is_pyobj(obj): + pyobj = rffi.cast(PyObject, obj) + else: + pyobj = as_pyobj(space, obj) + if pyobj: + assert pyobj.c_ob_refcnt > 0 + pyobj.c_ob_refcnt += 1 + if not is_pyobj(obj): + keepalive_until_here(obj) + return pyobj +INTERPLEVEL_API['make_ref'] = make_ref + + +@specialize.ll() +def get_w_obj_and_decref(space, obj): + """Decrement the reference counter of the PyObject and return the + corresponding W_Root object (so the reference count is at least + REFCNT_FROM_PYPY and cannot be zero). Can be called with either + a PyObject or a W_Root. 
+ """ + if is_pyobj(obj): + pyobj = rffi.cast(PyObject, obj) + w_obj = from_ref(space, pyobj) + else: + w_obj = obj + pyobj = as_pyobj(space, w_obj) + if pyobj: + pyobj.c_ob_refcnt -= 1 + assert pyobj.c_ob_refcnt >= rawrefcount.REFCNT_FROM_PYPY + keepalive_until_here(w_obj) + return w_obj +INTERPLEVEL_API['get_w_obj_and_decref'] = get_w_obj_and_decref + + +@specialize.ll() +def incref(space, obj): + make_ref(space, obj) +INTERPLEVEL_API['incref'] = incref + +@specialize.ll() +def decref(space, obj): + if is_pyobj(obj): + obj = rffi.cast(PyObject, obj) + if obj: + assert obj.c_ob_refcnt > 0 + obj.c_ob_refcnt -= 1 + if obj.c_ob_refcnt == 0: _Py_Dealloc(space, obj) - del state.py_objects_w2r[w_obj] - # if the object was a container for borrowed references - state.delete_borrower(w_obj) else: - if not we_are_translated() and obj.c_ob_refcnt < 0: - message = "Negative refcount for obj %s with type %s" % ( - obj, rffi.charp2str(obj.c_ob_type.c_tp_name)) - print >>sys.stderr, message - assert False, message + get_w_obj_and_decref(space, obj) +INTERPLEVEL_API['decref'] = decref + @cpython_api([PyObject], lltype.Void) def Py_IncRef(space, obj): - if not obj: - return - obj.c_ob_refcnt += 1 - assert obj.c_ob_refcnt > 0 - if DEBUG_REFCOUNT: - debug_refcount("INCREF", obj, obj.c_ob_refcnt, frame_stackdepth=3) + incref(space, obj) + +@cpython_api([PyObject], lltype.Void) +def Py_DecRef(space, obj): + decref(space, obj) @cpython_api([PyObject], lltype.Void) def _Py_NewReference(space, obj): obj.c_ob_refcnt = 1 w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) assert isinstance(w_type, W_TypeObject) - get_typedescr(w_type.instancetypedef).realize(space, obj) + get_typedescr(w_type.layout.typedef).realize(space, obj) +@cpython_api([PyObject], lltype.Void) def _Py_Dealloc(space, obj): - from pypy.module.cpyext.api import generic_cpy_call_dont_decref + from pypy.module.cpyext.api import generic_cpy_call pto = obj.c_ob_type #print >>sys.stderr, "Calling dealloc slot", 
pto.c_tp_dealloc, "of", obj, \ # "'s type which is", rffi.charp2str(pto.c_tp_name) - generic_cpy_call_dont_decref(space, pto.c_tp_dealloc, obj) - -#___________________________________________________________ -# Support for "lifelines" -# -# Object structure must stay alive even when not referenced -# by any C code. - -class PyOLifeline(object): - def __init__(self, space, pyo): - self.pyo = pyo - self.space = space - - def __del__(self): - if self.pyo: - assert self.pyo.c_ob_refcnt == 0 - _Py_Dealloc(self.space, self.pyo) - self.pyo = lltype.nullptr(PyObject.TO) - # XXX handle borrowed objects here - -#___________________________________________________________ -# Support for borrowed references - -def make_borrowed_ref(space, w_container, w_borrowed): - """ - Create a borrowed reference, which will live as long as the container - has a living reference (as a PyObject!) - """ - if w_borrowed is None: - return lltype.nullptr(PyObject.TO) - - state = space.fromcache(RefcountState) - return state.make_borrowed(w_container, w_borrowed) - -class Reference: - def __init__(self, pyobj): - assert not isinstance(pyobj, W_Root) - self.pyobj = pyobj - - def get_ref(self, space): - return self.pyobj - - def get_wrapped(self, space): - return from_ref(space, self.pyobj) - -class BorrowPair(Reference): - """ - Delays the creation of a borrowed reference. 
- """ - def __init__(self, w_container, w_borrowed): - self.w_container = w_container - self.w_borrowed = w_borrowed - - def get_ref(self, space): - return make_borrowed_ref(space, self.w_container, self.w_borrowed) - - def get_wrapped(self, space): - return self.w_borrowed - -def borrow_from(container, borrowed): - return BorrowPair(container, borrowed) - -#___________________________________________________________ + generic_cpy_call(space, pto.c_tp_dealloc, obj) @cpython_api([rffi.VOIDP], lltype.Signed, error=CANNOT_FAIL) def _Py_HashPointer(space, ptr): diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/pypyintf.py pypy-5.0.1+dfsg/pypy/module/cpyext/pypyintf.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/pypyintf.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/pypyintf.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -from pypy.module.cpyext.api import cpython_api -from pypy.module.cpyext.pyobject import PyObject, borrow_from - - -@cpython_api([PyObject, PyObject], PyObject) -def PyPy_Borrow(space, w_parentobj, w_obj): - """Returns a borrowed reference to 'obj', borrowing from the 'parentobj'. 
- """ - return borrow_from(w_parentobj, w_obj) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/pystrtod.py pypy-5.0.1+dfsg/pypy/module/cpyext/pystrtod.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/pystrtod.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/pystrtod.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,6 +1,6 @@ import errno from pypy.interpreter.error import OperationError -from pypy.module.cpyext.api import cpython_api +from pypy.module.cpyext.api import cpython_api, CONST_STRING from pypy.module.cpyext.pyobject import PyObject from rpython.rlib import rdtoa from rpython.rlib import rfloat @@ -22,7 +22,7 @@ rfloat.DIST_NAN: Py_DTST_NAN } -@cpython_api([rffi.CCHARP, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) +@cpython_api([CONST_STRING, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0) @jit.dont_look_inside # direct use of _get_errno() def PyOS_string_to_double(space, s, endptr, w_overflow_exception): """Convert a string s to a double, raising a Python diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/pytraceback.py pypy-5.0.1+dfsg/pypy/module/cpyext/pytraceback.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/pytraceback.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/pytraceback.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,7 +3,7 @@ PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, Py_ssize_t, cpython_api, bootstrap_function, cpython_struct, build_type_checkers) from pypy.module.cpyext.pyobject import ( - PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) + PyObject, make_ref, from_ref, Py_DecRef, make_typedescr) from pypy.module.cpyext.frameobject import PyFrameObject from rpython.rlib.unroll import unrolling_iterable from pypy.interpreter.error import OperationError @@ -41,7 +41,7 @@ rffi.setintfield(py_traceback, 'c_tb_lasti', traceback.lasti) rffi.setintfield(py_traceback, 'c_tb_lineno',traceback.get_lineno()) -@cpython_api([PyObject], lltype.Void, external=False) 
+@cpython_api([PyObject], lltype.Void, header=None) def traceback_dealloc(space, py_obj): py_traceback = rffi.cast(PyTracebackObject, py_obj) Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_next)) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/sequence.py pypy-5.0.1+dfsg/pypy/module/cpyext/sequence.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/sequence.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/sequence.py 2016-03-19 16:40:12.000000000 +0000 @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, Py_ssize_t) -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject from rpython.rtyper.lltypesystem import rffi, lltype from pypy.objspace.std import listobject, tupleobject @@ -42,15 +42,19 @@ which case o is returned. Use PySequence_Fast_GET_ITEM() to access the members of the result. Returns NULL on failure. If the object is not a sequence, raises TypeError with m as the message text.""" - if (isinstance(w_obj, listobject.W_ListObject) or - isinstance(w_obj, tupleobject.W_TupleObject)): + if isinstance(w_obj, listobject.W_ListObject): + # make sure we can return a borrowed obj from PySequence_Fast_GET_ITEM + # XXX how does this interact with CPyListStrategy? + w_obj.ensure_object_strategy() + return w_obj + if isinstance(w_obj, tupleobject.W_TupleObject): return w_obj try: return tupleobject.W_TupleObject(space.fixedview(w_obj)) except OperationError: raise OperationError(space.w_TypeError, space.wrap(rffi.charp2str(m))) -@cpython_api([PyObject, Py_ssize_t], PyObject) +@cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True) def PySequence_Fast_GET_ITEM(space, w_obj, index): """Return the ith element of o, assuming that o was returned by PySequence_Fast(), o is not NULL, and that i is within bounds. 
@@ -60,7 +64,7 @@ else: assert isinstance(w_obj, tupleobject.W_TupleObject) w_res = w_obj.wrappeditems[index] - return borrow_from(w_obj, w_res) + return w_res # borrowed ref @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def PySequence_Fast_GET_SIZE(space, w_obj): diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/setobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/setobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/setobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/setobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,7 +3,7 @@ from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, build_type_checkers) from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, Py_DecRef, - borrow_from, make_ref, from_ref) + make_ref, from_ref) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.objspace.std.setobject import W_SetObject, newset diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/sliceobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/sliceobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/sliceobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/sliceobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -36,7 +36,7 @@ py_slice.c_stop = make_ref(space, w_obj.w_stop) py_slice.c_step = make_ref(space, w_obj.w_step) -@cpython_api([PyObject], lltype.Void, external=False) +@cpython_api([PyObject], lltype.Void, header=None) def slice_dealloc(space, py_obj): """Frees allocated PyStringObject resources. 
""" diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/slotdefs.py pypy-5.0.1+dfsg/pypy/module/cpyext/slotdefs.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/slotdefs.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/slotdefs.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,14 +4,14 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, readbufferproc) -from pypy.module.cpyext.pyobject import from_ref +from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt @@ -31,17 +31,16 @@ Py_GE = 5 -def check_num_args(space, ob, n): - from pypy.module.cpyext.tupleobject import PyTuple_CheckExact, \ - PyTuple_GET_SIZE - if not PyTuple_CheckExact(space, ob): +def check_num_args(space, w_ob, n): + from pypy.module.cpyext.tupleobject import PyTuple_CheckExact + if not PyTuple_CheckExact(space, w_ob): raise OperationError(space.w_SystemError, space.wrap("PyArg_UnpackTuple() argument list is not a tuple")) - if n == PyTuple_GET_SIZE(space, ob): + if n == space.len_w(w_ob): return raise oefmt(space.w_TypeError, "expected %d arguments, got %d", - n, PyTuple_GET_SIZE(space, ob)) + n, space.len_w(w_ob)) def wrap_init(space, w_self, w_args, func, w_kwargs): func_init = rffi.cast(initproc, func) @@ -65,22 +64,24 @@ func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = 
space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, w_self, args_w[0]) def wrap_binaryfunc_r(space, w_self, w_args, func): func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, args_w[0], w_self) def wrap_inquirypred(space, w_self, w_args, func): @@ -307,7 +308,7 @@ return space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) -@cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, external=False) +@cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, header=None) def slot_tp_new(space, type, w_args, w_kwds): from pypy.module.cpyext.tupleobject import PyTuple_Check pyo = rffi.cast(PyObject, type) @@ -318,30 +319,30 @@ w_args_new = space.newtuple(args_w) return space.call(w_func, w_args_new, w_kwds) -@cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1, external=False) +@cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1, header=None) def slot_tp_init(space, w_self, w_args, w_kwds): w_descr = space.lookup(w_self, '__init__') args = Arguments.frompacked(space, w_args, w_kwds) space.get_and_call_args(w_descr, w_self, args) return 0 -@cpython_api([PyObject, PyObject, PyObject], PyObject, external=False) +@cpython_api([PyObject, PyObject, PyObject], PyObject, 
header=None) def slot_tp_call(space, w_self, w_args, w_kwds): return space.call(w_self, w_args, w_kwds) -@cpython_api([PyObject], PyObject, external=False) +@cpython_api([PyObject], PyObject, header=None) def slot_tp_str(space, w_self): return space.str(w_self) -@cpython_api([PyObject], PyObject, external=False) +@cpython_api([PyObject], PyObject, header=None) def slot_nb_int(space, w_self): return space.int(w_self) -@cpython_api([PyObject], PyObject, external=False) +@cpython_api([PyObject], PyObject, header=None) def slot_tp_iter(space, w_self): return space.iter(w_self) -@cpython_api([PyObject], PyObject, external=False) +@cpython_api([PyObject], PyObject, header=None) def slot_tp_iternext(space, w_self): return space.next(w_self) @@ -369,7 +370,7 @@ return @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, - error=-1, external=True) # XXX should not be exported + error=-1) # XXX should be header=None @func_renamer("cpyext_tp_setattro_%s" % (typedef.name,)) def slot_tp_setattro(space, w_self, w_name, w_value): if w_value is not None: @@ -378,6 +379,16 @@ space.call_function(delattr_fn, w_self, w_name) return 0 api_func = slot_tp_setattro.api_func + elif name == 'tp_getattro': + getattr_fn = w_type.getdictvalue(space, '__getattribute__') + if getattr_fn is None: + return + + @cpython_api([PyObject, PyObject], PyObject) + @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) + def slot_tp_getattro(space, w_self, w_name): + return space.call_function(getattr_fn, w_self, w_name) + api_func = slot_tp_getattro.api_func else: return diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/src/getargs.c pypy-5.0.1+dfsg/pypy/module/cpyext/src/getargs.c --- pypy-4.0.1+dfsg/pypy/module/cpyext/src/getargs.c 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/src/getargs.c 2016-03-19 16:40:12.000000000 +0000 @@ -442,7 +442,7 @@ strncpy(msgbuf, "is not retrievable", bufsize); return msgbuf; } - PyPy_Borrow(arg, item); + //PyPy_Borrow(arg, item); msg = 
convertitem(item, &format, p_va, flags, levels+1, msgbuf, bufsize, freelist); /* PySequence_GetItem calls tp->sq_item, which INCREFs */ diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/state.py pypy-5.0.1+dfsg/pypy/module/cpyext/state.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/state.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/state.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,8 +1,11 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError +from pypy.interpreter.executioncontext import AsyncAction from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.rdynload import DLLHANDLE +from rpython.rlib import rawrefcount import sys class State: @@ -11,6 +14,8 @@ self.reset() self.programname = lltype.nullptr(rffi.CCHARP.TO) self.version = lltype.nullptr(rffi.CCHARP.TO) + pyobj_dealloc_action = PyObjDeallocAction(space) + self.dealloc_trigger = lambda: pyobj_dealloc_action.fire() def reset(self): from pypy.module.cpyext.modsupport import PyMethodDef @@ -74,13 +79,15 @@ "This function is called when the program really starts" from pypy.module.cpyext.typeobject import setup_new_method_def - from pypy.module.cpyext.pyobject import RefcountState from pypy.module.cpyext.api import INIT_FUNCTIONS + from pypy.module.cpyext.api import init_static_data_translated - setup_new_method_def(space) if we_are_translated(): - refcountstate = space.fromcache(RefcountState) - refcountstate.init_r2w_from_w2r() + rawrefcount.init(llhelper(rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER, + self.dealloc_trigger)) + init_static_data_translated(space) + + setup_new_method_def(space) for func in INIT_FUNCTIONS: func(space) @@ -133,3 +140,17 @@ w_dict = w_mod.getdict(space) w_copy = space.call_method(w_dict, 'copy') self.extensions[path] = w_copy + + +class PyObjDeallocAction(AsyncAction): + """An action that invokes 
_Py_Dealloc() on the dying PyObjects. + """ + + def perform(self, executioncontext, frame): + from pypy.module.cpyext.pyobject import PyObject, decref + + while True: + py_obj = rawrefcount.next_dead(PyObject) + if not py_obj: + break + decref(self.space, py_obj) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/stringobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/stringobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/stringobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/stringobject.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,319 +0,0 @@ -from pypy.interpreter.error import OperationError -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, bootstrap_function, build_type_checkers, - PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) -from pypy.module.cpyext.pyerrors import PyErr_BadArgument -from pypy.module.cpyext.pyobject import ( - PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, - make_typedescr, get_typedescr) - -## -## Implementation of PyStringObject -## ================================ -## -## The problem -## ----------- -## -## PyString_AsString() must return a (non-movable) pointer to the underlying -## buffer, whereas pypy strings are movable. C code may temporarily store -## this address and use it, as long as it owns a reference to the PyObject. -## There is no "release" function to specify that the pointer is not needed -## any more. -## -## Also, the pointer may be used to fill the initial value of string. This is -## valid only when the string was just allocated, and is not used elsewhere. -## -## Solution -## -------- -## -## PyStringObject contains two additional members: the size and a pointer to a -## char buffer; it may be NULL. -## -## - A string allocated by pypy will be converted into a PyStringObject with a -## NULL buffer. 
The first time PyString_AsString() is called, memory is -## allocated (with flavor='raw') and content is copied. -## -## - A string allocated with PyString_FromStringAndSize(NULL, size) will -## allocate a PyStringObject structure, and a buffer with the specified -## size, but the reference won't be stored in the global map; there is no -## corresponding object in pypy. When from_ref() or Py_INCREF() is called, -## the pypy string is created, and added to the global map of tracked -## objects. The buffer is then supposed to be immutable. -## -## - _PyString_Resize() works only on not-yet-pypy'd strings, and returns a -## similar object. -## -## - PyString_Size() doesn't need to force the object. -## -## - There could be an (expensive!) check in from_ref() that the buffer still -## corresponds to the pypy gc-managed string. -## - -PyStringObjectStruct = lltype.ForwardReference() -PyStringObject = lltype.Ptr(PyStringObjectStruct) -PyStringObjectFields = PyObjectFields + \ - (("buffer", rffi.CCHARP), ("size", Py_ssize_t)) -cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) - -@bootstrap_function -def init_stringobject(space): - "Type description of PyStringObject" - make_typedescr(space.w_str.instancetypedef, - basestruct=PyStringObject.TO, - attach=string_attach, - dealloc=string_dealloc, - realize=string_realize) - -PyString_Check, PyString_CheckExact = build_type_checkers("String", "w_str") - -def new_empty_str(space, length): - """ - Allocatse a PyStringObject and its buffer, but without a corresponding - interpreter object. The buffer may be mutated, until string_realize() is - called. 
- """ - typedescr = get_typedescr(space.w_str.instancetypedef) - py_obj = typedescr.allocate(space, space.w_str) - py_str = rffi.cast(PyStringObject, py_obj) - - buflen = length + 1 - py_str.c_size = length - py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, - flavor='raw', zero=True) - return py_str - -def string_attach(space, py_obj, w_obj): - """ - Fills a newly allocated PyStringObject with the given string object. The - buffer must not be modified. - """ - py_str = rffi.cast(PyStringObject, py_obj) - py_str.c_size = len(space.str_w(w_obj)) - py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO) - -def string_realize(space, py_obj): - """ - Creates the string in the interpreter. The PyStringObject buffer must not - be modified after this call. - """ - py_str = rffi.cast(PyStringObject, py_obj) - s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size) - w_obj = space.wrap(s) - track_reference(space, py_obj, w_obj) - return w_obj - -@cpython_api([PyObject], lltype.Void, external=False) -def string_dealloc(space, py_obj): - """Frees allocated PyStringObject resources. 
- """ - py_str = rffi.cast(PyStringObject, py_obj) - if py_str.c_buffer: - lltype.free(py_str.c_buffer, flavor="raw") - from pypy.module.cpyext.object import PyObject_dealloc - PyObject_dealloc(space, py_obj) - -#_______________________________________________________________________ - -@cpython_api([CONST_STRING, Py_ssize_t], PyObject) -def PyString_FromStringAndSize(space, char_p, length): - if char_p: - s = rffi.charpsize2str(char_p, length) - return make_ref(space, space.wrap(s)) - else: - return rffi.cast(PyObject, new_empty_str(space, length)) - -@cpython_api([CONST_STRING], PyObject) -def PyString_FromString(space, char_p): - s = rffi.charp2str(char_p) - return space.wrap(s) - -@cpython_api([PyObject], rffi.CCHARP, error=0) -def PyString_AsString(space, ref): - if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: - pass # typecheck returned "ok" without forcing 'ref' at all - elif not PyString_Check(space, ref): # otherwise, use the alternate way - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsString only support strings")) - ref_str = rffi.cast(PyStringObject, ref) - if not ref_str.c_buffer: - # copy string buffer - w_str = from_ref(space, ref) - s = space.str_w(w_str) - ref_str.c_buffer = rffi.str2charp(s) - return ref_str.c_buffer - -@cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) -def PyString_AsStringAndSize(space, ref, buffer, length): - if not PyString_Check(space, ref): - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsStringAndSize only support strings")) - ref_str = rffi.cast(PyStringObject, ref) - if not ref_str.c_buffer: - # copy string buffer - w_str = from_ref(space, ref) - s = space.str_w(w_str) - ref_str.c_buffer = rffi.str2charp(s) - buffer[0] = ref_str.c_buffer - if length: - length[0] = ref_str.c_size - else: - i = 0 - while ref_str.c_buffer[i] != '\0': - i += 1 - if i != ref_str.c_size: - raise OperationError(space.w_TypeError, space.wrap( 
- "expected string without null bytes")) - return 0 - -@cpython_api([PyObject], Py_ssize_t, error=-1) -def PyString_Size(space, ref): - if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: - ref = rffi.cast(PyStringObject, ref) - return ref.c_size - else: - w_obj = from_ref(space, ref) - return space.len_w(w_obj) - -@cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) -def _PyString_Resize(space, ref, newsize): - """A way to resize a string object even though it is "immutable". Only use this to - build up a brand new string object; don't use this if the string may already be - known in other parts of the code. It is an error to call this function if the - refcount on the input string object is not one. Pass the address of an existing - string object as an lvalue (it may be written into), and the new size desired. - On success, *string holds the resized string object and 0 is returned; - the address in *string may differ from its input value. If the reallocation - fails, the original string object at *string is deallocated, *string is - set to NULL, a memory exception is set, and -1 is returned. 
- """ - # XXX always create a new string so far - py_str = rffi.cast(PyStringObject, ref[0]) - if not py_str.c_buffer: - raise OperationError(space.w_SystemError, space.wrap( - "_PyString_Resize called on already created string")) - try: - py_newstr = new_empty_str(space, newsize) - except MemoryError: - Py_DecRef(space, ref[0]) - ref[0] = lltype.nullptr(PyObject.TO) - raise - to_cp = newsize - oldsize = py_str.c_size - if oldsize < newsize: - to_cp = oldsize - for i in range(to_cp): - py_newstr.c_buffer[i] = py_str.c_buffer[i] - Py_DecRef(space, ref[0]) - ref[0] = rffi.cast(PyObject, py_newstr) - return 0 - -@cpython_api([PyObject, PyObject], rffi.INT, error=CANNOT_FAIL) -def _PyString_Eq(space, w_str1, w_str2): - return space.eq_w(w_str1, w_str2) - -@cpython_api([PyObjectP, PyObject], lltype.Void) -def PyString_Concat(space, ref, w_newpart): - """Create a new string object in *string containing the contents of newpart - appended to string; the caller will own the new reference. The reference to - the old value of string will be stolen. If the new string cannot be created, - the old reference to string will still be discarded and the value of - *string will be set to NULL; the appropriate exception will be set.""" - - if not ref[0]: - return - - if w_newpart is None or not PyString_Check(space, ref[0]) or \ - not PyString_Check(space, w_newpart): - Py_DecRef(space, ref[0]) - ref[0] = lltype.nullptr(PyObject.TO) - return - w_str = from_ref(space, ref[0]) - w_newstr = space.add(w_str, w_newpart) - Py_DecRef(space, ref[0]) - ref[0] = make_ref(space, w_newstr) - -@cpython_api([PyObjectP, PyObject], lltype.Void) -def PyString_ConcatAndDel(space, ref, newpart): - """Create a new string object in *string containing the contents of newpart - appended to string. 
This version decrements the reference count of newpart.""" - PyString_Concat(space, ref, newpart) - Py_DecRef(space, newpart) - -@cpython_api([PyObject, PyObject], PyObject) -def PyString_Format(space, w_format, w_args): - """Return a new string object from format and args. Analogous to format % - args. The args argument must be a tuple.""" - return space.mod(w_format, w_args) - -@cpython_api([CONST_STRING], PyObject) -def PyString_InternFromString(space, string): - """A combination of PyString_FromString() and - PyString_InternInPlace(), returning either a new string object that has - been interned, or a new ("owned") reference to an earlier interned string - object with the same value.""" - s = rffi.charp2str(string) - return space.new_interned_str(s) - -@cpython_api([PyObjectP], lltype.Void) -def PyString_InternInPlace(space, string): - """Intern the argument *string in place. The argument must be the - address of a pointer variable pointing to a Python string object. - If there is an existing interned string that is the same as - *string, it sets *string to it (decrementing the reference count - of the old string object and incrementing the reference count of - the interned string object), otherwise it leaves *string alone and - interns it (incrementing its reference count). (Clarification: - even though there is a lot of talk about reference counts, think - of this function as reference-count-neutral; you own the object - after the call if and only if you owned it before the call.) - - This function is not available in 3.x and does not have a PyBytes - alias.""" - w_str = from_ref(space, string[0]) - w_str = space.new_interned_w_str(w_str) - Py_DecRef(space, string[0]) - string[0] = make_ref(space, w_str) - -@cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) -def PyString_AsEncodedObject(space, w_str, encoding, errors): - """Encode a string object using the codec registered for encoding and return - the result as Python object. 
encoding and errors have the same meaning as - the parameters of the same name in the string encode() method. The codec to - be used is looked up using the Python codec registry. Return NULL if an - exception was raised by the codec. - - This function is not available in 3.x and does not have a PyBytes alias.""" - if not PyString_Check(space, w_str): - PyErr_BadArgument(space) - - w_encoding = w_errors = None - if encoding: - w_encoding = space.wrap(rffi.charp2str(encoding)) - if errors: - w_errors = space.wrap(rffi.charp2str(errors)) - return space.call_method(w_str, 'encode', w_encoding, w_errors) - -@cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) -def PyString_AsDecodedObject(space, w_str, encoding, errors): - """Decode a string object by passing it to the codec registered - for encoding and return the result as Python object. encoding and - errors have the same meaning as the parameters of the same name in - the string encode() method. The codec to be used is looked up - using the Python codec registry. Return NULL if an exception was - raised by the codec. 
- - This function is not available in 3.x and does not have a PyBytes alias.""" - if not PyString_Check(space, w_str): - PyErr_BadArgument(space) - - w_encoding = w_errors = None - if encoding: - w_encoding = space.wrap(rffi.charp2str(encoding)) - if errors: - w_errors = space.wrap(rffi.charp2str(errors)) - return space.call_method(w_str, "decode", w_encoding, w_errors) - -@cpython_api([PyObject, PyObject], PyObject) -def _PyString_Join(space, w_sep, w_seq): - return space.call_method(w_sep, 'join', w_seq) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/structmember.py pypy-5.0.1+dfsg/pypy/module/cpyext/structmember.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/structmember.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/structmember.py 2016-03-19 16:40:12.000000000 +0000 @@ -6,7 +6,7 @@ from pypy.module.cpyext.intobject import PyInt_AsLong, PyInt_AsUnsignedLong from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, from_ref, make_ref -from pypy.module.cpyext.stringobject import ( +from pypy.module.cpyext.bytesobject import ( PyString_FromString, PyString_FromStringAndSize) from pypy.module.cpyext.floatobject import PyFloat_AsDouble from pypy.module.cpyext.longobject import ( @@ -31,8 +31,10 @@ (T_PYSSIZET, rffi.SSIZE_T, PyLong_AsSsize_t), ]) +_HEADER = 'pypy_structmember_decl.h' -@cpython_api([PyObject, lltype.Ptr(PyMemberDef)], PyObject) + +@cpython_api([PyObject, lltype.Ptr(PyMemberDef)], PyObject, header=_HEADER) def PyMember_GetOne(space, obj, w_member): addr = rffi.cast(ADDR, obj) addr += w_member.c_offset @@ -83,7 +85,8 @@ return w_result -@cpython_api([PyObject, lltype.Ptr(PyMemberDef), PyObject], rffi.INT_real, error=-1) +@cpython_api([PyObject, lltype.Ptr(PyMemberDef), PyObject], rffi.INT_real, + error=-1, header=_HEADER) def PyMember_SetOne(space, obj, w_member, w_value): addr = rffi.cast(ADDR, obj) addr += w_member.c_offset diff -Nru 
pypy-4.0.1+dfsg/pypy/module/cpyext/sysmodule.py pypy-5.0.1+dfsg/pypy/module/cpyext/sysmodule.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/sysmodule.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/sysmodule.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,16 +1,16 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import CANNOT_FAIL, cpython_api, CONST_STRING -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject -@cpython_api([CONST_STRING], PyObject, error=CANNOT_FAIL) +@cpython_api([CONST_STRING], PyObject, error=CANNOT_FAIL, result_borrowed=True) def PySys_GetObject(space, name): """Return the object name from the sys module or NULL if it does not exist, without setting an exception.""" name = rffi.charp2str(name) w_dict = space.sys.getdict(space) w_obj = space.finditem_str(w_dict, name) - return borrow_from(None, w_obj) + return w_obj # borrowed ref: kept alive in space.sys.w_dict @cpython_api([CONST_STRING, PyObject], rffi.INT_real, error=-1) def PySys_SetObject(space, name, w_obj): diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/foo.c pypy-5.0.1+dfsg/pypy/module/cpyext/test/foo.c --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/foo.c 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/foo.c 2016-03-19 16:40:12.000000000 +0000 @@ -623,11 +623,17 @@ }; +static PyObject *size_of_instances(PyObject *self, PyObject *t) +{ + return PyInt_FromLong(((PyTypeObject *)t)->tp_basicsize); +} + /* List of functions exported by this module */ static PyMethodDef foo_functions[] = { {"new", (PyCFunction)foo_new, METH_NOARGS, NULL}, {"newCustom", (PyCFunction)newCustom, METH_NOARGS, NULL}, + {"size_of_instances", (PyCFunction)size_of_instances, METH_O, NULL}, {NULL, NULL} /* Sentinel */ }; diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_api.py 
pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_api.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_api.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_api.py 2016-03-19 16:40:12.000000000 +0000 @@ -6,6 +6,7 @@ from pypy.module.cpyext.test.test_cpyext import freeze_refcnts, LeakCheckingTest PyObject = api.PyObject from pypy.interpreter.error import OperationError +from rpython.rlib import rawrefcount import os @api.cpython_api([PyObject], lltype.Void) @@ -36,6 +37,9 @@ cls.api = CAPI() CAPI.__dict__.update(api.INTERPLEVEL_API) + print 'DONT_FREE_ANY_MORE' + rawrefcount._dont_free_any_more() + def raises(self, space, api, expected_exc, f, *args): if not callable(f): raise Exception("%s is not callable" % (f,)) @@ -60,7 +64,7 @@ raise try: - del self.space.getexecutioncontext().cpyext_threadstate + self.space.getexecutioncontext().cleanup_cpyext_threadstate() except AttributeError: pass @@ -98,7 +102,7 @@ def test_copy_header_files(tmpdir): - api.copy_header_files(tmpdir) + api.copy_header_files(tmpdir, True) def check(name): f = tmpdir.join(name) assert f.check(file=True) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_borrow.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_borrow.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_borrow.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_borrow.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,20 +1,9 @@ import py from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.pyobject import make_ref, borrow_from, RefcountState +from pypy.module.cpyext.pyobject import make_ref -class TestBorrowing(BaseApiTest): - def test_borrowing(self, space, api): - w_int = space.wrap(1) - w_tuple = space.newtuple([w_int]) - api.Py_IncRef(w_tuple) - one_pyo = borrow_from(w_tuple, w_int).get_ref(space) - api.Py_DecRef(w_tuple) - state = 
space.fromcache(RefcountState) - state.print_refcounts() - py.test.raises(AssertionError, api.Py_DecRef, one_pyo) - class AppTestBorrow(AppTestCpythonExtensionBase): def test_tuple_borrowing(self): module = self.import_extension('foo', [ @@ -76,4 +65,5 @@ ]) wr = module.run() # check that the set() object was deallocated + self.debug_collect() assert wr() is None diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_bytesobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_bytesobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_bytesobject.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_bytesobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,329 @@ +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +from pypy.module.cpyext.bytesobject import new_empty_str, PyStringObject +from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP +from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref + +import py +import sys + +class AppTestStringObject(AppTestCpythonExtensionBase): + def test_stringobject(self): + module = self.import_extension('foo', [ + ("get_hello1", "METH_NOARGS", + """ + return PyString_FromStringAndSize( + "Hello world", 11); + """), + ("get_hello2", "METH_NOARGS", + """ + return PyString_FromString("Hello world"); + """), + ("test_Size", "METH_NOARGS", + """ + PyObject* s = PyString_FromString("Hello world"); + int result = 0; + + if(PyString_Size(s) == 11) { + result = 1; + } + if(s->ob_type->tp_basicsize != sizeof(void*)*5) + result = 0; + Py_DECREF(s); + return PyBool_FromLong(result); + """), + ("test_Size_exception", "METH_NOARGS", + """ + PyObject* f = PyFloat_FromDouble(1.0); + Py_ssize_t size = PyString_Size(f); + + Py_DECREF(f); + return NULL; + """), + ("test_is_string", "METH_VARARGS", + """ + return 
PyBool_FromLong(PyString_Check(PyTuple_GetItem(args, 0))); + """)]) + assert module.get_hello1() == 'Hello world' + assert module.get_hello2() == 'Hello world' + assert module.test_Size() + raises(TypeError, module.test_Size_exception) + + assert module.test_is_string("") + assert not module.test_is_string(()) + + def test_string_buffer_init(self): + module = self.import_extension('foo', [ + ("getstring", "METH_NOARGS", + """ + PyObject *s, *t; + char* c; + Py_ssize_t len; + + s = PyString_FromStringAndSize(NULL, 4); + if (s == NULL) + return NULL; + t = PyString_FromStringAndSize(NULL, 3); + if (t == NULL) + return NULL; + Py_DECREF(t); + c = PyString_AsString(s); + c[0] = 'a'; + c[1] = 'b'; + c[3] = 'c'; + return s; + """), + ]) + s = module.getstring() + assert len(s) == 4 + assert s == 'ab\x00c' + + + + def test_AsString(self): + module = self.import_extension('foo', [ + ("getstring", "METH_NOARGS", + """ + PyObject* s1 = PyString_FromStringAndSize("test", 4); + char* c = PyString_AsString(s1); + PyObject* s2 = PyString_FromStringAndSize(c, 4); + Py_DECREF(s1); + return s2; + """), + ]) + s = module.getstring() + assert s == 'test' + + def test_py_string_as_string(self): + module = self.import_extension('foo', [ + ("string_as_string", "METH_VARARGS", + ''' + return PyString_FromStringAndSize(PyString_AsString( + PyTuple_GetItem(args, 0)), 4); + ''' + )]) + assert module.string_as_string("huheduwe") == "huhe" + + def test_py_string_as_string_None(self): + module = self.import_extension('foo', [ + ("string_None", "METH_VARARGS", + ''' + return PyString_AsString(Py_None); + ''' + )]) + raises(TypeError, module.string_None) + + def test_AsStringAndSize(self): + module = self.import_extension('foo', [ + ("getstring", "METH_NOARGS", + """ + PyObject* s1 = PyString_FromStringAndSize("te\\0st", 5); + char *buf; + Py_ssize_t len; + if (PyString_AsStringAndSize(s1, &buf, &len) < 0) + return NULL; + if (len != 5) { + PyErr_SetString(PyExc_AssertionError, "Bad Length"); + 
return NULL; + } + if (PyString_AsStringAndSize(s1, &buf, NULL) >= 0) { + PyErr_SetString(PyExc_AssertionError, "Should Have failed"); + return NULL; + } + PyErr_Clear(); + Py_DECREF(s1); + Py_INCREF(Py_None); + return Py_None; + """), + ]) + module.getstring() + + def test_format_v(self): + module = self.import_extension('foo', [ + ("test_string_format_v", "METH_VARARGS", + ''' + return helper("bla %d ble %s\\n", + PyInt_AsLong(PyTuple_GetItem(args, 0)), + PyString_AsString(PyTuple_GetItem(args, 1))); + ''' + ) + ], prologue=''' + PyObject* helper(char* fmt, ...) + { + va_list va; + PyObject* res; + va_start(va, fmt); + res = PyString_FromFormatV(fmt, va); + va_end(va); + return res; + } + ''') + res = module.test_string_format_v(1, "xyz") + assert res == "bla 1 ble xyz\n" + + def test_format(self): + module = self.import_extension('foo', [ + ("test_string_format", "METH_VARARGS", + ''' + return PyString_FromFormat("bla %d ble %s\\n", + PyInt_AsLong(PyTuple_GetItem(args, 0)), + PyString_AsString(PyTuple_GetItem(args, 1))); + ''' + ) + ]) + res = module.test_string_format(1, "xyz") + assert res == "bla 1 ble xyz\n" + + def test_intern_inplace(self): + module = self.import_extension('foo', [ + ("test_intern_inplace", "METH_O", + ''' + PyObject *s = args; + Py_INCREF(s); + PyString_InternInPlace(&s); + return s; + ''' + ) + ]) + # This does not test much, but at least the refcounts are checked. 
+ assert module.test_intern_inplace('s') == 's' + +class TestString(BaseApiTest): + def test_string_resize(self, space, api): + py_str = new_empty_str(space, 10) + ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + py_str.c_buffer[0] = 'a' + py_str.c_buffer[1] = 'b' + py_str.c_buffer[2] = 'c' + ar[0] = rffi.cast(PyObject, py_str) + api._PyString_Resize(ar, 3) + py_str = rffi.cast(PyStringObject, ar[0]) + assert py_str.c_size == 3 + assert py_str.c_buffer[1] == 'b' + assert py_str.c_buffer[3] == '\x00' + # the same for growing + ar[0] = rffi.cast(PyObject, py_str) + api._PyString_Resize(ar, 10) + py_str = rffi.cast(PyStringObject, ar[0]) + assert py_str.c_size == 10 + assert py_str.c_buffer[1] == 'b' + assert py_str.c_buffer[10] == '\x00' + Py_DecRef(space, ar[0]) + lltype.free(ar, flavor='raw') + + def test_string_buffer(self, space, api): + py_str = new_empty_str(space, 10) + c_buf = py_str.c_ob_type.c_tp_as_buffer + assert c_buf + py_obj = rffi.cast(PyObject, py_str) + assert c_buf.c_bf_getsegcount(py_obj, lltype.nullptr(Py_ssize_tP.TO)) == 1 + ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') + assert c_buf.c_bf_getsegcount(py_obj, ref) == 1 + assert ref[0] == 10 + lltype.free(ref, flavor='raw') + ref = lltype.malloc(rffi.VOIDPP.TO, 1, flavor='raw') + assert c_buf.c_bf_getreadbuffer(py_obj, 0, ref) == 10 + lltype.free(ref, flavor='raw') + Py_DecRef(space, py_obj) + + def test_Concat(self, space, api): + ref = make_ref(space, space.wrap('abc')) + ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + ptr[0] = ref + prev_refcnt = ref.c_ob_refcnt + api.PyString_Concat(ptr, space.wrap('def')) + assert ref.c_ob_refcnt == prev_refcnt - 1 + assert space.str_w(from_ref(space, ptr[0])) == 'abcdef' + api.PyString_Concat(ptr, space.w_None) + assert not ptr[0] + ptr[0] = lltype.nullptr(PyObject.TO) + api.PyString_Concat(ptr, space.wrap('def')) # should not crash + lltype.free(ptr, flavor='raw') + + def test_ConcatAndDel(self, space, api): + ref1 = make_ref(space, 
space.wrap('abc')) + ref2 = make_ref(space, space.wrap('def')) + ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + ptr[0] = ref1 + prev_refcnf = ref2.c_ob_refcnt + api.PyString_ConcatAndDel(ptr, ref2) + assert space.str_w(from_ref(space, ptr[0])) == 'abcdef' + assert ref2.c_ob_refcnt == prev_refcnf - 1 + Py_DecRef(space, ptr[0]) + ptr[0] = lltype.nullptr(PyObject.TO) + ref2 = make_ref(space, space.wrap('foo')) + prev_refcnf = ref2.c_ob_refcnt + api.PyString_ConcatAndDel(ptr, ref2) # should not crash + assert ref2.c_ob_refcnt == prev_refcnf - 1 + lltype.free(ptr, flavor='raw') + + def test_format(self, space, api): + assert "1 2" == space.unwrap( + api.PyString_Format(space.wrap('%s %d'), space.wrap((1, 2)))) + + def test_asbuffer(self, space, api): + bufp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') + lenp = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') + + w_text = space.wrap("text") + assert api.PyObject_AsCharBuffer(w_text, bufp, lenp) == 0 + assert lenp[0] == 4 + assert rffi.charp2str(bufp[0]) == 'text' + + lltype.free(bufp, flavor='raw') + lltype.free(lenp, flavor='raw') + + def test_intern(self, space, api): + buf = rffi.str2charp("test") + w_s1 = api.PyString_InternFromString(buf) + w_s2 = api.PyString_InternFromString(buf) + rffi.free_charp(buf) + assert w_s1 is w_s2 + + def test_AsEncodedObject(self, space, api): + ptr = space.wrap('abc') + + errors = rffi.str2charp("strict") + + encoding = rffi.str2charp("hex") + res = api.PyString_AsEncodedObject( + ptr, encoding, errors) + assert space.unwrap(res) == "616263" + + res = api.PyString_AsEncodedObject( + ptr, encoding, lltype.nullptr(rffi.CCHARP.TO)) + assert space.unwrap(res) == "616263" + rffi.free_charp(encoding) + + encoding = rffi.str2charp("unknown_encoding") + self.raises(space, api, LookupError, api.PyString_AsEncodedObject, + ptr, encoding, errors) + rffi.free_charp(encoding) + + rffi.free_charp(errors) + + res = api.PyString_AsEncodedObject( + ptr, lltype.nullptr(rffi.CCHARP.TO), 
lltype.nullptr(rffi.CCHARP.TO)) + assert space.unwrap(res) == "abc" + + self.raises(space, api, TypeError, api.PyString_AsEncodedObject, + space.wrap(2), lltype.nullptr(rffi.CCHARP.TO), lltype.nullptr(rffi.CCHARP.TO) + ) + + def test_AsDecodedObject(self, space, api): + w_str = space.wrap('caf\xe9') + encoding = rffi.str2charp("latin-1") + w_res = api.PyString_AsDecodedObject(w_str, encoding, None) + rffi.free_charp(encoding) + assert space.unwrap(w_res) == u"caf\xe9" + + def test_eq(self, space, api): + assert 1 == api._PyString_Eq(space.wrap("hello"), space.wrap("hello")) + assert 0 == api._PyString_Eq(space.wrap("hello"), space.wrap("world")) + + def test_join(self, space, api): + w_sep = space.wrap('') + w_seq = space.wrap(['a', 'b']) + w_joined = api._PyString_Join(w_sep, w_seq) + assert space.unwrap(w_joined) == 'ab' diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_cpyext.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_cpyext.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_cpyext.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_cpyext.py 2016-03-19 16:40:12.000000000 +0000 @@ -14,7 +14,7 @@ from rpython.tool.udir import udir from pypy.module.cpyext import api from pypy.module.cpyext.state import State -from pypy.module.cpyext.pyobject import RefcountState +from pypy.module.cpyext.pyobject import debug_collect from pypy.module.cpyext.pyobject import Py_DecRef, InvalidPointerException from rpython.tool.identity_dict import identity_dict from rpython.tool import leakfinder @@ -73,7 +73,9 @@ else: kwds["link_files"] = [str(api_library + '.so')] if sys.platform.startswith('linux'): - kwds["compile_extra"]=["-Werror=implicit-function-declaration"] + kwds["compile_extra"]=["-Werror=implicit-function-declaration", + "-g", "-O0"] + kwds["link_extra"]=["-g"] modname = modname.split('.')[-1] eci = ExternalCompilationInfo( @@ -92,6 +94,7 @@ return str(pydname) def freeze_refcnts(self): + return #ZZZ state = 
self.space.fromcache(RefcountState) self.frozen_refcounts = {} for w_obj, obj in state.py_objects_w2r.iteritems(): @@ -109,6 +112,7 @@ @staticmethod def cleanup_references(space): + return #ZZZ state = space.fromcache(RefcountState) import gc; gc.collect() @@ -127,10 +131,11 @@ state.reset_borrowed_references() def check_and_print_leaks(self): + debug_collect() # check for sane refcnts import gc - if not self.enable_leak_checking: + if 1: #ZZZ not self.enable_leak_checking: leakfinder.stop_tracking_allocations(check=False) return False @@ -195,6 +200,9 @@ "the test actually passed in the first place; if it failed " "it is likely to reach this place.") + def test_only_import(self): + import cpyext + def test_load_error(self): import cpyext raises(ImportError, cpyext.load_module, "missing.file", "foo") @@ -212,8 +220,8 @@ cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts - state = cls.space.fromcache(RefcountState) - state.non_heaptypes_w[:] = [] + #state = cls.space.fromcache(RefcountState) ZZZ + #state.non_heaptypes_w[:] = [] def setup_method(self, func): @unwrap_spec(name=str) @@ -348,7 +356,7 @@ interp2app(record_imported_module)) self.w_here = self.space.wrap( str(py.path.local(pypydir)) + '/module/cpyext/test/') - + self.w_debug_collect = self.space.wrap(interp2app(debug_collect)) # create the file lock before we count allocations self.space.call_method(self.space.sys.get("stdout"), "flush") @@ -638,8 +646,8 @@ static PyObject* foo_pi(PyObject* self, PyObject *args) { PyObject *true_obj = Py_True; - int refcnt = true_obj->ob_refcnt; - int refcnt_after; + Py_ssize_t refcnt = true_obj->ob_refcnt; + Py_ssize_t refcnt_after; Py_INCREF(true_obj); Py_INCREF(true_obj); PyBool_Check(true_obj); @@ -647,14 +655,14 @@ Py_DECREF(true_obj); Py_DECREF(true_obj); fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after); - return PyBool_FromLong(refcnt_after == refcnt+2 && refcnt < 3); + 
return PyBool_FromLong(refcnt_after == refcnt + 2); } static PyObject* foo_bar(PyObject* self, PyObject *args) { PyObject *true_obj = Py_True; PyObject *tup = NULL; - int refcnt = true_obj->ob_refcnt; - int refcnt_after; + Py_ssize_t refcnt = true_obj->ob_refcnt; + Py_ssize_t refcnt_after; tup = PyTuple_New(1); Py_INCREF(true_obj); @@ -662,8 +670,10 @@ return NULL; refcnt_after = true_obj->ob_refcnt; Py_DECREF(tup); - fprintf(stderr, "REFCNT2 %i %i\\n", refcnt, refcnt_after); - return PyBool_FromLong(refcnt_after == refcnt); + fprintf(stderr, "REFCNT2 %i %i %i\\n", refcnt, refcnt_after, + true_obj->ob_refcnt); + return PyBool_FromLong(refcnt_after == refcnt + 1 && + refcnt == true_obj->ob_refcnt); } static PyMethodDef methods[] = { @@ -863,3 +873,15 @@ os.unlink('_imported_already') except OSError: pass + + def test_no_structmember(self): + """structmember.h should not be included by default.""" + mod = self.import_extension('foo', [ + ('bar', 'METH_NOARGS', + ''' + /* reuse a name that is #defined in structmember.h */ + int RO; + Py_RETURN_NONE; + ''' + ), + ]) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_dictobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_dictobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_dictobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_dictobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -146,7 +146,7 @@ def test_dictproxy(self, space, api): w_dict = space.sys.get('modules') w_proxy = api.PyDictProxy_New(w_dict) - assert space.is_true(space.contains(w_proxy, space.wrap('sys'))) + assert space.contains_w(w_proxy, space.wrap('sys')) raises(OperationError, space.setitem, w_proxy, space.wrap('sys'), space.w_None) raises(OperationError, space.delitem, diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_floatobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_floatobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_floatobject.py 2015-11-19 19:21:40.000000000 
+0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_floatobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -45,3 +45,35 @@ ]) assert module.from_string() == 1234.56 assert type(module.from_string()) is float + +class AppTestFloatMacros(AppTestCpythonExtensionBase): + def test_return_nan(self): + import math + + module = self.import_extension('foo', [ + ("return_nan", "METH_NOARGS", + "Py_RETURN_NAN;"), + ]) + assert math.isnan(module.return_nan()) + + def test_return_inf(self): + import math + + module = self.import_extension('foo', [ + ("return_inf", "METH_NOARGS", + "Py_RETURN_INF(10);"), + ]) + inf = module.return_inf() + assert inf > 0 + assert math.isinf(inf) + + def test_return_inf_negative(self): + import math + + module = self.import_extension('foo', [ + ("return_neginf", "METH_NOARGS", + "Py_RETURN_INF(-10);"), + ]) + neginf = module.return_neginf() + assert neginf < 0 + assert math.isinf(neginf) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_frameobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_frameobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_frameobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_frameobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -9,6 +9,7 @@ PyObject *py_srcfile = PyString_FromString("filename"); PyObject *py_funcname = PyString_FromString("funcname"); PyObject *py_globals = PyDict_New(); + PyObject *py_locals = PyDict_New(); PyObject *empty_string = PyString_FromString(""); PyObject *empty_tuple = PyTuple_New(0); PyCodeObject *py_code; @@ -39,7 +40,7 @@ PyThreadState_Get(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ + py_locals /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = 48; /* Does not work with CPython */ @@ -51,6 +52,7 @@ Py_XDECREF(empty_string); Py_XDECREF(empty_tuple); Py_XDECREF(py_globals); + Py_XDECREF(py_locals); Py_XDECREF(py_code); 
Py_XDECREF(py_frame); return NULL; diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_funcobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_funcobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_funcobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_funcobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -78,6 +78,11 @@ w_code = api.PyCode_NewEmpty(filename, funcname, 3) assert w_code.co_filename == 'filename' assert w_code.co_firstlineno == 3 + + ref = make_ref(space, w_code) + assert "filename" == space.unwrap( + from_ref(space, rffi.cast(PyCodeObject, ref).c_co_filename)) + api.Py_DecRef(ref) rffi.free_charp(filename) rffi.free_charp(funcname) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_getargs.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_getargs.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_getargs.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_getargs.py 2016-03-19 16:40:12.000000000 +0000 @@ -161,7 +161,9 @@ freed.append('x') raises(TypeError, pybuffer, freestring("string"), freestring("other string"), 42) - import gc; gc.collect() + self.debug_collect() # gc.collect() is not enough in this test: + # we need to check and free the PyObject + # linked to the freestring object as well assert freed == ['x', 'x'] diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_import.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_import.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_import.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_import.py 2016-03-19 16:40:12.000000000 +0000 @@ -21,7 +21,7 @@ def test_getmoduledict(self, space, api): testmod = "_functools" w_pre_dict = api.PyImport_GetModuleDict() - assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod))) + assert not space.contains_w(w_pre_dict, space.wrap(testmod)) with rffi.scoped_str2charp(testmod) as modname: w_module = 
api.PyImport_ImportModule(modname) @@ -29,7 +29,7 @@ assert w_module w_dict = api.PyImport_GetModuleDict() - assert space.is_true(space.contains(w_dict, space.wrap(testmod))) + assert space.contains_w(w_dict, space.wrap(testmod)) def test_reload(self, space, api): stat = api.PyImport_Import(space.wrap("stat")) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_intobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_intobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_intobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_intobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -99,6 +99,7 @@ """), ], prologue=""" + #include "structmember.h" typedef struct { PyObject_HEAD diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_longobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_longobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_longobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_longobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -175,10 +175,26 @@ little_endian, is_signed); """), ]) - assert module.from_bytearray(True, False) == 0x9ABC - assert module.from_bytearray(True, True) == -0x6543 - assert module.from_bytearray(False, False) == 0xBC9A - assert module.from_bytearray(False, True) == -0x4365 + assert module.from_bytearray(True, False) == 0xBC9A + assert module.from_bytearray(True, True) == -0x4366 + assert module.from_bytearray(False, False) == 0x9ABC + assert module.from_bytearray(False, True) == -0x6544 + + def test_frombytearray_2(self): + module = self.import_extension('foo', [ + ("from_bytearray", "METH_VARARGS", + """ + int little_endian, is_signed; + if (!PyArg_ParseTuple(args, "ii", &little_endian, &is_signed)) + return NULL; + return _PyLong_FromByteArray("\x9A\xBC\x41", 3, + little_endian, is_signed); + """), + ]) + assert module.from_bytearray(True, False) == 0x41BC9A + assert module.from_bytearray(True, True) == 0x41BC9A + assert 
module.from_bytearray(False, False) == 0x9ABC41 + assert module.from_bytearray(False, True) == -0x6543BF def test_fromunicode(self): module = self.import_extension('foo', [ diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_ndarrayobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_ndarrayobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_ndarrayobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_ndarrayobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -80,7 +80,7 @@ a0 = scalar(space) assert a0.get_scalar_value().value == 10. - a = api._PyArray_FromAny(a0, NULL, 0, 0, 0, NULL) + a = api._PyArray_FromAny(a0, None, 0, 0, 0, NULL) assert api._PyArray_NDIM(a) == 0 ptr = rffi.cast(rffi.DOUBLEP, api._PyArray_DATA(a)) @@ -88,10 +88,10 @@ def test_FromAny(self, space, api): a = array(space, [10, 5, 3]) - assert api._PyArray_FromAny(a, NULL, 0, 0, 0, NULL) is a - assert api._PyArray_FromAny(a, NULL, 1, 4, 0, NULL) is a + assert api._PyArray_FromAny(a, None, 0, 0, 0, NULL) is a + assert api._PyArray_FromAny(a, None, 1, 4, 0, NULL) is a self.raises(space, api, ValueError, api._PyArray_FromAny, - a, NULL, 4, 5, 0, NULL) + a, None, 4, 5, 0, NULL) def test_FromObject(self, space, api): a = array(space, [10, 5, 3]) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_object.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_object.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_object.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_object.py 2016-03-19 16:40:12.000000000 +0000 @@ -202,7 +202,7 @@ def test_dir(self, space, api): w_dir = api.PyObject_Dir(space.sys) assert space.isinstance_w(w_dir, space.w_list) - assert space.is_true(space.contains(w_dir, space.wrap('modules'))) + assert space.contains_w(w_dir, space.wrap('modules')) class AppTestObject(AppTestCpythonExtensionBase): def setup_class(cls): diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_stringobject.py 
pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_stringobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_stringobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_stringobject.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,325 +0,0 @@ -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase -from pypy.module.cpyext.stringobject import new_empty_str, PyStringObject -from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP -from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref - -import py -import sys - -class AppTestStringObject(AppTestCpythonExtensionBase): - def test_stringobject(self): - module = self.import_extension('foo', [ - ("get_hello1", "METH_NOARGS", - """ - return PyString_FromStringAndSize( - "Hello world", 11); - """), - ("get_hello2", "METH_NOARGS", - """ - return PyString_FromString("Hello world"); - """), - ("test_Size", "METH_NOARGS", - """ - PyObject* s = PyString_FromString("Hello world"); - int result = 0; - - if(PyString_Size(s) == 11) { - result = 1; - } - if(s->ob_type->tp_basicsize != sizeof(void*)*4) - result = 0; - Py_DECREF(s); - return PyBool_FromLong(result); - """), - ("test_Size_exception", "METH_NOARGS", - """ - PyObject* f = PyFloat_FromDouble(1.0); - Py_ssize_t size = PyString_Size(f); - - Py_DECREF(f); - return NULL; - """), - ("test_is_string", "METH_VARARGS", - """ - return PyBool_FromLong(PyString_Check(PyTuple_GetItem(args, 0))); - """)]) - assert module.get_hello1() == 'Hello world' - assert module.get_hello2() == 'Hello world' - assert module.test_Size() - raises(TypeError, module.test_Size_exception) - - assert module.test_is_string("") - assert not module.test_is_string(()) - - def test_string_buffer_init(self): - module = self.import_extension('foo', [ - ("getstring", "METH_NOARGS", - """ - PyObject *s, *t; - char* c; - 
Py_ssize_t len; - - s = PyString_FromStringAndSize(NULL, 4); - if (s == NULL) - return NULL; - t = PyString_FromStringAndSize(NULL, 3); - if (t == NULL) - return NULL; - Py_DECREF(t); - c = PyString_AsString(s); - c[0] = 'a'; - c[1] = 'b'; - c[3] = 'c'; - return s; - """), - ]) - s = module.getstring() - assert len(s) == 4 - assert s == 'ab\x00c' - - - - def test_AsString(self): - module = self.import_extension('foo', [ - ("getstring", "METH_NOARGS", - """ - PyObject* s1 = PyString_FromStringAndSize("test", 4); - char* c = PyString_AsString(s1); - PyObject* s2 = PyString_FromStringAndSize(c, 4); - Py_DECREF(s1); - return s2; - """), - ]) - s = module.getstring() - assert s == 'test' - - def test_py_string_as_string(self): - module = self.import_extension('foo', [ - ("string_as_string", "METH_VARARGS", - ''' - return PyString_FromStringAndSize(PyString_AsString( - PyTuple_GetItem(args, 0)), 4); - ''' - )]) - assert module.string_as_string("huheduwe") == "huhe" - - def test_py_string_as_string_None(self): - module = self.import_extension('foo', [ - ("string_None", "METH_VARARGS", - ''' - return PyString_AsString(Py_None); - ''' - )]) - raises(TypeError, module.string_None) - - def test_AsStringAndSize(self): - module = self.import_extension('foo', [ - ("getstring", "METH_NOARGS", - """ - PyObject* s1 = PyString_FromStringAndSize("te\\0st", 5); - char *buf; - Py_ssize_t len; - if (PyString_AsStringAndSize(s1, &buf, &len) < 0) - return NULL; - if (len != 5) { - PyErr_SetString(PyExc_AssertionError, "Bad Length"); - return NULL; - } - if (PyString_AsStringAndSize(s1, &buf, NULL) >= 0) { - PyErr_SetString(PyExc_AssertionError, "Should Have failed"); - return NULL; - } - PyErr_Clear(); - Py_DECREF(s1); - Py_INCREF(Py_None); - return Py_None; - """), - ]) - module.getstring() - - def test_format_v(self): - module = self.import_extension('foo', [ - ("test_string_format_v", "METH_VARARGS", - ''' - return helper("bla %d ble %s\\n", - PyInt_AsLong(PyTuple_GetItem(args, 0)), - 
PyString_AsString(PyTuple_GetItem(args, 1))); - ''' - ) - ], prologue=''' - PyObject* helper(char* fmt, ...) - { - va_list va; - PyObject* res; - va_start(va, fmt); - res = PyString_FromFormatV(fmt, va); - va_end(va); - return res; - } - ''') - res = module.test_string_format_v(1, "xyz") - assert res == "bla 1 ble xyz\n" - - def test_format(self): - module = self.import_extension('foo', [ - ("test_string_format", "METH_VARARGS", - ''' - return PyString_FromFormat("bla %d ble %s\\n", - PyInt_AsLong(PyTuple_GetItem(args, 0)), - PyString_AsString(PyTuple_GetItem(args, 1))); - ''' - ) - ]) - res = module.test_string_format(1, "xyz") - assert res == "bla 1 ble xyz\n" - - def test_intern_inplace(self): - module = self.import_extension('foo', [ - ("test_intern_inplace", "METH_O", - ''' - PyObject *s = args; - Py_INCREF(s); - PyString_InternInPlace(&s); - return s; - ''' - ) - ]) - # This does not test much, but at least the refcounts are checked. - assert module.test_intern_inplace('s') == 's' - -class TestString(BaseApiTest): - def test_string_resize(self, space, api): - py_str = new_empty_str(space, 10) - ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - py_str.c_buffer[0] = 'a' - py_str.c_buffer[1] = 'b' - py_str.c_buffer[2] = 'c' - ar[0] = rffi.cast(PyObject, py_str) - api._PyString_Resize(ar, 3) - py_str = rffi.cast(PyStringObject, ar[0]) - assert py_str.c_size == 3 - assert py_str.c_buffer[1] == 'b' - assert py_str.c_buffer[3] == '\x00' - # the same for growing - ar[0] = rffi.cast(PyObject, py_str) - api._PyString_Resize(ar, 10) - py_str = rffi.cast(PyStringObject, ar[0]) - assert py_str.c_size == 10 - assert py_str.c_buffer[1] == 'b' - assert py_str.c_buffer[10] == '\x00' - Py_DecRef(space, ar[0]) - lltype.free(ar, flavor='raw') - - def test_string_buffer(self, space, api): - py_str = new_empty_str(space, 10) - c_buf = py_str.c_ob_type.c_tp_as_buffer - assert c_buf - py_obj = rffi.cast(PyObject, py_str) - assert c_buf.c_bf_getsegcount(py_obj, 
lltype.nullptr(Py_ssize_tP.TO)) == 1 - ref = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') - assert c_buf.c_bf_getsegcount(py_obj, ref) == 1 - assert ref[0] == 10 - lltype.free(ref, flavor='raw') - ref = lltype.malloc(rffi.VOIDPP.TO, 1, flavor='raw') - assert c_buf.c_bf_getreadbuffer(py_obj, 0, ref) == 10 - lltype.free(ref, flavor='raw') - Py_DecRef(space, py_obj) - - def test_Concat(self, space, api): - ref = make_ref(space, space.wrap('abc')) - ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - ptr[0] = ref - api.PyString_Concat(ptr, space.wrap('def')) - assert space.str_w(from_ref(space, ptr[0])) == 'abcdef' - api.PyString_Concat(ptr, space.w_None) - assert not ptr[0] - ptr[0] = lltype.nullptr(PyObject.TO) - api.PyString_Concat(ptr, space.wrap('def')) # should not crash - lltype.free(ptr, flavor='raw') - - def test_ConcatAndDel(self, space, api): - ref1 = make_ref(space, space.wrap('abc')) - ref2 = make_ref(space, space.wrap('def')) - ptr = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - ptr[0] = ref1 - api.PyString_ConcatAndDel(ptr, ref2) - assert space.str_w(from_ref(space, ptr[0])) == 'abcdef' - assert ref2.c_ob_refcnt == 0 - Py_DecRef(space, ptr[0]) - ptr[0] = lltype.nullptr(PyObject.TO) - ref2 = make_ref(space, space.wrap('foo')) - api.PyString_ConcatAndDel(ptr, ref2) # should not crash - assert ref2.c_ob_refcnt == 0 - lltype.free(ptr, flavor='raw') - - def test_format(self, space, api): - assert "1 2" == space.unwrap( - api.PyString_Format(space.wrap('%s %d'), space.wrap((1, 2)))) - - def test_asbuffer(self, space, api): - bufp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') - lenp = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') - - w_text = space.wrap("text") - assert api.PyObject_AsCharBuffer(w_text, bufp, lenp) == 0 - assert lenp[0] == 4 - assert rffi.charp2str(bufp[0]) == 'text' - - lltype.free(bufp, flavor='raw') - lltype.free(lenp, flavor='raw') - - def test_intern(self, space, api): - buf = rffi.str2charp("test") - w_s1 = 
api.PyString_InternFromString(buf) - w_s2 = api.PyString_InternFromString(buf) - rffi.free_charp(buf) - assert w_s1 is w_s2 - - def test_AsEncodedObject(self, space, api): - ptr = space.wrap('abc') - - errors = rffi.str2charp("strict") - - encoding = rffi.str2charp("hex") - res = api.PyString_AsEncodedObject( - ptr, encoding, errors) - assert space.unwrap(res) == "616263" - - res = api.PyString_AsEncodedObject( - ptr, encoding, lltype.nullptr(rffi.CCHARP.TO)) - assert space.unwrap(res) == "616263" - rffi.free_charp(encoding) - - encoding = rffi.str2charp("unknown_encoding") - self.raises(space, api, LookupError, api.PyString_AsEncodedObject, - ptr, encoding, errors) - rffi.free_charp(encoding) - - rffi.free_charp(errors) - - res = api.PyString_AsEncodedObject( - ptr, lltype.nullptr(rffi.CCHARP.TO), lltype.nullptr(rffi.CCHARP.TO)) - assert space.unwrap(res) == "abc" - - self.raises(space, api, TypeError, api.PyString_AsEncodedObject, - space.wrap(2), lltype.nullptr(rffi.CCHARP.TO), lltype.nullptr(rffi.CCHARP.TO) - ) - - def test_AsDecodedObject(self, space, api): - w_str = space.wrap('caf\xe9') - encoding = rffi.str2charp("latin-1") - w_res = api.PyString_AsDecodedObject(w_str, encoding, None) - rffi.free_charp(encoding) - assert space.unwrap(w_res) == u"caf\xe9" - - def test_eq(self, space, api): - assert 1 == api._PyString_Eq(space.wrap("hello"), space.wrap("hello")) - assert 0 == api._PyString_Eq(space.wrap("hello"), space.wrap("world")) - - def test_join(self, space, api): - w_sep = space.wrap('') - w_seq = space.wrap(['a', 'b']) - w_joined = api._PyString_Join(w_sep, w_seq) - assert space.unwrap(w_joined) == 'ab' diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_translate.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_translate.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_translate.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_translate.py 2016-03-19 16:40:12.000000000 +0000 @@ -19,7 +19,7 @@ 
@specialize.memo() def get_tp_function(space, typedef): - @cpython_api([], lltype.Signed, error=-1, external=False) + @cpython_api([], lltype.Signed, error=-1, header=None) def slot_tp_function(space): return typedef.value diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_tupleobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_tupleobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_tupleobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_tupleobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,9 @@ import py from pypy.module.cpyext.pyobject import PyObject, PyObjectP, make_ref, from_ref +from pypy.module.cpyext.tupleobject import PyTupleObject from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype @@ -10,38 +12,47 @@ def test_tupleobject(self, space, api): assert not api.PyTuple_Check(space.w_None) assert api.PyTuple_SetItem(space.w_None, 0, space.w_None) == -1 - atuple = space.newtuple([0, 1, 'yay']) + atuple = space.newtuple([space.wrap(0), space.wrap(1), + space.wrap('yay')]) assert api.PyTuple_Size(atuple) == 3 - assert api.PyTuple_GET_SIZE(atuple) == 3 + #assert api.PyTuple_GET_SIZE(atuple) == 3 --- now a C macro raises(TypeError, api.PyTuple_Size(space.newlist([]))) api.PyErr_Clear() def test_tuple_resize(self, space, api): - py_tuple = api.PyTuple_New(3) + w_42 = space.wrap(42) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - ar[0] = rffi.cast(PyObject, make_ref(space, py_tuple)) + + py_tuple = api.PyTuple_New(3) + # inside py_tuple is an array of "PyObject *" items which each hold + # a reference + rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + ar[0] = py_tuple api._PyTuple_Resize(ar, 2) - py_tuple = from_ref(space, ar[0]) - assert space.int_w(space.len(py_tuple)) == 2 - + w_tuple = from_ref(space, ar[0]) + assert 
space.int_w(space.len(w_tuple)) == 2 + assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + api.Py_DecRef(ar[0]) + + py_tuple = api.PyTuple_New(3) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + ar[0] = py_tuple api._PyTuple_Resize(ar, 10) - py_tuple = from_ref(space, ar[0]) - assert space.int_w(space.len(py_tuple)) == 10 - + w_tuple = from_ref(space, ar[0]) + assert space.int_w(space.len(w_tuple)) == 10 + assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 api.Py_DecRef(ar[0]) + lltype.free(ar, flavor='raw') def test_setitem(self, space, api): - atuple = space.newtuple([space.wrap(0), space.wrap("hello")]) - assert api.PyTuple_Size(atuple) == 2 - assert space.eq_w(space.getitem(atuple, space.wrap(0)), space.wrap(0)) - assert space.eq_w(space.getitem(atuple, space.wrap(1)), space.wrap("hello")) - w_obj = space.wrap(1) - api.Py_IncRef(w_obj) - api.PyTuple_SetItem(atuple, 1, w_obj) - assert api.PyTuple_Size(atuple) == 2 - assert space.eq_w(space.getitem(atuple, space.wrap(0)), space.wrap(0)) - assert space.eq_w(space.getitem(atuple, space.wrap(1)), space.wrap(1)) + py_tuple = api.PyTuple_New(2) + api.PyTuple_SetItem(py_tuple, 0, make_ref(space, space.wrap(42))) + api.PyTuple_SetItem(py_tuple, 1, make_ref(space, space.wrap(43))) + + w_tuple = from_ref(space, py_tuple) + assert space.eq_w(w_tuple, space.newtuple([space.wrap(42), + space.wrap(43)])) def test_getslice(self, space, api): w_tuple = space.newtuple([space.wrap(i) for i in range(10)]) @@ -49,3 +60,71 @@ assert space.eq_w(w_slice, space.newtuple([space.wrap(i) for i in range(3, 7)])) + +class AppTestTuple(AppTestCpythonExtensionBase): + def test_refcounts(self): + module = self.import_extension('foo', [ + ("run", "METH_NOARGS", + """ + PyObject *item = PyTuple_New(0); + PyObject *t = PyTuple_New(1); + if (t->ob_refcnt != 1 || item->ob_refcnt != 1) { + PyErr_SetString(PyExc_SystemError, "bad initial refcnt"); + return NULL; + } + + PyTuple_SetItem(t, 0, 
item); + if (t->ob_refcnt != 1) { + PyErr_SetString(PyExc_SystemError, "SetItem: t refcnt != 1"); + return NULL; + } + if (item->ob_refcnt != 1) { + PyErr_SetString(PyExc_SystemError, "SetItem: item refcnt != 1"); + return NULL; + } + + if (PyTuple_GetItem(t, 0) != item || + PyTuple_GetItem(t, 0) != item) { + PyErr_SetString(PyExc_SystemError, "GetItem: bogus item"); + return NULL; + } + + if (t->ob_refcnt != 1) { + PyErr_SetString(PyExc_SystemError, "GetItem: t refcnt != 1"); + return NULL; + } + if (item->ob_refcnt != 1) { + PyErr_SetString(PyExc_SystemError, "GetItem: item refcnt != 1"); + return NULL; + } + return t; + """), + ]) + x = module.run() + assert x == ((),) + + def test_refcounts_more(self): + module = self.import_extension('foo', [ + ("run", "METH_NOARGS", + """ + long prev, next; + PyObject *t = PyTuple_New(1); + prev = Py_True->ob_refcnt; + Py_INCREF(Py_True); + PyTuple_SetItem(t, 0, Py_True); + if (Py_True->ob_refcnt != prev + 1) { + PyErr_SetString(PyExc_SystemError, + "SetItem: Py_True refcnt != prev + 1"); + return NULL; + } + Py_DECREF(t); + if (Py_True->ob_refcnt != prev) { + PyErr_SetString(PyExc_SystemError, + "after: Py_True refcnt != prev"); + return NULL; + } + Py_INCREF(Py_None); + return Py_None; + """), + ]) + module.run() diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_typeobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_typeobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_typeobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_typeobject.py 2016-03-19 16:40:15.000000000 +0000 @@ -374,6 +374,11 @@ module = self.import_extension('foo', [ ("test_type", "METH_O", ''' + /* "args->ob_type" is a strange way to get at 'type', + which should have a different tp_getattro/tp_setattro + than its tp_base, which is 'object'. 
+ */ + if (!args->ob_type->tp_setattro) { PyErr_SetString(PyExc_ValueError, "missing tp_setattro"); @@ -382,7 +387,22 @@ if (args->ob_type->tp_setattro == args->ob_type->tp_base->tp_setattro) { - PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); + /* Note that unlike CPython, in PyPy 'type.tp_setattro' + is the same function as 'object.tp_setattro'. This + test used to check that it was not, but that was an + artifact of the bootstrap logic only---in the final + C sources I checked and they are indeed the same. + So we ignore this problem here. */ + } + if (!args->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + if (args->ob_type->tp_getattro == + args->ob_type->tp_base->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "recursive tp_getattro"); return NULL; } Py_RETURN_TRUE; @@ -391,6 +411,47 @@ ]) assert module.test_type(type(None)) + def test_tp_getattro(self): + module = self.import_extension('foo', [ + ("test_tp_getattro", "METH_VARARGS", + ''' + PyObject *obj = PyTuple_GET_ITEM(args, 0); + PyIntObject *value = PyTuple_GET_ITEM(args, 1); + if (!obj->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + PyObject *name = PyString_FromString("attr1"); + PyIntObject *attr = obj->ob_type->tp_getattro(obj, name); + if (attr->ob_ival != value->ob_ival) + { + PyErr_SetString(PyExc_ValueError, + "tp_getattro returned wrong value"); + return NULL; + } + Py_DECREF(name); + Py_DECREF(attr); + name = PyString_FromString("attr2"); + attr = obj->ob_type->tp_getattro(obj, name); + if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) + { + PyErr_Clear(); + } else { + PyErr_SetString(PyExc_ValueError, + "tp_getattro should have raised"); + return NULL; + } + Py_DECREF(name); + Py_RETURN_TRUE; + ''' + ) + ]) + class C: + def __init__(self): + self.attr1 = 123 + assert module.test_tp_getattro(C(), 123) + def test_nb_int(self): module = 
self.import_extension('foo', [ ("nb_int", "METH_O", @@ -591,48 +652,117 @@ def test_binaryfunc(self): module = self.import_extension('foo', [ - ("new_obj", "METH_NOARGS", + ("newInt", "METH_VARARGS", """ - FooObject *fooObj; + IntLikeObject *intObj; + long intval; - Foo_Type.tp_as_number = &foo_as_number; - foo_as_number.nb_add = foo_nb_add_call; - if (PyType_Ready(&Foo_Type) < 0) return NULL; - fooObj = PyObject_New(FooObject, &Foo_Type); - if (!fooObj) { + if (!PyArg_ParseTuple(args, "l", &intval)) + return NULL; + + IntLike_Type.tp_as_number = &intlike_as_number; + IntLike_Type.tp_flags |= Py_TPFLAGS_CHECKTYPES; + intlike_as_number.nb_add = intlike_nb_add; + if (PyType_Ready(&IntLike_Type) < 0) return NULL; + intObj = PyObject_New(IntLikeObject, &IntLike_Type); + if (!intObj) { return NULL; } - return (PyObject *)fooObj; + intObj->ival = intval; + return (PyObject *)intObj; + """), + ("newIntNoOp", "METH_VARARGS", + """ + IntLikeObjectNoOp *intObjNoOp; + long intval; + + if (!PyArg_ParseTuple(args, "l", &intval)) + return NULL; + + IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; + if (PyType_Ready(&IntLike_Type_NoOp) < 0) return NULL; + intObjNoOp = PyObject_New(IntLikeObjectNoOp, &IntLike_Type_NoOp); + if (!intObjNoOp) { + return NULL; + } + + intObjNoOp->ival = intval; + return (PyObject *)intObjNoOp; """)], """ typedef struct { PyObject_HEAD - } FooObject; + long ival; + } IntLikeObject; static PyObject * - foo_nb_add_call(PyObject *self, PyObject *other) + intlike_nb_add(PyObject *self, PyObject *other) { - return PyInt_FromLong(42); + long val1 = ((IntLikeObject *)(self))->ival; + if (PyInt_Check(other)) { + long val2 = PyInt_AsLong(other); + return PyInt_FromLong(val1+val2); + } + + long val2 = ((IntLikeObject *)(other))->ival; + return PyInt_FromLong(val1+val2); } - PyTypeObject Foo_Type = { + PyTypeObject IntLike_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "IntLike", + /*tp_basicsize*/ sizeof(IntLikeObject), + }; + static 
PyNumberMethods intlike_as_number; + + typedef struct + { + PyObject_HEAD + long ival; + } IntLikeObjectNoOp; + + PyTypeObject IntLike_Type_NoOp = { PyObject_HEAD_INIT(0) /*ob_size*/ 0, - /*tp_name*/ "Foo", - /*tp_basicsize*/ sizeof(FooObject), + /*tp_name*/ "IntLikeNoOp", + /*tp_basicsize*/ sizeof(IntLikeObjectNoOp), }; - static PyNumberMethods foo_as_number; """) - a = module.new_obj() - b = module.new_obj() + a = module.newInt(1) + b = module.newInt(2) c = 3 - assert (a + b) == 42 - raises(TypeError, "b + c") + d = module.newIntNoOp(4) + assert (a + b) == 3 + assert (b + c) == 5 + assert (d + a) == 5 def test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') print('calling module.Type()...') module.Type("X", (object,), {}) + + def test_app_subclass_of_c_type(self): + module = self.import_module(name='foo') + size = module.size_of_instances(module.fooType) + class f1(object): + pass + class f2(module.fooType): + pass + class bar(f1, f2): + pass + assert bar.__base__ is f2 + assert module.size_of_instances(bar) == size + + def test_app_cant_subclass_two_types(self): + module = self.import_module(name='foo') + try: + class bar(module.fooType, module.Property): + pass + except TypeError as e: + assert str(e) == 'instance layout conflicts in multiple inheritance' + else: + raise AssertionError("did not get TypeError!") diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_unicodeobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_unicodeobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_unicodeobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_unicodeobject.py 2016-03-19 16:40:15.000000000 +0000 @@ -24,7 +24,7 @@ if(PyUnicode_GetSize(s) == 11) { result = 1; } - if(s->ob_type->tp_basicsize != sizeof(void*)*4) + if(s->ob_type->tp_basicsize != sizeof(void*)*5) result = 0; Py_DECREF(s); return PyBool_FromLong(result); diff -Nru 
pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_version.py pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_version.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/test/test_version.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/test/test_version.py 2016-03-19 16:40:12.000000000 +0000 @@ -23,6 +23,7 @@ PyModule_AddIntConstant(m, "py_minor_version", PY_MINOR_VERSION); PyModule_AddIntConstant(m, "py_micro_version", PY_MICRO_VERSION); PyModule_AddStringConstant(m, "pypy_version", PYPY_VERSION); + PyModule_AddIntConstant(m, "pypy_version_num", PYPY_VERSION_NUM); } """ module = self.import_module(name='foo', init=init) @@ -35,3 +36,6 @@ if v.releaselevel != 'final': s += '-%s%d' % (v[3], v[4]) assert module.pypy_version == s + assert module.pypy_version_num == ((v[0] << 24) | + (v[1] << 16) | + (v[2] << 8)) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/tupleobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/tupleobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/tupleobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/tupleobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,59 +1,168 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, - build_type_checkers) + build_type_checkers, PyObjectFields, + cpython_struct, bootstrap_function) from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, Py_DecRef, - borrow_from, make_ref, from_ref) + make_ref, from_ref, decref, + track_reference, make_typedescr, get_typedescr) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.objspace.std.tupleobject import W_TupleObject +## +## Implementation of PyTupleObject +## =============================== +## +## Similar to stringobject.py. The reason is only the existance of +## W_SpecialisedTupleObject_ii and W_SpecialisedTupleObject_ff. 
+## These two PyPy classes implement getitem() by returning a freshly +## constructed W_IntObject or W_FloatObject. This is not compatible +## with PyTuple_GetItem, which returns a borrowed reference. +## +## So we use this more advanced (but also likely faster) solution: +## tuple_attach makes a real PyTupleObject with an array of N +## 'PyObject *', which are created immediately and own a reference. +## Then the macro PyTuple_GET_ITEM can be implemented like CPython. +## + +PyTupleObjectStruct = lltype.ForwardReference() +PyTupleObject = lltype.Ptr(PyTupleObjectStruct) +ObjectItems = rffi.CArray(PyObject) +PyTupleObjectFields = PyObjectFields + \ + (("ob_size", Py_ssize_t), ("ob_item", lltype.Ptr(ObjectItems))) +cpython_struct("PyTupleObject", PyTupleObjectFields, PyTupleObjectStruct) + +@bootstrap_function +def init_stringobject(space): + "Type description of PyTupleObject" + make_typedescr(space.w_tuple.layout.typedef, + basestruct=PyTupleObject.TO, + attach=tuple_attach, + dealloc=tuple_dealloc, + realize=tuple_realize) + PyTuple_Check, PyTuple_CheckExact = build_type_checkers("Tuple") +def tuple_check_ref(space, ref): + w_type = from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) + return (w_type is space.w_tuple or + space.is_true(space.issubtype(w_type, space.w_tuple))) + +def new_empty_tuple(space, length): + """ + Allocate a PyTupleObject and its array of PyObject *, but without a + corresponding interpreter object. The array may be mutated, until + tuple_realize() is called. Refcount of the result is 1. + """ + typedescr = get_typedescr(space.w_tuple.layout.typedef) + py_obj = typedescr.allocate(space, space.w_tuple) + py_tup = rffi.cast(PyTupleObject, py_obj) + + py_tup.c_ob_item = lltype.malloc(ObjectItems, length, + flavor='raw', zero=True, + add_memory_pressure=True) + py_tup.c_ob_size = length + return py_tup + +def tuple_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyTupleObject with the given tuple object. 
The + buffer must not be modified. + """ + items_w = space.fixedview(w_obj) + l = len(items_w) + p = lltype.malloc(ObjectItems, l, flavor='raw', + add_memory_pressure=True) + i = 0 + try: + while i < l: + p[i] = make_ref(space, items_w[i]) + i += 1 + except: + while i > 0: + i -= 1 + decref(space, p[i]) + lltype.free(p, flavor='raw') + raise + py_tup = rffi.cast(PyTupleObject, py_obj) + py_tup.c_ob_size = l + py_tup.c_ob_item = p + +def tuple_realize(space, py_obj): + """ + Creates the tuple in the interpreter. The PyTupleObject must not + be modified after this call. + """ + py_tup = rffi.cast(PyTupleObject, py_obj) + l = py_tup.c_ob_size + p = py_tup.c_ob_item + items_w = [None] * l + for i in range(l): + items_w[i] = from_ref(space, p[i]) + w_obj = space.newtuple(items_w) + track_reference(space, py_obj, w_obj) + return w_obj + +@cpython_api([PyObject], lltype.Void, header=None) +def tuple_dealloc(space, py_obj): + """Frees allocated PyTupleObject resources. + """ + py_tup = rffi.cast(PyTupleObject, py_obj) + p = py_tup.c_ob_item + if p: + for i in range(py_tup.c_ob_size): + decref(space, p[i]) + lltype.free(p, flavor="raw") + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +#_______________________________________________________________________ + @cpython_api([Py_ssize_t], PyObject) def PyTuple_New(space, size): - return W_TupleObject([space.w_None] * size) + return rffi.cast(PyObject, new_empty_tuple(space, size)) @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) -def PyTuple_SetItem(space, w_t, pos, w_obj): - if not PyTuple_Check(space, w_t): - # XXX this should also steal a reference, test it!!! 
+def PyTuple_SetItem(space, ref, index, py_obj): + # XXX this will not complain when changing tuples that have + # already been realized as a W_TupleObject, but won't update the + # W_TupleObject + if not tuple_check_ref(space, ref): + decref(space, py_obj) PyErr_BadInternalCall(space) - _setitem_tuple(w_t, pos, w_obj) - Py_DecRef(space, w_obj) # SetItem steals a reference! + ref = rffi.cast(PyTupleObject, ref) + size = ref.c_ob_size + if index < 0 or index >= size: + raise OperationError(space.w_IndexError, + space.wrap("tuple assignment index out of range")) + old_ref = ref.c_ob_item[index] + ref.c_ob_item[index] = py_obj # consumes a reference + if old_ref: + decref(space, old_ref) return 0 -def _setitem_tuple(w_t, pos, w_obj): - # this function checks that w_t is really a W_TupleObject. It - # should only ever be called with a freshly built tuple from - # PyTuple_New(), which always return a W_TupleObject, even if there - # are also other implementations of tuples. - assert isinstance(w_t, W_TupleObject) - w_t.wrappeditems[pos] = w_obj - -@cpython_api([PyObject, Py_ssize_t], PyObject) -def PyTuple_GetItem(space, w_t, pos): - if not PyTuple_Check(space, w_t): +@cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True) +def PyTuple_GetItem(space, ref, index): + if not tuple_check_ref(space, ref): PyErr_BadInternalCall(space) - w_obj = space.getitem(w_t, space.wrap(pos)) - return borrow_from(w_t, w_obj) - -@cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) -def PyTuple_GET_SIZE(space, w_t): - """Return the size of the tuple p, which must be non-NULL and point to a tuple; - no error checking is performed. 
""" - return space.int_w(space.len(w_t)) + ref = rffi.cast(PyTupleObject, ref) + size = ref.c_ob_size + if index < 0 or index >= size: + raise OperationError(space.w_IndexError, + space.wrap("tuple index out of range")) + return ref.c_ob_item[index] # borrowed ref @cpython_api([PyObject], Py_ssize_t, error=-1) def PyTuple_Size(space, ref): """Take a pointer to a tuple object, and return the size of that tuple.""" - if not PyTuple_Check(space, ref): - raise OperationError(space.w_TypeError, - space.wrap("expected tuple object")) - return PyTuple_GET_SIZE(space, ref) + if not tuple_check_ref(space, ref): + PyErr_BadInternalCall(space) + ref = rffi.cast(PyTupleObject, ref) + return ref.c_ob_size @cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) -def _PyTuple_Resize(space, ref, newsize): +def _PyTuple_Resize(space, p_ref, newsize): """Can be used to resize a tuple. newsize will be the new length of the tuple. Because tuples are supposed to be immutable, this should only be used if there is only one reference to the object. Do not use this if the tuple may already @@ -64,19 +173,28 @@ this function. If the object referenced by *p is replaced, the original *p is destroyed. 
On failure, returns -1 and sets *p to NULL, and raises MemoryError or SystemError.""" - py_tuple = from_ref(space, ref[0]) - if not PyTuple_Check(space, py_tuple): + ref = p_ref[0] + if not tuple_check_ref(space, ref): PyErr_BadInternalCall(space) - py_newtuple = PyTuple_New(space, newsize) - - to_cp = newsize - oldsize = space.int_w(space.len(py_tuple)) - if oldsize < newsize: - to_cp = oldsize - for i in range(to_cp): - _setitem_tuple(py_newtuple, i, space.getitem(py_tuple, space.wrap(i))) - Py_DecRef(space, ref[0]) - ref[0] = make_ref(space, py_newtuple) + ref = rffi.cast(PyTupleObject, ref) + oldsize = ref.c_ob_size + oldp = ref.c_ob_item + newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw', + add_memory_pressure=True) + try: + if oldsize < newsize: + to_cp = oldsize + else: + to_cp = newsize + for i in range(to_cp): + newp[i] = oldp[i] + except: + lltype.free(newp, flavor='raw') + raise + ref.c_ob_item = newp + ref.c_ob_size = newsize + lltype.free(oldp, flavor='raw') + # in this version, p_ref[0] never needs to be updated return 0 @cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject) diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/typeobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/typeobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/typeobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/typeobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -15,13 +15,13 @@ cpython_api, cpython_struct, bootstrap_function, Py_ssize_t, Py_ssize_tP, generic_cpy_call, Py_TPFLAGS_READY, Py_TPFLAGS_READYING, Py_TPFLAGS_HEAPTYPE, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL, - Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers) + Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers, StaticObjectBuilder) from pypy.module.cpyext.methodobject import ( PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) from pypy.module.cpyext.modsupport import convert_method_defs from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, 
from_ref, get_typedescr, make_typedescr, - track_reference, RefcountState, borrow_from, Py_DecRef) + track_reference, Py_DecRef, as_pyobj) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.module.cpyext.state import State @@ -116,7 +116,7 @@ def update_all_slots(space, w_type, pto): # XXX fill slots in pto - typedef = w_type.instancetypedef + typedef = w_type.layout.typedef for method_name, slot_name, slot_names, slot_func in slotdefs_for_tp_slots: w_descr = w_type.lookup(method_name) if w_descr is None: @@ -146,7 +146,7 @@ assert len(slot_names) == 2 struct = getattr(pto, slot_names[0]) if not struct: - assert not space.config.translating + #assert not space.config.translating assert not pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE if slot_names[0] == 'c_tp_as_number': STRUCT_TYPE = PyNumberMethods @@ -183,7 +183,7 @@ if pto.c_tp_new: add_tp_new_wrapper(space, dict_w, pto) -@cpython_api([PyObject, PyObject, PyObject], PyObject, external=False) +@cpython_api([PyObject, PyObject, PyObject], PyObject, header=None) def tp_new_wrapper(space, self, w_args, w_kwds): tp_new = rffi.cast(PyTypeObjectPtr, self).c_tp_new @@ -235,6 +235,9 @@ def inherit_special(space, pto, base_pto): # XXX missing: copy basicsize and flags in a magical way + # (minimally, if tp_basicsize is zero we copy it from the base) + if not pto.c_tp_basicsize: + pto.c_tp_basicsize = base_pto.c_tp_basicsize flags = rffi.cast(lltype.Signed, pto.c_tp_flags) base_object_pyo = make_ref(space, space.w_object) base_object_pto = rffi.cast(PyTypeObjectPtr, base_object_pyo) @@ -294,7 +297,7 @@ name = rffi.charp2str(pto.c_tp_name) W_TypeObject.__init__(self, space, name, - bases_w or [space.w_object], dict_w) + bases_w or [space.w_object], dict_w, force_new_layout=True) if not space.is_true(space.issubtype(self, space.w_type)): self.flag_cpytype = True self.flag_heaptype = False @@ -303,62 +306,15 @@ @bootstrap_function def init_typeobject(space): - 
make_typedescr(space.w_type.instancetypedef, + make_typedescr(space.w_type.layout.typedef, basestruct=PyTypeObject, alloc=type_alloc, attach=type_attach, realize=type_realize, dealloc=type_dealloc) - # some types are difficult to create because of cycles. - # - object.ob_type = type - # - type.ob_type = type - # - tuple.ob_type = type - # - type.tp_base = object - # - tuple.tp_base = object - # - type.tp_bases is a tuple - # - object.tp_bases is a tuple - # - tuple.tp_bases is a tuple - - # insert null placeholders to please create_ref() - track_reference(space, lltype.nullptr(PyObject.TO), space.w_type) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_object) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_tuple) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_str) - - # create the objects - py_type = create_ref(space, space.w_type) - py_object = create_ref(space, space.w_object) - py_tuple = create_ref(space, space.w_tuple) - py_str = create_ref(space, space.w_str) - - # form cycles - pto_type = rffi.cast(PyTypeObjectPtr, py_type) - py_type.c_ob_type = pto_type - py_object.c_ob_type = pto_type - py_tuple.c_ob_type = pto_type - - pto_object = rffi.cast(PyTypeObjectPtr, py_object) - pto_type.c_tp_base = pto_object - pto_tuple = rffi.cast(PyTypeObjectPtr, py_tuple) - pto_tuple.c_tp_base = pto_object - - pto_type.c_tp_bases.c_ob_type = pto_tuple - pto_object.c_tp_bases.c_ob_type = pto_tuple - pto_tuple.c_tp_bases.c_ob_type = pto_tuple - - for typ in (py_type, py_object, py_tuple, py_str): - heaptype = rffi.cast(PyHeapTypeObject, typ) - heaptype.c_ht_name.c_ob_type = pto_type - - # Restore the mapping - track_reference(space, py_type, space.w_type, replace=True) - track_reference(space, py_object, space.w_object, replace=True) - track_reference(space, py_tuple, space.w_tuple, replace=True) - track_reference(space, py_str, space.w_str, replace=True) - -@cpython_api([PyObject], lltype.Void, external=False) 
+@cpython_api([PyObject], lltype.Void, header=None) def subtype_dealloc(space, obj): pto = obj.c_ob_type base = pto @@ -374,7 +330,7 @@ # hopefully this does not clash with the memory model assumed in # extension modules -@cpython_api([PyObject, Py_ssize_tP], lltype.Signed, external=False, +@cpython_api([PyObject, Py_ssize_tP], lltype.Signed, header=None, error=CANNOT_FAIL) def str_segcount(space, w_obj, ref): if ref: @@ -382,9 +338,9 @@ return 1 @cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, - external=False, error=-1) + header=None, error=-1) def str_getreadbuffer(space, w_str, segment, ref): - from pypy.module.cpyext.stringobject import PyString_AsString + from pypy.module.cpyext.bytesobject import PyString_AsString if segment != 0: raise OperationError(space.w_SystemError, space.wrap ("accessing non-existent string segment")) @@ -395,9 +351,9 @@ return space.len_w(w_str) @cpython_api([PyObject, Py_ssize_t, rffi.CCHARPP], lltype.Signed, - external=False, error=-1) + header=None, error=-1) def str_getcharbuffer(space, w_str, segment, ref): - from pypy.module.cpyext.stringobject import PyString_AsString + from pypy.module.cpyext.bytesobject import PyString_AsString if segment != 0: raise OperationError(space.w_SystemError, space.wrap ("accessing non-existent string segment")) @@ -408,7 +364,7 @@ return space.len_w(w_str) @cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, - external=False, error=-1) + header=None, error=-1) def buf_getreadbuffer(space, pyref, segment, ref): from pypy.module.cpyext.bufferobject import PyBufferObject if segment != 0: @@ -440,7 +396,7 @@ buf_getreadbuffer.api_func.get_wrapper(space)) pto.c_tp_as_buffer = c_buf -@cpython_api([PyObject], lltype.Void, external=False) +@cpython_api([PyObject], lltype.Void, header=None) def type_dealloc(space, obj): from pypy.module.cpyext.object import PyObject_dealloc obj_pto = rffi.cast(PyTypeObjectPtr, obj) @@ -465,7 +421,8 @@ Py_DecRef(space, w_metatype) heaptype = 
lltype.malloc(PyHeapTypeObject.TO, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) pto = heaptype.c_ht_type pto.c_ob_refcnt = 1 pto.c_ob_type = metatype @@ -474,6 +431,8 @@ pto.c_tp_as_sequence = heaptype.c_as_sequence pto.c_tp_as_mapping = heaptype.c_as_mapping pto.c_tp_as_buffer = heaptype.c_as_buffer + pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out + pto.c_tp_itemsize = 0 return rffi.cast(PyObject, heaptype) @@ -487,7 +446,7 @@ pto = rffi.cast(PyTypeObjectPtr, py_obj) - typedescr = get_typedescr(w_type.instancetypedef) + typedescr = get_typedescr(w_type.layout.typedef) # dealloc pto.c_tp_dealloc = typedescr.get_dealloc(space) @@ -505,12 +464,10 @@ w_typename = space.getattr(w_type, space.wrap('__name__')) heaptype = rffi.cast(PyHeapTypeObject, pto) heaptype.c_ht_name = make_ref(space, w_typename) - from pypy.module.cpyext.stringobject import PyString_AsString + from pypy.module.cpyext.bytesobject import PyString_AsString pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: pto.c_tp_name = rffi.str2charp(w_type.name) - pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out - pto.c_tp_itemsize = 0 # uninitialized fields: # c_tp_print, c_tp_getattr, c_tp_setattr # XXX implement @@ -518,8 +475,12 @@ w_base = best_base(space, w_type.bases_w) pto.c_tp_base = rffi.cast(PyTypeObjectPtr, make_ref(space, w_base)) - finish_type_1(space, pto) - finish_type_2(space, pto, w_type) + builder = space.fromcache(StaticObjectBuilder) + if builder.cpyext_type_init is not None: + builder.cpyext_type_init.append((pto, w_type)) + else: + finish_type_1(space, pto) + finish_type_2(space, pto, w_type) pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct) if pto.c_tp_base: @@ -546,6 +507,7 @@ def type_realize(space, py_obj): pto = rffi.cast(PyTypeObjectPtr, py_obj) + assert pto.c_tp_flags & Py_TPFLAGS_READY == 0 assert pto.c_tp_flags & Py_TPFLAGS_READYING == 0 pto.c_tp_flags |= Py_TPFLAGS_READYING try: @@ -556,13 
+518,13 @@ return w_obj def solid_base(space, w_type): - typedef = w_type.instancetypedef + typedef = w_type.layout.typedef return space.gettypeobject(typedef) def best_base(space, bases_w): if not bases_w: return None - return find_best_base(space, bases_w) + return find_best_base(bases_w) def inherit_slots(space, pto, w_base): # XXX missing: nearly everything @@ -580,6 +542,8 @@ pto.c_tp_free = base.c_tp_free if not pto.c_tp_setattro: pto.c_tp_setattro = base.c_tp_setattro + if not pto.c_tp_getattro: + pto.c_tp_getattro = base.c_tp_getattro finally: Py_DecRef(space, base_pyo) @@ -595,8 +559,7 @@ if not py_type.c_tp_base: # borrowed reference, but w_object is unlikely to disappear - base = make_ref(space, space.w_object) - Py_DecRef(space, base) + base = as_pyobj(space, space.w_object) py_type.c_tp_base = rffi.cast(PyTypeObjectPtr, base) finish_type_1(space, py_type) @@ -610,9 +573,6 @@ finish_type_2(space, py_type, w_obj) - state = space.fromcache(RefcountState) - state.non_heaptypes_w.append(w_obj) - return w_obj def finish_type_1(space, pto): @@ -649,6 +609,12 @@ PyObject_GenericSetAttr.api_func.functype, PyObject_GenericSetAttr.api_func.get_wrapper(space)) + if not pto.c_tp_getattro: + from pypy.module.cpyext.object import PyObject_GenericGetAttr + pto.c_tp_getattro = llhelper( + PyObject_GenericGetAttr.api_func.functype, + PyObject_GenericGetAttr.api_func.get_wrapper(space)) + if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) w_dict = w_obj.getdict(space) @@ -672,7 +638,8 @@ return generic_cpy_call( space, type.c_tp_alloc, type, 0) -@cpython_api([PyTypeObjectPtr, PyObject], PyObject, error=CANNOT_FAIL) +@cpython_api([PyTypeObjectPtr, PyObject], PyObject, error=CANNOT_FAIL, + result_borrowed=True) def _PyType_Lookup(space, type, w_name): """Internal API to look for a name through the MRO. 
This returns a borrowed reference, and doesn't set an exception!""" @@ -683,7 +650,9 @@ return None name = space.str_w(w_name) w_obj = w_type.lookup(name) - return borrow_from(w_type, w_obj) + # this assumes that w_obj is not dynamically created, but will stay alive + # until w_type is modified or dies. Assuming this, we return a borrowed ref + return w_obj @cpython_api([PyTypeObjectPtr], lltype.Void) def PyType_Modified(space, w_obj): diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/unicodeobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/unicodeobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/unicodeobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/unicodeobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -9,7 +9,7 @@ from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, make_typedescr, get_typedescr) -from pypy.module.cpyext.stringobject import PyString_Check +from pypy.module.cpyext.bytesobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.module._codecs.interp_codecs import CodecState from pypy.objspace.std import unicodeobject @@ -17,7 +17,7 @@ from rpython.tool.sourcetools import func_renamer import sys -## See comment in stringobject.py. +## See comment in bytesobject.py. PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct) @@ -27,7 +27,7 @@ @bootstrap_function def init_unicodeobject(space): - make_typedescr(space.w_unicode.instancetypedef, + make_typedescr(space.w_unicode.layout.typedef, basestruct=PyUnicodeObject.TO, attach=unicode_attach, dealloc=unicode_dealloc, @@ -44,18 +44,19 @@ def new_empty_unicode(space, length): """ - Allocatse a PyUnicodeObject and its buffer, but without a corresponding + Allocate a PyUnicodeObject and its buffer, but without a corresponding interpreter object. The buffer may be mutated, until unicode_realize() is - called. + called. 
Refcount of the result is 1. """ - typedescr = get_typedescr(space.w_unicode.instancetypedef) + typedescr = get_typedescr(space.w_unicode.layout.typedef) py_obj = typedescr.allocate(space, space.w_unicode) py_uni = rffi.cast(PyUnicodeObject, py_obj) buflen = length + 1 py_uni.c_size = length py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) return py_uni def unicode_attach(space, py_obj, w_obj): @@ -75,7 +76,7 @@ track_reference(space, py_obj, w_obj) return w_obj -@cpython_api([PyObject], lltype.Void, external=False) +@cpython_api([PyObject], lltype.Void, header=None) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) if py_unicode.c_buffer: diff -Nru pypy-4.0.1+dfsg/pypy/module/cpyext/weakrefobject.py pypy-5.0.1+dfsg/pypy/module/cpyext/weakrefobject.py --- pypy-4.0.1+dfsg/pypy/module/cpyext/weakrefobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/cpyext/weakrefobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import cpython_api -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject from pypy.module._weakref.interp__weakref import W_Weakref, proxy @cpython_api([PyObject, PyObject], PyObject) @@ -30,24 +30,26 @@ """ return proxy(space, w_obj, w_callback) -@cpython_api([PyObject], PyObject) +@cpython_api([PyObject], PyObject, result_borrowed=True) def PyWeakref_GetObject(space, w_ref): """Return the referenced object from a weak reference. If the referent is no longer live, returns None. This function returns a borrowed reference. 
""" - return PyWeakref_GET_OBJECT(space, w_ref) + return space.call_function(w_ref) # borrowed ref -@cpython_api([PyObject], PyObject) +@cpython_api([PyObject], PyObject, result_borrowed=True) def PyWeakref_GET_OBJECT(space, w_ref): """Similar to PyWeakref_GetObject(), but implemented as a macro that does no error checking. """ - return borrow_from(w_ref, space.call_function(w_ref)) + return space.call_function(w_ref) # borrowed ref @cpython_api([PyObject], PyObject) def PyWeakref_LockObject(space, w_ref): """Return the referenced object from a weak reference. If the referent is no longer live, returns None. This function returns a new reference. + + (A PyPy extension that may not be useful any more: use + PyWeakref_GetObject() and Py_INCREF().) """ return space.call_function(w_ref) - diff -Nru pypy-4.0.1+dfsg/pypy/module/_demo/test/test_import.py pypy-5.0.1+dfsg/pypy/module/_demo/test/test_import.py --- pypy-4.0.1+dfsg/pypy/module/_demo/test/test_import.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_demo/test/test_import.py 2016-03-19 16:40:11.000000000 +0000 @@ -12,8 +12,7 @@ w_modules = space.sys.get('modules') assert _demo.Module.demo_events == ['setup'] - assert not space.is_true(space.contains(w_modules, - space.wrap('_demo'))) + assert not space.contains_w(w_modules, space.wrap('_demo')) # first import w_import = space.builtin.get('__import__') diff -Nru pypy-4.0.1+dfsg/pypy/module/_file/test/test_large_file.py pypy-5.0.1+dfsg/pypy/module/_file/test/test_large_file.py --- pypy-4.0.1+dfsg/pypy/module/_file/test/test_large_file.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_file/test/test_large_file.py 2016-03-19 16:40:11.000000000 +0000 @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.module._file.test.test_file import getfile @@ -13,6 +13,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): from rpython.translator.c.test.test_extfunc import need_sparse_files + if sys.maxsize < 
2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_large_seek_offsets(self): diff -Nru pypy-4.0.1+dfsg/pypy/module/imp/importing.py pypy-5.0.1+dfsg/pypy/module/imp/importing.py --- pypy-4.0.1+dfsg/pypy/module/imp/importing.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/imp/importing.py 2016-03-19 16:40:12.000000000 +0000 @@ -38,7 +38,7 @@ # and cffi so's. If we do have to update it, we'd likely need a way to # split the two usages again. #DEFAULT_SOABI = 'pypy-%d%d' % PYPY_VERSION[:2] -DEFAULT_SOABI = 'pypy-26' +DEFAULT_SOABI = 'pypy-41' @specialize.memo() def get_so_extension(space): @@ -85,7 +85,7 @@ # The "imp" module does not respect this, and is allowed to find # lone .pyc files. # check the .pyc file - if space.config.objspace.usepycfiles and space.config.objspace.lonepycfiles: + if space.config.objspace.lonepycfiles: pycfile = filepart + ".pyc" if file_exists(pycfile): # existing .pyc file @@ -888,17 +888,11 @@ """ w = space.wrap - if space.config.objspace.usepycfiles: - src_stat = os.fstat(fd) - cpathname = pathname + 'c' - mtime = int(src_stat[stat.ST_MTIME]) - mode = src_stat[stat.ST_MODE] - stream = check_compiled_module(space, cpathname, mtime) - else: - cpathname = None - mtime = 0 - mode = 0 - stream = None + src_stat = os.fstat(fd) + cpathname = pathname + 'c' + mtime = int(src_stat[stat.ST_MTIME]) + mode = src_stat[stat.ST_MODE] + stream = check_compiled_module(space, cpathname, mtime) if stream: # existing and up-to-date .pyc file @@ -913,7 +907,7 @@ else: code_w = parse_source_module(space, pathname, source) - if space.config.objspace.usepycfiles and write_pyc: + if write_pyc: if not space.is_true(space.sys.get('dont_write_bytecode')): 
write_compiled_module(space, code_w, cpathname, mode, mtime) diff -Nru pypy-4.0.1+dfsg/pypy/module/imp/test/test_import.py pypy-5.0.1+dfsg/pypy/module/imp/test/test_import.py --- pypy-4.0.1+dfsg/pypy/module/imp/test/test_import.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/imp/test/test_import.py 2016-03-19 16:40:15.000000000 +0000 @@ -98,6 +98,10 @@ 'a=5\nb=6\rc="""hello\r\nworld"""\r', mode='wb') p.join('mod.py').write( 'a=15\nb=16\rc="""foo\r\nbar"""\r', mode='wb') + setuppkg("test_bytecode", + a = '', + b = '', + c = '') # create compiled/x.py and a corresponding pyc file p = setuppkg("compiled", x = "x = 84") @@ -119,7 +123,7 @@ stream.try_to_find_file_descriptor()) finally: stream.close() - if space.config.objspace.usepycfiles: + if not space.config.translation.sandbox: # also create a lone .pyc file p.join('lone.pyc').write(p.join('x.pyc').read(mode='rb'), mode='wb') @@ -146,6 +150,8 @@ """) def _teardown(space, w_saved_modules): + p = udir.join('impsubdir') + p.remove() space.appexec([w_saved_modules], """ ((saved_path, saved_modules)): import sys @@ -646,11 +652,13 @@ # one in sys.path. 
import sys assert '_md5' not in sys.modules - import _md5 - assert hasattr(_md5, 'hello_world') - assert not hasattr(_md5, 'count') - assert '(built-in)' not in repr(_md5) - del sys.modules['_md5'] + try: + import _md5 + assert hasattr(_md5, 'hello_world') + assert not hasattr(_md5, 'digest_size') + assert '(built-in)' not in repr(_md5) + finally: + sys.modules.pop('_md5', None) def test_shadow_extension_2(self): if self.runappdirect: skip("hard to test: module is already imported") @@ -669,7 +677,7 @@ assert '(built-in)' in repr(_md5) finally: sys.path.insert(0, sys.path.pop()) - del sys.modules['_md5'] + sys.modules.pop('_md5', None) def test_invalid_pathname(self): import imp @@ -1061,12 +1069,12 @@ py.test.skip("unresolved issues with win32 shell quoting rules") from pypy.interpreter.test.test_zpy import pypypath extrapath = udir.ensure("pythonpath", dir=1) - extrapath.join("urllib.py").write("print 42\n") + extrapath.join("sched.py").write("print 42\n") old = os.environ.get('PYTHONPATH', None) oldlang = os.environ.pop('LANG', None) try: os.environ['PYTHONPATH'] = str(extrapath) - output = py.process.cmdexec('''"%s" "%s" -c "import urllib"''' % + output = py.process.cmdexec('''"%s" "%s" -c "import sched"''' % (sys.executable, pypypath)) assert output.strip() == '42' finally: @@ -1342,15 +1350,56 @@ assert isinstance(importer, zipimport.zipimporter) -class AppTestNoPycFile(object): +class AppTestWriteBytecode(object): spaceconfig = { - "objspace.usepycfiles": False, - "objspace.lonepycfiles": False + "translation.sandbox": False + } + + def setup_class(cls): + cls.saved_modules = _setup(cls.space) + sandbox = cls.spaceconfig['translation.sandbox'] + cls.w_sandbox = cls.space.wrap(sandbox) + + def teardown_class(cls): + _teardown(cls.space, cls.saved_modules) + cls.space.appexec([], """ + (): + import sys + sys.dont_write_bytecode = False + """) + + def test_default(self): + import os.path + from test_bytecode import a + assert a.__file__.endswith('a.py') + 
assert os.path.exists(a.__file__ + 'c') == (not self.sandbox) + + def test_write_bytecode(self): + import os.path + import sys + sys.dont_write_bytecode = False + from test_bytecode import b + assert b.__file__.endswith('b.py') + assert os.path.exists(b.__file__ + 'c') + + def test_dont_write_bytecode(self): + import os.path + import sys + sys.dont_write_bytecode = True + from test_bytecode import c + assert c.__file__.endswith('c.py') + assert not os.path.exists(c.__file__ + 'c') + + +class AppTestWriteBytecodeSandbox(AppTestWriteBytecode): + spaceconfig = { + "translation.sandbox": True } + + +class _AppTestLonePycFileBase(object): def setup_class(cls): - usepycfiles = cls.spaceconfig['objspace.usepycfiles'] lonepycfiles = cls.spaceconfig['objspace.lonepycfiles'] - cls.w_usepycfiles = cls.space.wrap(usepycfiles) cls.w_lonepycfiles = cls.space.wrap(lonepycfiles) cls.saved_modules = _setup(cls.space) @@ -1359,10 +1408,7 @@ def test_import_possibly_from_pyc(self): from compiled import x - if self.usepycfiles: - assert x.__file__.endswith('x.pyc') - else: - assert x.__file__.endswith('x.py') + assert x.__file__.endswith('x.pyc') try: from compiled import lone except ImportError: @@ -1371,15 +1417,13 @@ assert self.lonepycfiles, "should not have found 'lone.pyc'" assert lone.__file__.endswith('lone.pyc') -class AppTestNoLonePycFile(AppTestNoPycFile): +class AppTestNoLonePycFile(_AppTestLonePycFileBase): spaceconfig = { - "objspace.usepycfiles": True, "objspace.lonepycfiles": False } -class AppTestLonePycFile(AppTestNoPycFile): +class AppTestLonePycFile(_AppTestLonePycFileBase): spaceconfig = { - "objspace.usepycfiles": True, "objspace.lonepycfiles": True } diff -Nru pypy-4.0.1+dfsg/pypy/module/_io/interp_io.py pypy-5.0.1+dfsg/pypy/module/_io/interp_io.py --- pypy-4.0.1+dfsg/pypy/module/_io/interp_io.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_io/interp_io.py 2016-03-19 16:40:11.000000000 +0000 @@ -7,7 +7,9 @@ from 
pypy.module.exceptions.interp_exceptions import W_IOError from pypy.module._io.interp_fileio import W_FileIO from pypy.module._io.interp_textio import W_TextIOWrapper -from rpython.rtyper.module.ll_os_stat import STAT_FIELD_TYPES +from rpython.rlib.rposix_stat import STAT_FIELD_TYPES + +HAS_BLKSIZE = 'st_blksize' in STAT_FIELD_TYPES class Cache: @@ -118,7 +120,7 @@ if buffering < 0: buffering = DEFAULT_BUFFER_SIZE - if 'st_blksize' in STAT_FIELD_TYPES: + if HAS_BLKSIZE: fileno = space.c_int_w(space.call_method(w_raw, "fileno")) try: st = os.fstat(fileno) diff -Nru pypy-4.0.1+dfsg/pypy/module/itertools/interp_itertools.py pypy-5.0.1+dfsg/pypy/module/itertools/interp_itertools.py --- pypy-4.0.1+dfsg/pypy/module/itertools/interp_itertools.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/itertools/interp_itertools.py 2016-03-19 16:40:12.000000000 +0000 @@ -372,7 +372,7 @@ def arg_int_w(self, w_obj, minimum, errormsg): space = self.space try: - result = space.int_w(w_obj) + result = space.int_w(space.int(w_obj)) # CPython allows floats as parameters except OperationError, e: if e.async(space): raise diff -Nru pypy-4.0.1+dfsg/pypy/module/itertools/test/test_itertools.py pypy-5.0.1+dfsg/pypy/module/itertools/test/test_itertools.py --- pypy-4.0.1+dfsg/pypy/module/itertools/test/test_itertools.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/itertools/test/test_itertools.py 2016-03-19 16:40:12.000000000 +0000 @@ -225,6 +225,12 @@ assert it.next() == x raises(StopIteration, it.next) + # CPython implementation allows floats + it = itertools.islice([1, 2, 3, 4, 5], 0.0, 3.0, 2.0) + for x in [1, 3]: + assert it.next() == x + raises(StopIteration, it.next) + it = itertools.islice([1, 2, 3], 0, None) for x in [1, 2, 3]: assert it.next() == x diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/appbridge.py pypy-5.0.1+dfsg/pypy/module/micronumpy/appbridge.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/appbridge.py 2015-11-19 
19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/appbridge.py 2016-03-19 16:40:12.000000000 +0000 @@ -9,6 +9,7 @@ w_array_repr = None w_array_str = None w__usefields = None + w_partition = None def __init__(self, space): pass diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/base.py pypy-5.0.1+dfsg/pypy/module/micronumpy/base.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/base.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/base.py 2016-03-19 16:40:12.000000000 +0000 @@ -44,7 +44,7 @@ from pypy.module.micronumpy.strides import calc_strides if len(shape) > NPY.MAXDIMS: raise oefmt(space.w_ValueError, - "sequence too large; must be smaller than %d", NPY.MAXDIMS) + "sequence too large; cannot be greater than %d", NPY.MAXDIMS) try: ovfcheck(support.product_check(shape) * dtype.elsize) except OverflowError as e: @@ -69,7 +69,7 @@ isize = dtype.elsize if len(shape) > NPY.MAXDIMS: raise oefmt(space.w_ValueError, - "sequence too large; must be smaller than %d", NPY.MAXDIMS) + "sequence too large; cannot be greater than %d", NPY.MAXDIMS) try: totalsize = ovfcheck(support.product_check(shape) * isize) except OverflowError as e: diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/boxes.py pypy-5.0.1+dfsg/pypy/module/micronumpy/boxes.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/boxes.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/boxes.py 2016-03-19 16:40:12.000000000 +0000 @@ -444,7 +444,7 @@ @unwrap_spec(axis1=int, axis2=int) def descr_swapaxes(self, space, axis1, axis2): - return self + raise oefmt(space.w_ValueError, 'bad axis1 argument to swapaxes') def descr_fill(self, space, w_value): self.get_dtype(space).coerce(space, w_value) @@ -573,7 +573,7 @@ try: ofs, dtype = self.dtype.fields[item] except KeyError: - raise oefmt(space.w_IndexError, "invalid index") + raise oefmt(space.w_ValueError, "no field of name %s", item) from pypy.module.micronumpy.types import VoidType if 
isinstance(dtype.itemtype, VoidType): diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/compile.py pypy-5.0.1+dfsg/pypy/module/micronumpy/compile.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/compile.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/compile.py 2016-03-19 16:40:12.000000000 +0000 @@ -8,6 +8,7 @@ from pypy.interpreter.error import OperationError from rpython.rlib.objectmodel import specialize, instantiate from rpython.rlib.nonconst import NonConstant +from rpython.rlib.rarithmetic import base_int from pypy.module.micronumpy import boxes, ufuncs from pypy.module.micronumpy.arrayops import where from pypy.module.micronumpy.ndarray import W_NDimArray @@ -65,6 +66,7 @@ w_KeyError = W_TypeObject("KeyError") w_SystemExit = W_TypeObject("SystemExit") w_KeyboardInterrupt = W_TypeObject("KeyboardInterrupt") + w_VisibleDeprecationWarning = W_TypeObject("VisibleDeprecationWarning") w_None = None w_bool = W_TypeObject("bool") @@ -177,7 +179,7 @@ return BoolObject(obj) elif isinstance(obj, int): return IntObject(obj) - elif isinstance(obj, long): + elif isinstance(obj, base_int): return LongObject(obj) elif isinstance(obj, W_Root): return obj @@ -194,32 +196,36 @@ def newfloat(self, f): return self.float(f) + def newslice(self, start, stop, step): + return SliceObject(self.int_w(start), self.int_w(stop), + self.int_w(step)) + def le(self, w_obj1, w_obj2): - assert isinstance(w_obj1, boxes.W_GenericBox) - assert isinstance(w_obj2, boxes.W_GenericBox) + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) return w_obj1.descr_le(self, w_obj2) def lt(self, w_obj1, w_obj2): - assert isinstance(w_obj1, boxes.W_GenericBox) - assert isinstance(w_obj2, boxes.W_GenericBox) + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) return w_obj1.descr_lt(self, w_obj2) def ge(self, w_obj1, w_obj2): - assert isinstance(w_obj1, boxes.W_GenericBox) - assert 
isinstance(w_obj2, boxes.W_GenericBox) + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) return w_obj1.descr_ge(self, w_obj2) def add(self, w_obj1, w_obj2): - assert isinstance(w_obj1, boxes.W_GenericBox) - assert isinstance(w_obj2, boxes.W_GenericBox) + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) return w_obj1.descr_add(self, w_obj2) def sub(self, w_obj1, w_obj2): return self.wrap(1) def mul(self, w_obj1, w_obj2): - assert isinstance(w_obj1, boxes.W_GenericBox) - assert isinstance(w_obj2, boxes.W_GenericBox) + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) return w_obj1.descr_mul(self, w_obj2) def pow(self, w_obj1, w_obj2, _): @@ -402,6 +408,9 @@ assert isinstance(w_check_class, W_TypeObject) return w_exc_type.name == w_check_class.name + def warn(self, w_msg, w_warn_type): + pass + class FloatObject(W_Root): tp = FakeSpace.w_float def __init__(self, floatval): @@ -832,7 +841,7 @@ elif self.name == 'reshape': w_arg = self.args[1] assert isinstance(w_arg, ArrayConstant) - order = -1 + order = -1 w_res = arr.reshape(interp.space, w_arg.wrap(interp.space), order) else: assert False diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/concrete.py pypy-5.0.1+dfsg/pypy/module/micronumpy/concrete.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/concrete.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/concrete.py 2016-03-19 16:40:12.000000000 +0000 @@ -12,8 +12,8 @@ ArrayArgumentException, W_NumpyObject from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import ( - IntegerChunk, SliceChunk, NewAxisChunk, EllipsisChunk, new_view, - calc_strides, calc_new_strides, shape_agreement, + IntegerChunk, SliceChunk, NewAxisChunk, EllipsisChunk, BooleanChunk, + new_view, calc_strides, calc_new_strides, shape_agreement, calculate_broadcast_strides, calc_backstrides, calc_start, 
is_c_contiguous, is_f_contiguous) from rpython.rlib.objectmodel import keepalive_until_here @@ -236,6 +236,7 @@ @jit.unroll_safe def _prepare_slice_args(self, space, w_idx): + from pypy.module.micronumpy import boxes if space.isinstance_w(w_idx, space.w_str): raise oefmt(space.w_IndexError, "only integers, slices (`:`), " "ellipsis (`...`), numpy.newaxis (`None`) and integer or " @@ -258,6 +259,7 @@ result = [] i = 0 has_ellipsis = False + has_filter = False for w_item in space.fixedview(w_idx): if space.is_w(w_item, space.w_Ellipsis): if has_ellipsis: @@ -272,6 +274,16 @@ elif space.isinstance_w(w_item, space.w_slice): result.append(SliceChunk(w_item)) i += 1 + elif isinstance(w_item, W_NDimArray) and w_item.get_dtype().is_bool(): + if has_filter: + # in CNumPy, the support for this is incomplete + raise oefmt(space.w_ValueError, + "an index can only have a single boolean mask; " + "use np.take or create a sinlge mask array") + has_filter = True + result.append(BooleanChunk(w_item)) + elif isinstance(w_item, boxes.W_GenericBox): + result.append(IntegerChunk(w_item.descr_int(space))) else: result.append(IntegerChunk(w_item)) i += 1 @@ -286,7 +298,14 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return new_view(space, orig_arr, chunks) + copy = False + if isinstance(chunks[0], BooleanChunk): + # numpy compatibility + copy = True + w_ret = new_view(space, orig_arr, chunks) + if copy: + w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order())) + return w_ret def descr_setitem(self, space, orig_arr, w_index, w_value): try: @@ -457,7 +476,7 @@ def set_shape(self, space, orig_array, new_shape): if len(new_shape) > NPY.MAXDIMS: raise oefmt(space.w_ValueError, - "sequence too large; must be smaller than %d", NPY.MAXDIMS) + "sequence too large; cannot be greater than %d", NPY.MAXDIMS) try: ovfcheck(support.product_check(new_shape) * self.dtype.elsize) except OverflowError as e: @@ -601,7 +620,7 @@ def set_shape(self, 
space, orig_array, new_shape): if len(new_shape) > NPY.MAXDIMS: raise oefmt(space.w_ValueError, - "sequence too large; must be smaller than %d", NPY.MAXDIMS) + "sequence too large; cannot be greater than %d", NPY.MAXDIMS) try: ovfcheck(support.product_check(new_shape) * self.dtype.elsize) except OverflowError as e: diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/ctors.py pypy-5.0.1+dfsg/pypy/module/micronumpy/ctors.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/ctors.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/ctors.py 2016-03-19 16:40:12.000000000 +0000 @@ -18,7 +18,7 @@ raise oefmt(space.w_TypeError, "argument 1 must be numpy.dtype, not %T", w_dtype) if w_dtype.elsize == 0: - raise oefmt(space.w_ValueError, "itemsize cannot be zero") + raise oefmt(space.w_TypeError, "Empty data-type") if not space.isinstance_w(w_state, space.w_str): raise oefmt(space.w_TypeError, "initializing object must be a string") if space.len_w(w_state) != w_dtype.elsize: diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/descriptor.py pypy-5.0.1+dfsg/pypy/module/micronumpy/descriptor.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/descriptor.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/descriptor.py 2016-03-19 16:40:12.000000000 +0000 @@ -217,6 +217,8 @@ endian = ignore if self.num == NPY.UNICODE: size >>= 2 + if self.num == NPY.OBJECT: + return "%s%s" %(endian, basic) return "%s%s%s" % (endian, basic, size) def descr_get_descr(self, space, style='descr', force_dict=False): @@ -420,6 +422,10 @@ if space.is_w(self, w_other): return True if isinstance(w_other, W_Dtype): + if self.is_object() and w_other.is_object(): + # ignore possible 'record' unions + # created from dtype(('O', spec)) + return True return space.eq_w(self.descr_reduce(space), w_other.descr_reduce(space)) return False @@ -485,7 +491,12 @@ def descr_str(self, space): if self.fields: - return space.str(self.descr_get_descr(space, style='str')) + r = 
self.descr_get_descr(space, style='str') + name = space.str_w(space.str(self.w_box_type)) + if name != "": + boxname = space.str(self.w_box_type) + r = space.newtuple([self.w_box_type, r]) + return space.str(r) elif self.subdtype is not None: return space.str(space.newtuple([ self.subdtype.descr_get_str(space), @@ -497,8 +508,13 @@ return self.descr_get_name(space) def descr_repr(self, space): + if isinstance(self.itemtype, types.CharType): + return space.wrap("dtype('S1')") if self.fields: r = self.descr_get_descr(space, style='repr') + name = space.str_w(space.str(self.w_box_type)) + if name != "": + r = space.newtuple([space.wrap(self.w_box_type), r]) elif self.subdtype is not None: r = space.newtuple([self.subdtype.descr_get_str(space), self.descr_get_shape(space)]) @@ -800,8 +816,8 @@ def _usefields(space, w_dict, align): # Only for testing, a shortened version of the real _usefields allfields = [] - for fname in w_dict.iterkeys().iterator: - obj = _get_list_or_none(space, w_dict, fname) + for fname_w in space.unpackiterable(w_dict): + obj = _get_list_or_none(space, w_dict, space.str_w(fname_w)) num = space.int_w(obj[1]) if align: alignment = 0 @@ -812,8 +828,8 @@ title = space.wrap(obj[2]) else: title = space.w_None - allfields.append((space.wrap(fname), format, num, title)) - allfields.sort(key=lambda x: x[2]) + allfields.append((fname_w, format, num, title)) + #allfields.sort(key=lambda x: x[2]) names = [space.newtuple([x[0], x[3]]) for x in allfields] formats = [x[1] for x in allfields] offsets = [x[2] for x in allfields] @@ -837,12 +853,14 @@ aligned_w = _get_val_or_none(space, w_dict, 'aligned') itemsize_w = _get_val_or_none(space, w_dict, 'itemsize') if names_w is None or formats_w is None: - if we_are_translated(): + try: return get_appbridge_cache(space).call_method(space, 'numpy.core._internal', '_usefields', Arguments(space, [w_dict, space.wrap(alignment >= 0)])) - else: - return _usefields(space, w_dict, alignment >= 0) + except OperationError as 
e: + if e.match(space, space.w_ImportError): + return _usefields(space, w_dict, alignment >= 0) + raise n = len(names_w) if (n != len(formats_w) or (offsets_w is not None and n != len(offsets_w)) or @@ -882,16 +900,17 @@ def dtype_from_spec(space, w_spec, alignment): - if we_are_translated(): + w_lst = w_spec + try: w_lst = get_appbridge_cache(space).call_method(space, 'numpy.core._internal', '_commastring', Arguments(space, [w_spec])) - else: + except OperationError as e: + if not e.match(space, space.w_ImportError): + raise # handle only simple cases for testing if space.isinstance_w(w_spec, space.w_str): spec = [s.strip() for s in space.str_w(w_spec).split(',')] w_lst = space.newlist([space.wrap(s) for s in spec]) - elif space.isinstance_w(w_spec, space.w_list): - w_lst = w_spec if not space.isinstance_w(w_lst, space.w_list) or space.len_w(w_lst) < 1: raise oefmt(space.w_RuntimeError, "_commastring is not returning a list with len >= 1") @@ -942,7 +961,7 @@ shape_w = space.fixedview(w_shape) if len(shape_w) < 1: return None - elif len(shape_w) == 1 and space.isinstance_w(shape_w[0], space.w_tuple): + elif space.isinstance_w(shape_w[0], space.w_tuple): # (base_dtype, new_dtype) dtype spectification return None shape = [] @@ -997,12 +1016,17 @@ if len(spec) > 0: # this is (base_dtype, new_dtype) so just make it a union by setting both # parts' offset to 0 - try: - dtype1 = make_new_dtype(space, w_subtype, w_shape, alignment) - except: - raise - raise oefmt(space.w_NotImplementedError, - "(base_dtype, new_dtype) dtype spectification discouraged, not implemented") + w_dtype1 = make_new_dtype(space, w_subtype, w_shape, alignment) + assert isinstance(w_dtype, W_Dtype) + assert isinstance(w_dtype1, W_Dtype) + if (w_dtype.elsize != 0 and w_dtype1.elsize != 0 and + w_dtype1.elsize != w_dtype.elsize): + raise oefmt(space.w_ValueError, + 'mismatch in size of old and new data-descriptor') + retval = W_Dtype(w_dtype.itemtype, w_dtype.w_box_type, + names=w_dtype1.names[:], 
fields=w_dtype1.fields.copy(), + elsize=w_dtype1.elsize) + return retval if space.is_none(w_dtype): return cache.w_float64dtype if space.isinstance_w(w_dtype, w_subtype): @@ -1032,19 +1056,22 @@ elif space.isinstance_w(w_dtype, space.w_tuple): w_dtype0 = space.getitem(w_dtype, space.wrap(0)) w_dtype1 = space.getitem(w_dtype, space.wrap(1)) - if space.isinstance_w(w_dtype0, space.w_type) and \ - space.isinstance_w(w_dtype1, space.w_list): - #obscure api - (subclass, spec). Ignore the subclass - return make_new_dtype(space, w_subtype, w_dtype1, alignment, - copy=copy, w_shape=w_shape, w_metadata=w_metadata) - subdtype = make_new_dtype(space, w_subtype, w_dtype0, alignment, copy) - assert isinstance(subdtype, W_Dtype) - if subdtype.elsize == 0: - name = "%s%d" % (subdtype.kind, space.int_w(w_dtype1)) + # create a new dtype object + l_side = make_new_dtype(space, w_subtype, w_dtype0, alignment, copy) + assert isinstance(l_side, W_Dtype) + if l_side.elsize == 0 and space.isinstance_w(w_dtype1, space.w_int): + #(flexible_dtype, itemsize) + name = "%s%d" % (l_side.kind, space.int_w(w_dtype1)) retval = make_new_dtype(space, w_subtype, space.wrap(name), alignment, copy) - else: - retval = make_new_dtype(space, w_subtype, w_dtype0, alignment, copy, w_shape=w_dtype1) - return _set_metadata_and_copy(space, w_metadata, retval, copy) + return _set_metadata_and_copy(space, w_metadata, retval, copy) + elif (space.isinstance_w(w_dtype1, space.w_int) or + space.isinstance_w(w_dtype1, space.w_tuple) or + space.isinstance_w(w_dtype1, space.w_list) or + isinstance(w_dtype1, W_NDimArray)): + #(fixed_dtype, shape) or (base_dtype, new_dtype) + retval = make_new_dtype(space, w_subtype, l_side, alignment, + copy, w_shape=w_dtype1) + return _set_metadata_and_copy(space, w_metadata, retval, copy) elif space.isinstance_w(w_dtype, space.w_dict): return _set_metadata_and_copy(space, w_metadata, dtype_from_dict(space, w_dtype, alignment), copy) @@ -1122,7 +1149,7 @@ size = int(name[1:]) except 
ValueError: raise oefmt(space.w_TypeError, "data type not understood") - if char == NPY.CHARLTR: + if char == NPY.CHARLTR and size == 0: return W_Dtype( types.CharType(space), elsize=1, @@ -1133,7 +1160,7 @@ return new_unicode_dtype(space, size) elif char == NPY.VOIDLTR: return new_void_dtype(space, size) - assert False + raise oefmt(space.w_TypeError, 'data type "%s" not understood', name) def new_string_dtype(space, size): diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/__init__.py pypy-5.0.1+dfsg/pypy/module/micronumpy/__init__.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/__init__.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/__init__.py 2016-03-19 16:40:12.000000000 +0000 @@ -34,6 +34,7 @@ 'nditer': 'nditer.W_NDIter', 'set_docstring': 'support.descr_set_docstring', + 'VisibleDeprecationWarning': 'support.W_VisibleDeprecationWarning', } for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c @@ -42,6 +43,7 @@ from pypy.module.micronumpy.concrete import _setup _setup() + class UMathModule(MixedModule): appleveldefs = {} interpleveldefs = { @@ -138,3 +140,9 @@ 'multiarray': MultiArrayModule, 'umath': UMathModule, } + + def setup_after_space_initialization(self): + from pypy.module.micronumpy.support import W_VisibleDeprecationWarning + for name, w_type in {'VisibleDeprecationWarning': W_VisibleDeprecationWarning}.items(): + setattr(self.space, 'w_' + name, self.space.gettypefor(w_type)) + diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/ndarray.py pypy-5.0.1+dfsg/pypy/module/micronumpy/ndarray.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/ndarray.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/ndarray.py 2016-03-19 16:40:15.000000000 +0000 @@ -15,14 +15,15 @@ from pypy.module.micronumpy.arrayops import repeat, choose, put from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, \ ArrayArgumentException, wrap_impl -from 
pypy.module.micronumpy.concrete import BaseConcreteArray +from pypy.module.micronumpy.concrete import BaseConcreteArray, V_OBJECTSTORE from pypy.module.micronumpy.converters import ( multi_axis_converter, order_converter, shape_converter, searchside_converter, out_converter) from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.strides import ( get_shape_from_iterable, shape_agreement, shape_agreement_multiple, - is_c_contiguous, is_f_contiguous, calc_strides, new_view) + is_c_contiguous, is_f_contiguous, calc_strides, new_view, BooleanChunk, + SliceChunk) from pypy.module.micronumpy.casting import can_cast_array from pypy.module.micronumpy.descriptor import get_dtype_cache @@ -75,7 +76,7 @@ dtype = space.interp_w(descriptor.W_Dtype, space.call_function( space.gettypefor(descriptor.W_Dtype), w_dtype)) if (dtype.elsize != self.get_dtype().elsize or - dtype.is_flexible() or self.get_dtype().is_flexible()): + (not dtype.is_record() and self.get_dtype().is_flexible())): raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) self.implementation.set_dtype(space, dtype) @@ -107,8 +108,9 @@ arr = W_NDimArray(self.implementation.transpose(self, None)) return space.wrap(loop.tostring(space, arr)) - def getitem_filter(self, space, arr): - if arr.ndims() > 1 and arr.get_shape() != self.get_shape(): + def getitem_filter(self, space, arr, axis=0): + shape = self.get_shape() + if arr.ndims() > 1 and arr.get_shape() != shape: raise OperationError(space.w_IndexError, space.wrap( "boolean index array should have 1 dimension")) if arr.get_size() > self.get_size(): @@ -116,7 +118,14 @@ "index out of range for array")) size = loop.count_all_true(arr) if arr.ndims() == 1: - res_shape = [size] + self.get_shape()[1:] + if self.ndims() > 1 and arr.get_shape()[0] != shape[axis]: + msg = ("boolean index did not match indexed array along" + " dimension %d; dimension is %d but corresponding" + " boolean dimension is %d" % 
(axis, shape[axis], + arr.get_shape()[0])) + #warning = space.gettypefor(support.W_VisibleDeprecationWarning) + space.warn(space.wrap(msg), space.w_VisibleDeprecationWarning) + res_shape = shape[:axis] + [size] + shape[axis+1:] else: res_shape = [size] w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), @@ -142,6 +151,8 @@ def _prepare_array_index(self, space, w_index): if isinstance(w_index, W_NDimArray): return [], w_index.get_shape(), w_index.get_shape(), [w_index] + if isinstance(w_index, boxes.W_GenericBox): + return [], [1], [1], [w_index] w_lst = space.listview(w_index) for w_item in w_lst: if not (space.isinstance_w(w_item, space.w_int) or space.isinstance_w(w_item, space.w_float)): @@ -155,7 +166,14 @@ arr_index_in_shape = False prefix = [] for i, w_item in enumerate(w_lst): - if (isinstance(w_item, W_NDimArray) or + if isinstance(w_item, W_NDimArray) and w_item.get_dtype().is_bool(): + if w_item.ndims() > 0: + indexes_w[i] = w_item + else: + raise oefmt(space.w_IndexError, + "in the future, 0-d boolean arrays will be " + "interpreted as a valid boolean index") + elif (isinstance(w_item, W_NDimArray) or space.isinstance_w(w_item, space.w_list)): w_item = convert_to_array(space, w_item) if shape is None: @@ -187,7 +205,13 @@ if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) - return new_view(space, self, chunks) + copy = False + if isinstance(chunks[0], BooleanChunk): + copy = True + w_ret = new_view(space, self, chunks) + if copy: + w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order())) + return w_ret shape = res_shape + self.get_shape()[len(indexes):] w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), self.get_order(), w_instance=self) @@ -203,8 +227,24 @@ if iter_shape is None: # w_index is a list of slices chunks = self.implementation._prepare_slice_args(space, w_index) - view = new_view(space, self, chunks) - 
view.implementation.setslice(space, val_arr) + dim = -1 + view = self + for i, c in enumerate(chunks): + if isinstance(c, BooleanChunk): + dim = i + idx = c.w_idx + chunks.pop(i) + chunks.insert(0, SliceChunk(space.newslice(space.wrap(0), + space.w_None, space.w_None))) + break + if dim > 0: + view = self.implementation.swapaxes(space, self, 0, dim) + if dim >= 0: + view = new_view(space, self, chunks) + view.setitem_filter(space, idx, val_arr) + else: + view = new_view(space, self, chunks) + view.implementation.setslice(space, val_arr) return if support.product(iter_shape) == 0: return @@ -225,6 +265,8 @@ raise oefmt(space.w_IndexError, "in the future, 0-d boolean arrays will be " "interpreted as a valid boolean index") + elif isinstance(w_idx, boxes.W_GenericBox): + w_ret = self.getitem_array_int(space, w_idx) else: try: w_ret = self.implementation.descr_getitem(space, self, w_idx) @@ -278,9 +320,12 @@ def getfield(self, space, field): dtype = self.get_dtype() if field not in dtype.fields: - raise oefmt(space.w_ValueError, "field named %s not found", field) + raise oefmt(space.w_ValueError, "no field of name %s", field) arr = self.implementation ofs, subdtype = arr.dtype.fields[field][:2] + if subdtype.is_object() and arr.gcstruct is V_OBJECTSTORE: + raise oefmt(space.w_NotImplementedError, + "cannot read object from array with no gc hook") # ofs only changes start # create a view of the original array by extending # the shape, strides, backstrides of the array @@ -489,10 +534,8 @@ numpy.swapaxes : equivalent function """ if axis1 == axis2: - return self + return self.descr_view(space) n = self.ndims() - if n <= 1: - return self if axis1 < 0: axis1 += n if axis2 < 0: @@ -501,6 +544,8 @@ raise oefmt(space.w_ValueError, "bad axis1 argument to swapaxes") if axis2 < 0 or axis2 >= n: raise oefmt(space.w_ValueError, "bad axis2 argument to swapaxes") + if n <= 1: + return self return self.implementation.swapaxes(space, self, axis1, axis2) def descr_nonzero(self, space): 
@@ -512,8 +557,12 @@ return self.get_scalar_value().item(space) l_w = [] for i in range(self.get_shape()[0]): - l_w.append(space.call_method(self.descr_getitem(space, - space.wrap(i)), "tolist")) + item_w = self.descr_getitem(space, space.wrap(i)) + if (isinstance(item_w, W_NDimArray) or + isinstance(item_w, boxes.W_GenericBox)): + l_w.append(space.call_method(item_w, "tolist")) + else: + l_w.append(item_w) return space.newlist(l_w) def descr_ravel(self, space, w_order=None): @@ -889,6 +938,10 @@ return return self.implementation.sort(space, w_axis, w_order) + def descr_partition(self, space, __args__): + return get_appbridge_cache(space).call_method( + space, 'numpy.core._partition_use', 'partition', __args__.prepend(self)) + def descr_squeeze(self, space, w_axis=None): cur_shape = self.get_shape() if not space.is_none(w_axis): @@ -899,7 +952,7 @@ if cur_shape[i] != 1: raise OperationError(space.w_ValueError, space.wrap( "cannot select an axis to squeeze out " - "which has size greater than one")) + "which has size not equal to one")) else: new_shape.append(cur_shape[i]) else: @@ -995,7 +1048,7 @@ # --------------------- operations ---------------------------- # TODO: support all kwargs like numpy ufunc_object.c sig = None - cast = 'unsafe' + cast = 'safe' extobj = None @@ -1374,7 +1427,7 @@ shape = shape_converter(space, w_shape, dtype) if len(shape) > NPY.MAXDIMS: raise oefmt(space.w_ValueError, - "sequence too large; must be smaller than %d", NPY.MAXDIMS) + "sequence too large; cannot be greater than %d", NPY.MAXDIMS) if not space.is_none(w_buffer): if (not space.is_none(w_strides)): strides = [space.int_w(w_i) for w_i in @@ -1613,6 +1666,7 @@ argsort = interp2app(W_NDimArray.descr_argsort), sort = interp2app(W_NDimArray.descr_sort), + partition = interp2app(W_NDimArray.descr_partition), astype = interp2app(W_NDimArray.descr_astype), base = GetSetProperty(W_NDimArray.descr_get_base), byteswap = interp2app(W_NDimArray.descr_byteswap), diff -Nru 
pypy-4.0.1+dfsg/pypy/module/micronumpy/strides.py pypy-5.0.1+dfsg/pypy/module/micronumpy/strides.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/strides.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/strides.py 2016-03-19 16:40:12.000000000 +0000 @@ -77,11 +77,36 @@ backstride = base_stride * max(0, base_length - 1) return 0, base_length, base_stride, backstride +class BooleanChunk(BaseChunk): + input_dim = 1 + out_dim = 1 + def __init__(self, w_idx): + self.w_idx = w_idx + + def compute(self, space, base_length, base_stride): + raise oefmt(space.w_NotImplementedError, 'cannot reach') def new_view(space, w_arr, chunks): arr = w_arr.implementation - r = calculate_slice_strides(space, arr.shape, arr.start, arr.get_strides(), - arr.get_backstrides(), chunks) + dim = -1 + for i, c in enumerate(chunks): + if isinstance(c, BooleanChunk): + dim = i + break + if dim >= 0: + # filter by axis dim + filtr = chunks[dim] + assert isinstance(filtr, BooleanChunk) + # XXX this creates a new array, and fails in setitem + w_arr = w_arr.getitem_filter(space, filtr.w_idx, axis=dim) + arr = w_arr.implementation + chunks[dim] = SliceChunk(space.newslice(space.wrap(0), + space.w_None, space.w_None)) + r = calculate_slice_strides(space, arr.shape, arr.start, + arr.get_strides(), arr.get_backstrides(), chunks) + else: + r = calculate_slice_strides(space, arr.shape, arr.start, + arr.get_strides(), arr.get_backstrides(), chunks) shape, start, strides, backstrides = r return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], shape[:], arr, w_arr) @@ -127,7 +152,7 @@ jit.isconstant(len(chunks))) def calculate_slice_strides(space, shape, start, strides, backstrides, chunks): """ - Note: `chunks` must contain exactly one EllipsisChunk object. + Note: `chunks` can contain at most one EllipsisChunk object. 
""" size = 0 used_dims = 0 diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/support.py pypy-5.0.1+dfsg/pypy/module/micronumpy/support.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/support.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/support.py 2016-03-19 16:40:12.000000000 +0000 @@ -8,6 +8,17 @@ from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objspace import StdObjSpace from pypy.module.micronumpy import constants as NPY +from pypy.module.exceptions.interp_exceptions import _new_exception, W_UserWarning + +W_VisibleDeprecationWarning = _new_exception('VisibleDeprecationWarning', W_UserWarning, + """Visible deprecation warning. + + By default, python will not show deprecation warnings, so this class + can be used when a very visible warning is helpful, for example because + the usage is most likely a user bug. + + """) + def issequence_w(space, w_obj): from pypy.module.micronumpy.base import W_NDimArray diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/test/test_deprecations.py pypy-5.0.1+dfsg/pypy/module/micronumpy/test/test_deprecations.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/test/test_deprecations.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/test/test_deprecations.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,33 @@ +import py +import sys + +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestDeprecations(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + + def test_getitem(self): + import numpy as np + import warnings, sys + warnings.simplefilter('error', np.VisibleDeprecationWarning) + try: + arr = np.ones((5, 4, 3)) + index = np.array([True]) + raises(np.VisibleDeprecationWarning, arr.__getitem__, index) + + index = np.array([False] * 6) + raises(np.VisibleDeprecationWarning, arr.__getitem__, index) + + index = np.zeros((4, 4), dtype=bool) + if '__pypy__' in 
sys.builtin_module_names: + # boolean indexing matches the dims in index + # to the first index.ndims in arr, not implemented in pypy yet + raises(IndexError, arr.__getitem__, index) + raises(IndexError, arr.__getitem__, (slice(None), index)) + else: + raises(np.VisibleDeprecationWarning, arr.__getitem__, index) + raises(np.VisibleDeprecationWarning, arr.__getitem__, (slice(None), index)) + finally: + warnings.simplefilter('default', np.VisibleDeprecationWarning) + diff -Nru pypy-4.0.1+dfsg/pypy/module/micronumpy/test/test_dtypes.py pypy-5.0.1+dfsg/pypy/module/micronumpy/test/test_dtypes.py --- pypy-4.0.1+dfsg/pypy/module/micronumpy/test/test_dtypes.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/micronumpy/test/test_dtypes.py 2016-03-19 16:40:15.000000000 +0000 @@ -345,14 +345,29 @@ def test_can_subclass(self): import numpy as np + import sys, pickle class xyz(np.void): pass assert np.dtype(xyz).name == 'xyz' # another obscure API, used in numpy record.py - # it seems numpy throws away the subclass type and parses the spec a = np.dtype((xyz, [('x', 'int32'), ('y', 'float32')])) - assert repr(a) == "dtype([('x', ' 0 @@ -648,6 +657,7 @@ assert m[0xFFFFFFF] == b'A' finally: m.close() + test_large_offset.is_large = True def test_large_filesize(self): import mmap @@ -665,6 +675,7 @@ assert m.size() == 0x180000000 finally: m.close() + test_large_filesize.is_large = True def test_all(self): # this is a global test, ported from test_mmap.py diff -Nru pypy-4.0.1+dfsg/pypy/module/posix/__init__.py pypy-5.0.1+dfsg/pypy/module/posix/__init__.py --- pypy-4.0.1+dfsg/pypy/module/posix/__init__.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/posix/__init__.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,5 @@ from pypy.interpreter.mixedmodule import MixedModule -from rpython.rtyper.module.ll_os import RegisterOs +from rpython.rlib import rposix import os exec 'import %s as posix' % os.name @@ -172,7 +172,7 @@ if hasattr(os, 
'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - for name in RegisterOs.w_star: + for name in rposix.WAIT_MACROS: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' + name diff -Nru pypy-4.0.1+dfsg/pypy/module/posix/interp_posix.py pypy-5.0.1+dfsg/pypy/module/posix/interp_posix.py --- pypy-4.0.1+dfsg/pypy/module/posix/interp_posix.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/posix/interp_posix.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,12 +1,11 @@ import os import sys -from rpython.rlib import rposix, objectmodel, rurandom +from rpython.rlib import rposix, rposix_stat +from rpython.rlib import objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.module import ll_os_stat -from rpython.rtyper.module.ll_os import RegisterOs from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 @@ -43,6 +42,8 @@ return space.str0_w(w_obj) class FileEncoder(object): + is_unicode = True + def __init__(self, space, w_obj): self.space = space self.w_obj = w_obj @@ -54,6 +55,8 @@ return self.space.unicode0_w(self.w_obj) class FileDecoder(object): + is_unicode = False + def __init__(self, space, w_obj): self.space = space self.w_obj = w_obj @@ -212,13 +215,13 @@ # ____________________________________________________________ -STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) +STAT_FIELDS = unrolling_iterable(enumerate(rposix_stat.STAT_FIELDS)) -STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(rposix_stat.STATVFS_FIELDS)) def build_stat_result(space, st): FIELDS = STAT_FIELDS # also when not translating at all - lst = [None] * ll_os_stat.N_INDEXABLE_FIELDS + lst = [None] * rposix_stat.N_INDEXABLE_FIELDS w_keywords = space.newdict() 
stat_float_times = space.fromcache(StatState).stat_float_times for i, (name, TYPE) in FIELDS: @@ -226,7 +229,7 @@ if name in ('st_atime', 'st_mtime', 'st_ctime'): value = int(value) # rounded to an integer for indexed access w_value = space.wrap(value) - if i < ll_os_stat.N_INDEXABLE_FIELDS: + if i < rposix_stat.N_INDEXABLE_FIELDS: lst[i] = w_value else: space.setitem(w_keywords, space.wrap(name), w_value) @@ -254,7 +257,7 @@ def build_statvfs_result(space, st): - vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + vals_w = [None] * len(rposix_stat.STATVFS_FIELDS) for i, (name, _) in STATVFS_FIELDS: vals_w[i] = space.wrap(getattr(st, name)) w_tuple = space.newtuple(vals_w) @@ -267,7 +270,7 @@ """Perform a stat system call on the file referenced to by an open file descriptor.""" try: - st = os.fstat(fd) + st = rposix_stat.fstat(fd) except OSError, e: raise wrap_oserror(space, e) else: @@ -289,16 +292,16 @@ """ try: - st = dispatch_filename(rposix.stat)(space, w_path) + st = dispatch_filename(rposix_stat.stat)(space, w_path) except OSError, e: raise wrap_oserror2(space, e, w_path) else: return build_stat_result(space, st) def lstat(space, w_path): - "Like stat(path), but do no follow symbolic links." + "Like stat(path), but do not follow symbolic links." 
try: - st = dispatch_filename(rposix.lstat)(space, w_path) + st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: raise wrap_oserror2(space, e, w_path) else: @@ -327,7 +330,7 @@ @unwrap_spec(fd=c_int) def fstatvfs(space, fd): try: - st = os.fstatvfs(fd) + st = rposix_stat.fstatvfs(fd) except OSError as e: raise wrap_oserror(space, e) else: @@ -336,7 +339,7 @@ def statvfs(space, w_path): try: - st = dispatch_filename(rposix.statvfs)(space, w_path) + st = dispatch_filename(rposix_stat.statvfs)(space, w_path) except OSError as e: raise wrap_oserror2(space, e, w_path) else: @@ -427,11 +430,11 @@ try: if space.isinstance_w(w_path, space.w_unicode): path = FileEncoder(space, w_path) - fullpath = rposix._getfullpathname(path) + fullpath = rposix.getfullpathname(path) w_fullpath = space.wrap(fullpath) else: path = space.str0_w(w_path) - fullpath = rposix._getfullpathname(path) + fullpath = rposix.getfullpathname(path) w_fullpath = space.wrap(fullpath) except OSError, e: raise wrap_oserror2(space, e, w_path) @@ -661,7 +664,7 @@ def kill(space, pid, sig): "Kill a process with a signal." try: - rposix.os_kill(pid, sig) + rposix.kill(pid, sig) except OSError, e: raise wrap_oserror(space, e) @@ -677,7 +680,7 @@ """Abort the interpreter immediately. 
This 'dumps core' or otherwise fails in the hardest way possible on the hosting operating system.""" import signal - rposix.os_kill(os.getpid(), signal.SIGABRT) + rposix.kill(os.getpid(), signal.SIGABRT) @unwrap_spec(src='str0', dst='str0') def link(space, src, dst): @@ -1199,7 +1202,7 @@ raise wrap_oserror(space, e) def declare_new_w_star(name): - if name in RegisterOs.w_star_returning_int: + if name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG'): @unwrap_spec(status=c_int) def WSTAR(space, status): return space.wrap(getattr(os, name)(status)) @@ -1211,7 +1214,7 @@ WSTAR.func_name = name return WSTAR -for name in RegisterOs.w_star: +for name in rposix.WAIT_MACROS: if hasattr(os, name): func = declare_new_w_star(name) globals()[name] = func diff -Nru pypy-4.0.1+dfsg/pypy/module/posix/test/test_posix2.py pypy-5.0.1+dfsg/pypy/module/posix/test/test_posix2.py --- pypy-4.0.1+dfsg/pypy/module/posix/test/test_posix2.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/posix/test/test_posix2.py 2016-03-19 16:40:12.000000000 +0000 @@ -6,8 +6,8 @@ from rpython.tool.udir import udir from pypy.tool.pytest.objspace import gettestobjspace from pypy.conftest import pypydir -from rpython.rtyper.module.ll_os import RegisterOs from rpython.translator.c.test.test_extfunc import need_sparse_files +from rpython.rlib import rposix import os import py import sys @@ -93,6 +93,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_posix_is_pypy_s(self): @@ -576,7 +582,7 @@ raises(TypeError, "os.utime('xxx', 3)") raises(OSError, "os.utime('somefilewhichihopewouldneverappearhere', None)") - for name in 
RegisterOs.w_star: + for name in rposix.WAIT_MACROS: if hasattr(os, name): values = [0, 1, 127, 128, 255] code = py.code.Source(""" diff -Nru pypy-4.0.1+dfsg/pypy/module/__pypy__/__init__.py pypy-5.0.1+dfsg/pypy/module/__pypy__/__init__.py --- pypy-4.0.1+dfsg/pypy/module/__pypy__/__init__.py 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/__pypy__/__init__.py 2016-03-19 16:40:11.000000000 +0000 @@ -86,8 +86,10 @@ 'specialized_zip_2_lists' : 'interp_magic.specialized_zip_2_lists', 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', + 'set_code_callback' : 'interp_magic.set_code_callback', 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', + 'decode_long' : 'interp_magic.decode_long', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff -Nru pypy-4.0.1+dfsg/pypy/module/__pypy__/interp_magic.py pypy-5.0.1+dfsg/pypy/module/__pypy__/interp_magic.py --- pypy-4.0.1+dfsg/pypy/module/__pypy__/interp_magic.py 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/__pypy__/interp_magic.py 2016-03-19 16:40:11.000000000 +0000 @@ -1,5 +1,6 @@ -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.pycode import CodeHookCache from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib.objectmodel import we_are_translated @@ -92,7 +93,7 @@ Return the underlying strategy currently used by a dict, list or set object """ if isinstance(w_obj, W_DictMultiObject): - name = w_obj.strategy.__class__.__name__ + name = w_obj.get_strategy().__class__.__name__ elif isinstance(w_obj, W_ListObject): name = w_obj.strategy.__class__.__name__ elif isinstance(w_obj, W_BaseSetObject): @@ -151,3 +152,19 @@ def 
specialized_zip_2_lists(space, w_list1, w_list2): from pypy.objspace.std.specialisedtupleobject import specialized_zip_2_lists return specialized_zip_2_lists(space, w_list1, w_list2) + +def set_code_callback(space, w_callable): + cache = space.fromcache(CodeHookCache) + if space.is_none(w_callable): + cache._code_hook = None + else: + cache._code_hook = w_callable + +@unwrap_spec(string=str, byteorder=str, signed=int) +def decode_long(space, string, byteorder='little', signed=1): + from rpython.rlib.rbigint import rbigint, InvalidEndiannessError + try: + result = rbigint.frombytes(string, byteorder, bool(signed)) + except InvalidEndiannessError: + raise oefmt(space.w_ValueError, "invalid byteorder argument") + return space.newlong_from_rbigint(result) diff -Nru pypy-4.0.1+dfsg/pypy/module/__pypy__/test/test_magic.py pypy-5.0.1+dfsg/pypy/module/__pypy__/test/test_magic.py --- pypy-4.0.1+dfsg/pypy/module/__pypy__/test/test_magic.py 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/__pypy__/test/test_magic.py 2016-03-19 16:40:11.000000000 +0000 @@ -13,3 +13,37 @@ # sys.dont_write_bytecode = d __pypy__.save_module_content_for_future_reload(sys) + + def test_new_code_hook(self): + l = [] + + def callable(code): + l.append(code) + + import __pypy__ + __pypy__.set_code_callback(callable) + d = {} + try: + exec """ +def f(): + pass +""" in d + finally: + __pypy__.set_code_callback(None) + assert d['f'].__code__ in l + + def test_decode_long(self): + from __pypy__ import decode_long + assert decode_long('') == 0 + assert decode_long('\xff\x00') == 255 + assert decode_long('\xff\x7f') == 32767 + assert decode_long('\x00\xff') == -256 + assert decode_long('\x00\x80') == -32768 + assert decode_long('\x80') == -128 + assert decode_long('\x7f') == 127 + assert decode_long('\x55' * 97) == (1 << (97 * 8)) // 3 + assert decode_long('\x00\x80', 'big') == 128 + assert decode_long('\xff\x7f', 'little', False) == 32767 + assert decode_long('\x00\x80', 'little', False) 
== 32768 + assert decode_long('\x00\x80', 'little', True) == -32768 + raises(ValueError, decode_long, '', 'foo') diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/hooks.py pypy-5.0.1+dfsg/pypy/module/pypyjit/hooks.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/hooks.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/hooks.py 2016-03-19 16:40:12.000000000 +0000 @@ -28,6 +28,23 @@ finally: cache.in_recursion = False + def on_trace_too_long(self, jitdriver, greenkey, greenkey_repr): + space = self.space + cache = space.fromcache(Cache) + if cache.in_recursion: + return + if space.is_true(cache.w_trace_too_long_hook): + cache.in_recursion = True + try: + try: + space.call_function(cache.w_trace_too_long_hook, + space.wrap(jitdriver.name), + wrap_greenkey(space, jitdriver, greenkey, greenkey_repr)) + except OperationError, e: + e.write_unraisable(space, "jit hook", cache.w_trace_too_long_hook) + finally: + cache.in_recursion = False + def after_compile(self, debug_info): self._compile_hook(debug_info, is_bridge=False) diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/__init__.py pypy-5.0.1+dfsg/pypy/module/pypyjit/__init__.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/__init__.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/__init__.py 2016-03-19 16:40:12.000000000 +0000 @@ -14,6 +14,7 @@ 'trace_next_iteration_hash': 'interp_jit.trace_next_iteration_hash', 'set_compile_hook': 'interp_resop.set_compile_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', + 'set_trace_too_long_hook': 'interp_resop.set_trace_too_long_hook', 'get_stats_snapshot': 'interp_resop.get_stats_snapshot', 'get_stats_asmmemmgr': 'interp_resop.get_stats_asmmemmgr', # those things are disabled because they have bugs, but if @@ -23,6 +24,7 @@ #'enable_debug': 'interp_resop.enable_debug', #'disable_debug': 'interp_resop.disable_debug', 'ResOperation': 'interp_resop.WrappedOp', + 'GuardOp': 'interp_resop.GuardOp', 'DebugMergePoint': 
'interp_resop.DebugMergePoint', 'JitLoopInfo': 'interp_resop.W_JitLoopInfo', 'PARAMETER_DOCS': 'space.wrap(rpython.rlib.jit.PARAMETER_DOCS)', diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/interp_jit.py pypy-5.0.1+dfsg/pypy/module/pypyjit/interp_jit.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/interp_jit.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/interp_jit.py 2016-03-19 16:40:12.000000000 +0000 @@ -54,7 +54,8 @@ get_unique_id = get_unique_id, should_unroll_one_iteration = should_unroll_one_iteration, - name='pypyjit') + name='pypyjit', + is_recursive=True) class __extend__(PyFrame): diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/interp_resop.py pypy-5.0.1+dfsg/pypy/module/pypyjit/interp_resop.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/interp_resop.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/interp_resop.py 2016-03-19 16:40:12.000000000 +0000 @@ -10,6 +10,7 @@ from rpython.rtyper.rclass import OBJECT #from rpython.jit.metainterp.resoperation import rop from rpython.rlib.nonconst import NonConstant +from rpython.rlib.rarithmetic import r_uint from rpython.rlib import jit_hooks from rpython.rlib.jit import Counters from rpython.rlib.objectmodel import compute_unique_id @@ -22,6 +23,7 @@ def __init__(self, space): self.w_compile_hook = space.w_None self.w_abort_hook = space.w_None + self.w_trace_too_long_hook = space.w_None def getno(self): self.no += 1 @@ -79,6 +81,21 @@ cache.w_abort_hook = w_hook cache.in_recursion = NonConstant(False) +def set_trace_too_long_hook(space, w_hook): + """ set_trace_too_long_hook(hook) + + Set a hook (callable) that will be called each time we abort + tracing because the trace is too long. 
+ + The hook will be called with the signature: + + hook(jitdriver_name, greenkey) + """ + cache = space.fromcache(Cache) + assert w_hook is not None + cache.w_trace_too_long_hook = w_hook + cache.in_recursion = NonConstant(False) + def wrap_oplist(space, logops, operations, ops_offset=None): # this function is called from the JIT from rpython.jit.metainterp.resoperation import rop @@ -103,6 +120,9 @@ op.getarg(1).getint(), op.getarg(2).getint(), w_greenkey)) + elif op.is_guard(): + l_w.append(GuardOp(name, ofs, logops.repr_of_resop(op), + op.getdescr().get_jitcounter_hash())) else: l_w.append(WrappedOp(name, ofs, logops.repr_of_resop(op))) return l_w @@ -111,6 +131,10 @@ def descr_new_resop(space, w_tp, name, offset=-1, repr=''): return WrappedOp(name, offset, repr) +@unwrap_spec(offset=int, repr=str, name=str, hash=r_uint) +def descr_new_guardop(space, w_tp, name, offset=-1, repr='', hash=r_uint(0)): + return GuardOp(name, offset, repr, hash) + @unwrap_spec(repr=str, name=str, jd_name=str, call_depth=int, call_id=int) def descr_new_dmp(space, w_tp, name, repr, jd_name, call_depth, call_id, w_greenkey): @@ -133,6 +157,11 @@ def descr_name(self, space): return space.wrap(self.name) +class GuardOp(WrappedOp): + def __init__(self, name, offset, repr_of_resop, hash): + WrappedOp.__init__(self, name, offset, repr_of_resop) + self.hash = hash + class DebugMergePoint(WrappedOp): """ A class representing Debug Merge Point - the entry point to a jitted loop. 
@@ -170,6 +199,17 @@ ) WrappedOp.typedef.acceptable_as_base_class = False +GuardOp.typedef = TypeDef( + 'GuardOp', + __doc__ = GuardOp.__doc__, + __new__ = interp2app(descr_new_guardop), + __repr__ = interp2app(GuardOp.descr_repr), + name = GetSetProperty(GuardOp.descr_name), + offset = interp_attrproperty("offset", cls=GuardOp), + hash = interp_attrproperty("hash", cls=GuardOp), + ) +GuardOp.typedef.acceptable_as_base_class = False + DebugMergePoint.typedef = TypeDef( 'DebugMergePoint', WrappedOp.typedef, __new__ = interp2app(descr_new_dmp), diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test/test_jit_hook.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test/test_jit_hook.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test/test_jit_hook.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test/test_jit_hook.py 2016-03-19 16:40:12.000000000 +0000 @@ -65,6 +65,14 @@ if i != 1: offset[op] = i + class FailDescr(BasicFailDescr): + def get_jitcounter_hash(self): + from rpython.rlib.rarithmetic import r_uint + return r_uint(13) + + oplist[-1].setdescr(FailDescr()) + oplist[-2].setdescr(FailDescr()) + token = JitCellToken() token.number = 0 di_loop = JitDebugInfo(MockJitDriverSD, logger, token, oplist, 'loop', @@ -73,7 +81,7 @@ oplist, 'loop', greenkey) di_loop.asminfo = AsmInfo(offset, 0x42, 12) di_bridge = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), - oplist, 'bridge', fail_descr=BasicFailDescr()) + oplist, 'bridge', fail_descr=FailDescr()) di_bridge.asminfo = AsmInfo(offset, 0, 0) def interp_on_compile(): diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_00_model.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_00_model.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_00_model.py 2016-03-19 16:40:12.000000000 +0000 @@ -68,9 +68,12 @@ pipe.returncode,)) if stderr.startswith('SKIP:'): py.test.skip(stderr) - if 
stderr.startswith('debug_alloc.h:'): # lldebug builds - stderr = '' + #if stderr.startswith('debug_alloc.h:'): # lldebug builds + # stderr = '' #assert not stderr + if stderr: + print '*** stderr of the subprocess: ***' + print stderr # if discard_stdout_before_last_line: stdout = stdout.splitlines(True)[-1] @@ -526,7 +529,7 @@ log = self.run(f) loop, = log.loops_by_filename(self.filepath) call_ops = log.opnames(loop.ops_by_id('call')) - assert call_ops == ['force_token'] # it does not follow inlining + assert call_ops == ['guard_not_invalidated', 'force_token'] # it does not follow inlining # add_ops = log.opnames(loop.ops_by_id('add')) assert add_ops == ['int_add'] @@ -534,9 +537,10 @@ ops = log.opnames(loop.allops()) assert ops == [ # this is the actual loop - 'int_lt', 'guard_true', 'force_token', 'int_add', + 'int_lt', 'guard_true', + 'guard_not_invalidated', 'force_token', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_call.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_call.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_call.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_call.py 2016-03-19 16:40:12.000000000 +0000 @@ -72,8 +72,6 @@ # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", - "guard_value", - "getfield_gc_r", "guard_value", "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] @@ -85,9 +83,9 @@ p38 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p39 = getfield_gc_r(p38, descr=) i40 = force_token() - p41 = getfield_gc_pure_r(p38, descr=) + p41 = getfield_gc_r(p38, descr=) guard_value(p41, ConstPtr(ptr42), descr=...) 
- i42 = getfield_gc_pure_i(p38, descr=) + i42 = getfield_gc_i(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) i50 = force_token() @@ -200,6 +198,7 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ + guard_not_invalidated? i14 = force_token() i16 = force_token() """) @@ -222,7 +221,7 @@ loop, = log.loops_by_id('call') ops = log.opnames(loop.ops_by_id('call')) guards = [ops for ops in ops if ops.startswith('guard')] - assert guards == ["guard_no_overflow"] + assert guards == ["guard_not_invalidated", "guard_no_overflow"] def test_kwargs(self): # this is not a very precise test, could be improved @@ -281,6 +280,7 @@ assert log.result == 13000 loop0, = log.loops_by_id('g1') assert loop0.match_by_id('g1', """ + guard_not_invalidated? i20 = force_token() i22 = int_add_ovf(i8, 3) guard_no_overflow(descr=...) @@ -435,24 +435,21 @@ guard_isnull(p5, descr=...) guard_nonnull_class(p12, ConstClass(W_IntObject), descr=...) guard_value(p2, ConstPtr(ptr21), descr=...) - i22 = getfield_gc_pure_i(p12, descr=) + i22 = getfield_gc_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) - guard_value(p7, ConstPtr(ptr25), descr=...) - p26 = getfield_gc_r(p7, descr=) - guard_value(p26, ConstPtr(ptr27), descr=...) guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) p31 = force_token() - p32 = getfield_gc_pure_r(p29, descr=) + p32 = getfield_gc_r(p29, descr=) guard_value(p32, ConstPtr(ptr33), descr=...) - i34 = getfield_gc_pure_i(p29, descr=) + i34 = getfield_gc_i(p29, descr=) i35 = int_is_zero(i34) guard_true(i35, descr=...) p37 = getfield_gc_r(ConstPtr(ptr36), descr=) guard_nonnull_class(p37, ConstClass(W_IntObject), descr=...) - i39 = getfield_gc_pure_i(p37, descr=) + i39 = getfield_gc_i(p37, descr=) i40 = int_add_ovf(i22, i39) guard_no_overflow(descr=...) 
--TICK-- @@ -469,9 +466,10 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure_i(p6, descr=) + i8 = getfield_gc_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) + guard_not_invalidated? i11 = force_token() i13 = int_add(i8, 1) --TICK-- diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_containers.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_containers.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_containers.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_containers.py 2016-03-19 16:40:12.000000000 +0000 @@ -66,6 +66,7 @@ guard_not_invalidated(descr=...) p10 = call_r(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) + guard_nonnull(p10, descr=...) i12 = call_i(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) p15 = new_array_clear(16, descr=) @@ -83,7 +84,7 @@ guard_no_exception(descr=...) p20 = new_with_vtable(descr=...) call_n(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) - setfield_gc(p20, i5, descr=) + setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) i23 = call_i(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) @@ -92,7 +93,7 @@ p28 = getfield_gc_r(p13, descr=) p29 = getinteriorfield_gc_r(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure_i(p29, descr=) + i31 = getfield_gc_i(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) 
i34 = int_add_ovf(i32, 1) diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_globals.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_globals.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_globals.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_globals.py 2016-03-19 16:40:12.000000000 +0000 @@ -16,9 +16,5 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p12 = getfield_gc_r(p10, descr=) - guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) - p19 = getfield_gc_r(ConstPtr(p17), descr=) - guard_value(p19, ConstPtr(ptr20), descr=...) """) diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_instance.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_instance.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_instance.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_instance.py 2016-03-19 16:40:12.000000000 +0000 @@ -124,7 +124,7 @@ setfield_gc(ConstPtr(ptr39), i59, descr=...) i62 = int_lt(i61, 0) guard_false(i62, descr=...) - jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, p100, descr=...) + jump(..., descr=...) """) def test_mutate_class(self): @@ -183,7 +183,7 @@ setfield_gc(p77, ConstPtr(null), descr=...) setfield_gc(p77, ConstPtr(ptr42), descr=...) setfield_gc(ConstPtr(ptr69), p77, descr=...) - jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, p100, descr=...) + jump(..., descr=...) 
""") diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py 2016-03-19 16:40:15.000000000 +0000 @@ -101,13 +101,13 @@ loop = log._filter(log.loops[0]) assert loop.match(""" guard_class(p1, #, descr=...) - p4 = getfield_gc_pure_r(p1, descr=) + p4 = getfield_gc_r(p1, descr=) i5 = getfield_gc_i(p0, descr=) - p6 = getfield_gc_pure_r(p4, descr=) - p7 = getfield_gc_pure_r(p6, descr=) + p6 = getfield_gc_r(p4, descr=) + p7 = getfield_gc_r(p6, descr=) guard_class(p7, ConstClass(Float64), descr=...) - i9 = getfield_gc_pure_i(p4, descr=) - i10 = getfield_gc_pure_i(p6, descr=) + i9 = getfield_gc_i(p4, descr=) + i10 = getfield_gc_i(p6, descr=) i12 = int_eq(i10, 61) i14 = int_eq(i10, 60) i15 = int_or(i12, i14) @@ -117,28 +117,28 @@ i18 = float_ne(f16, 0.000000) guard_true(i18, descr=...) guard_nonnull_class(p2, ConstClass(W_BoolBox), descr=...) - i20 = getfield_gc_pure_i(p2, descr=) + i20 = getfield_gc_i(p2, descr=) i21 = int_is_true(i20) guard_false(i21, descr=...) i22 = getfield_gc_i(p0, descr=) - i23 = getfield_gc_pure_i(p1, descr=) + i23 = getfield_gc_i(p1, descr=) guard_true(i23, descr=...) i25 = int_add(i22, 1) - p26 = getfield_gc_pure_r(p0, descr=) - i27 = getfield_gc_pure_i(p1, descr=) + p26 = getfield_gc_r(p0, descr=) + i27 = getfield_gc_i(p1, descr=) i28 = int_is_true(i27) guard_true(i28, descr=...) - i29 = getfield_gc_pure_i(p6, descr=) + i29 = getfield_gc_i(p6, descr=) guard_value(i29, 8, descr=...) i30 = int_add(i5, 8) - i31 = getfield_gc_pure_i(p1, descr=) + i31 = getfield_gc_i(p1, descr=) i32 = int_ge(i25, i31) guard_false(i32, descr=...) p34 = new_with_vtable(descr=...) 
{{{ - setfield_gc(p34, p1, descr=) + setfield_gc(p34, p1, descr=) setfield_gc(p34, i25, descr=) - setfield_gc(p34, p26, descr=) + setfield_gc(p34, p26, descr=) setfield_gc(p34, i30, descr=) }}} jump(..., descr=...) diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_min_max.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_min_max.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_min_max.py 2016-03-19 16:40:12.000000000 +0000 @@ -54,7 +54,7 @@ i19 = int_add(i11, 1) setfield_gc(p2, i19, descr=...) guard_nonnull_class(p18, ConstClass(W_IntObject), descr=...) - i20 = getfield_gc_pure_i(p18, descr=...) + i20 = getfield_gc_i(p18, descr=...) i21 = int_gt(i20, i14) guard_true(i21, descr=...) jump(..., descr=...) diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_misc.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_misc.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_misc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_misc.py 2016-03-19 16:40:12.000000000 +0000 @@ -113,7 +113,7 @@ i12 = int_is_true(i4) guard_true(i12, descr=...) guard_not_invalidated(descr=...) - i10p = getfield_gc_pure_i(p10, descr=...) + i10p = getfield_gc_i(p10, descr=...) i10 = int_mul_ovf(2, i10p) guard_no_overflow(descr=...) i14 = int_add_ovf(i13, i10) @@ -145,9 +145,9 @@ i15 = int_lt(i10, i11) guard_true(i15, descr=...) i17 = int_add(i10, 1) - i18 = force_token() setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) guard_not_invalidated(descr=...) + i18 = force_token() i84 = int_sub(i14, 1) i21 = int_lt(i10, 0) guard_false(i21, descr=...) @@ -178,9 +178,9 @@ i16 = int_ge(i11, i12) guard_false(i16, descr=...) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? 
+ i21 = force_token() i88 = int_sub(i9, 1) i25 = int_ge(i11, i9) guard_false(i25, descr=...) @@ -211,9 +211,9 @@ i17 = int_mul(i11, i14) i18 = int_add(i15, i17) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i95 = int_sub(i9, 1) i23 = int_lt(i18, 0) guard_false(i23, descr=...) diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_string.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_string.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_string.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_string.py 2016-03-19 16:40:12.000000000 +0000 @@ -28,7 +28,6 @@ guard_true(i14, descr=...) guard_not_invalidated(descr=...) i16 = int_eq(i6, %d) - guard_false(i16, descr=...) i15 = int_mod(i6, i10) i17 = int_rshift(i15, %d) i18 = int_and(i10, i17) @@ -68,7 +67,6 @@ guard_true(i11, descr=...) guard_not_invalidated(descr=...) i13 = int_eq(i6, %d) # value provided below - guard_false(i13, descr=...) i15 = int_mod(i6, 10) i17 = int_rshift(i15, %d) # value provided below i18 = int_and(10, i17) @@ -82,7 +80,7 @@ strsetitem(p25, 0, i23) p93 = call_r(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) - i95 = getfield_gc_pure_i(p93, descr=) + i95 = getfield_gc_i(p93, descr=) i96 = int_gt(i95, #) guard_false(i96, descr=...) i94 = call_i(ConstClass(rbigint._toint_helper), p93, descr=) @@ -135,6 +133,7 @@ guard_no_exception(descr=...) p95 = call_r(..., descr=) # ll_build guard_no_exception(descr=...) + guard_nonnull(p95, descr=...) i96 = strlen(p95) i97 = int_add_ovf(i71, i96) guard_no_overflow(descr=...) @@ -143,43 +142,6 @@ jump(..., descr=...) 
""") - def test_getattr_promote(self): - def main(n): - class A(object): - def meth_a(self): - return 1 - def meth_b(self): - return 2 - a = A() - - l = ['a', 'b'] - s = 0 - for i in range(n): - name = 'meth_' + l[i & 1] - meth = getattr(a, name) # ID: getattr - s += meth() - return s - - log = self.run(main, [1000]) - assert log.result == main(1000) - loops = log.loops_by_filename(self.filepath) - assert len(loops) == 1 - for loop in loops: - assert loop.match_by_id('getattr',''' - guard_not_invalidated? - i32 = strlen(p31) - i34 = int_add(5, i32) - p35 = newstr(i34) - strsetitem(p35, 0, 109) - strsetitem(p35, 1, 101) - strsetitem(p35, 2, 116) - strsetitem(p35, 3, 104) - strsetitem(p35, 4, 95) - copystrcontent(p31, p35, 0, 5, i32) - i49 = call_i(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) - guard_value(i49, 1, descr=...) - ''') - def test_remove_duplicate_method_calls(self): def main(n): lst = [] @@ -250,6 +212,7 @@ guard_not_invalidated(descr=...) p80 = call_r(ConstClass(ll_str__IntegerR_SignedConst_Signed), i47, descr=) guard_no_exception(descr=...) + guard_nonnull(p80, descr=...) p53 = call_r(ConstClass(fast_str_decode_ascii), p80, descr=) guard_no_exception(descr=...) guard_nonnull(p53, descr=...) 
diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_struct.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_struct.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_struct.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_struct.py 2016-03-19 16:40:15.000000000 +0000 @@ -19,7 +19,8 @@ import struct i = 1 while i < n: - x = struct.unpack("i", struct.pack("i", i))[0] # ID: struct + buf = struct.pack("i", i) # ID: pack + x = struct.unpack("i", buf)[0] # ID: unpack i += x / i return i @@ -29,7 +30,7 @@ loop, = log.loops_by_filename(self.filepath) # This could, of course stand some improvement, to remove all these # arithmatic ops, but we've removed all the core overhead. - assert loop.match_by_id("struct", """ + assert loop.match_by_id("pack", """ guard_not_invalidated(descr=...) # struct.pack %s @@ -40,25 +41,31 @@ i17 = int_and(i16, 255) i19 = int_rshift(i16, 8) i20 = int_and(i19, 255) + """ % extra) + # the newstr and the strsetitems are because the string is forced, + # which is in turn because the optimizer doesn't know how to handle a + # gc_load_indexed_i on a virtual string. It could be improved, but it + # is also true that in real life cases struct.unpack is called on + # strings which come from the outside, so it's a minor issue. + assert loop.match_by_id("unpack", """ # struct.unpack - i22 = int_lshift(i14, 8) - i23 = int_or(i11, i22) - i25 = int_lshift(i17, 16) - i26 = int_or(i23, i25) - i28 = int_ge(i20, 128) - guard_false(i28, descr=...) 
- i30 = int_lshift(i20, 24) - i31 = int_or(i26, i30) - """ % extra) + p88 = newstr(4) + strsetitem(p88, 0, i11) + strsetitem(p88, 1, i14) + strsetitem(p88, 2, i17) + strsetitem(p88, 3, i20) + i91 = gc_load_indexed_i(p88, 0, 1, _, -4) + """) def test_struct_object(self): def main(n): import struct - s = struct.Struct("i") + s = struct.Struct("ii") i = 1 while i < n: - x = s.unpack(s.pack(i))[0] # ID: struct + buf = s.pack(-1, i) # ID: pack + x = s.unpack(buf)[1] # ID: unpack i += x / i return i @@ -66,7 +73,7 @@ assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) - assert loop.match_by_id('struct', """ + assert loop.match_by_id('pack', """ guard_not_invalidated(descr=...) # struct.pack %s @@ -77,14 +84,19 @@ i17 = int_and(i16, 255) i19 = int_rshift(i16, 8) i20 = int_and(i19, 255) + """ % extra) + assert loop.match_by_id('unpack', """ # struct.unpack - i22 = int_lshift(i14, 8) - i23 = int_or(i11, i22) - i25 = int_lshift(i17, 16) - i26 = int_or(i23, i25) - i28 = int_ge(i20, 128) - guard_false(i28, descr=...) - i30 = int_lshift(i20, 24) - i31 = int_or(i26, i30) - """ % extra) + p88 = newstr(8) + strsetitem(p88, 0, 255) + strsetitem(p88, 1, 255) + strsetitem(p88, 2, 255) + strsetitem(p88, 3, 255) + strsetitem(p88, 4, i11) + strsetitem(p88, 5, i14) + strsetitem(p88, 6, i17) + strsetitem(p88, 7, i20) + i90 = gc_load_indexed_i(p88, 0, 1, _, -4) + i91 = gc_load_indexed_i(p88, 4, 1, _, -4) + """) diff -Nru pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_weakref.py pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- pypy-4.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_weakref.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/pypyjit/test_pypy_c/test_weakref.py 2016-03-19 16:40:12.000000000 +0000 @@ -23,12 +23,8 @@ i60 = int_lt(i58, i31) guard_true(i60, descr=...) i61 = int_add(i58, 1) - p62 = getfield_gc_r(ConstPtr(ptr37), descr=) setfield_gc(p18, i61, descr=) - guard_value(p62, ConstPtr(ptr39), descr=...) 
guard_not_invalidated(descr=...) - p64 = getfield_gc_r(ConstPtr(ptr40), descr=) - guard_value(p64, ConstPtr(ptr42), descr=...) p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc_r(p14, descr=) diff -Nru pypy-4.0.1+dfsg/pypy/module/signal/__init__.py pypy-5.0.1+dfsg/pypy/module/signal/__init__.py --- pypy-4.0.1+dfsg/pypy/module/signal/__init__.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/signal/__init__.py 2016-03-19 16:40:12.000000000 +0000 @@ -48,3 +48,6 @@ use_bytecode_counter=False) space.actionflag.__class__ = interp_signal.SignalActionFlag # xxx yes I know the previous line is a hack + + def startup(self, space): + space.check_signal_action.startup(space) diff -Nru pypy-4.0.1+dfsg/pypy/module/signal/interp_signal.py pypy-5.0.1+dfsg/pypy/module/signal/interp_signal.py --- pypy-4.0.1+dfsg/pypy/module/signal/interp_signal.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/signal/interp_signal.py 2016-03-19 16:40:12.000000000 +0000 @@ -63,19 +63,25 @@ AsyncAction.__init__(self, space) self.pending_signal = -1 self.fire_in_another_thread = False - if self.space.config.objspace.usemodules.thread: - from pypy.module.thread import gil - gil.after_thread_switch = self._after_thread_switch + # + @rgc.no_collect + def _after_thread_switch(): + if self.fire_in_another_thread: + if self.space.threadlocals.signals_enabled(): + self.fire_in_another_thread = False + self.space.actionflag.rearm_ticker() + # this occurs when we just switched to the main thread + # and there is a signal pending: we force the ticker to + # -1, which should ensure perform() is called quickly. 
+ self._after_thread_switch = _after_thread_switch + # ^^^ so that 'self._after_thread_switch' can be annotated as a + # constant - @rgc.no_collect - def _after_thread_switch(self): - if self.fire_in_another_thread: - if self.space.threadlocals.signals_enabled(): - self.fire_in_another_thread = False - self.space.actionflag.rearm_ticker() - # this occurs when we just switched to the main thread - # and there is a signal pending: we force the ticker to - # -1, which should ensure perform() is called quickly. + def startup(self, space): + # this is translated + if space.config.objspace.usemodules.thread: + from rpython.rlib import rgil + rgil.invoke_after_thread_switch(self._after_thread_switch) def perform(self, executioncontext, frame): self._poll_for_signals() diff -Nru pypy-4.0.1+dfsg/pypy/module/_socket/test/test_sock_app.py pypy-5.0.1+dfsg/pypy/module/_socket/test/test_sock_app.py --- pypy-4.0.1+dfsg/pypy/module/_socket/test/test_sock_app.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_socket/test/test_sock_app.py 2016-03-19 16:40:12.000000000 +0000 @@ -102,7 +102,7 @@ fd = space.appexec([w_socket, space.wrap(orig_fd.fileno()), space.wrap(socket.AF_INET), space.wrap(socket.SOCK_STREAM), space.wrap(0)], - """(_socket, fd, family, type, proto): + """(_socket, fd, family, type, proto): return _socket.fromfd(fd, family, type, proto)""") assert space.unwrap(space.call_method(fd, 'fileno')) @@ -251,7 +251,7 @@ from pypy.module._socket.interp_socket import addr_as_object if not hasattr(rsocket._c, 'sockaddr_ll'): py.test.skip("posix specific test") - # HACK: To get the correct interface numer of lo, which in most cases is 1, + # HACK: To get the correct interface number of lo, which in most cases is 1, # but can be anything (i.e. 
39), we need to call the libc function # if_nametoindex to get the correct index import ctypes @@ -326,7 +326,7 @@ def test_ntoa_exception(self): import _socket - raises(_socket.error, _socket.inet_ntoa, "ab") + raises(_socket.error, _socket.inet_ntoa, b"ab") def test_aton_exceptions(self): import _socket @@ -418,7 +418,7 @@ # it if there is no connection. try: s.connect(("www.python.org", 80)) - except _socket.gaierror, ex: + except _socket.gaierror as ex: skip("GAIError - probably no connection: %s" % str(ex.args)) name = s.getpeername() # Will raise socket.error if not connected assert name[1] == 80 @@ -465,7 +465,7 @@ sizes = {socket.htonl: 32, socket.ntohl: 32, socket.htons: 16, socket.ntohs: 16} for func, size in sizes.items(): - mask = (1L< len(str): + pos = len(str) + if endpos > len(str): + endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) else: buf = space.readbuf_w(w_string) size = buf.getlength() @@ -216,6 +227,11 @@ def subx(self, w_ptemplate, w_string, count): space = self.space + # use a (much faster) string/unicode builder if w_ptemplate and + # w_string are both string or both unicode objects, and if w_ptemplate + # is a literal + use_builder = False + filter_as_unicode = filter_as_string = None if space.is_true(space.callable(w_ptemplate)): w_filter = w_ptemplate filter_is_callable = True @@ -223,6 +239,8 @@ if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) literal = u'\\' not in filter_as_unicode + use_builder = ( + space.isinstance_w(w_string, space.w_unicode) and literal) else: try: filter_as_string = space.str_w(w_ptemplate) @@ -232,6 +250,8 @@ literal = False else: literal = '\\' not in filter_as_string + use_builder = ( + space.isinstance_w(w_string, space.w_str) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False @@ -242,19 +262,44 @@ space.wrap(self), w_ptemplate) filter_is_callable = 
space.is_true(space.callable(w_filter)) # + # XXX this is a bit of a mess, but it improves performance a lot ctx = self.make_ctx(w_string) - sublist_w = [] + sublist_w = strbuilder = unicodebuilder = None + if use_builder: + if filter_as_unicode is not None: + unicodebuilder = UnicodeBuilder(ctx.end) + else: + assert filter_as_string is not None + strbuilder = StringBuilder(ctx.end) + else: + sublist_w = [] n = last_pos = 0 while not count or n < count: + sub_jitdriver.jit_merge_point( + self=self, + use_builder=use_builder, + filter_is_callable=filter_is_callable, + filter_type=type(w_filter), + ctx=ctx, + w_filter=w_filter, + strbuilder=strbuilder, + unicodebuilder=unicodebuilder, + filter_as_string=filter_as_string, + filter_as_unicode=filter_as_unicode, + count=count, + w_string=w_string, + n=n, last_pos=last_pos, sublist_w=sublist_w + ) + space = self.space if not searchcontext(space, ctx): break if last_pos < ctx.match_start: - sublist_w.append(slice_w(space, ctx, last_pos, - ctx.match_start, space.w_None)) + _sub_append_slice( + ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.match_start) start = ctx.match_end if start == ctx.match_start: start += 1 - nextctx = ctx.fresh_copy(start) if not (last_pos == ctx.match_start == ctx.match_end and n > 0): # the above ignores empty matches on latest position @@ -262,27 +307,70 @@ w_match = self.getmatch(ctx, True) w_piece = space.call_function(w_filter, w_match) if not space.is_w(w_piece, space.w_None): + assert strbuilder is None and unicodebuilder is None + assert not use_builder sublist_w.append(w_piece) else: - sublist_w.append(w_filter) + if use_builder: + if strbuilder is not None: + assert filter_as_string is not None + strbuilder.append(filter_as_string) + else: + assert unicodebuilder is not None + assert filter_as_unicode is not None + unicodebuilder.append(filter_as_unicode) + else: + sublist_w.append(w_filter) last_pos = ctx.match_end n += 1 elif last_pos >= ctx.end: break # 
empty match at the end: finished - ctx = nextctx + ctx.reset(start) if last_pos < ctx.end: - sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, - space.w_None)) - - if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') + _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.end) + if use_builder: + if strbuilder is not None: + return space.wrap(strbuilder.build()), n + else: + assert unicodebuilder is not None + return space.wrap(unicodebuilder.build()), n else: - w_emptystr = space.wrap('') - w_item = space.call_method(w_emptystr, 'join', - space.newlist(sublist_w)) - return w_item, n - + if space.isinstance_w(w_string, space.w_unicode): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrap('') + w_item = space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n + +sub_jitdriver = jit.JitDriver( + reds="""count n last_pos + ctx w_filter + strbuilder unicodebuilder + filter_as_string + filter_as_unicode + w_string sublist_w + self""".split(), + greens=["filter_is_callable", "use_builder", "filter_type", "ctx.pattern"]) + + +def _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, start, end): + if use_builder: + if isinstance(ctx, rsre_core.BufMatchContext): + assert strbuilder is not None + return strbuilder.append(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + assert strbuilder is not None + return strbuilder.append_slice(ctx._string, start, end) + elif isinstance(ctx, rsre_core.UnicodeMatchContext): + assert unicodebuilder is not None + return unicodebuilder.append_slice(ctx._unicodestr, start, end) + assert 0, "unreachable" + else: + sublist_w.append(slice_w(space, ctx, start, end, space.w_None)) @unwrap_spec(flags=int, groups=int, w_groupindex=WrappedDefault(None), w_indexgroup=WrappedDefault(None)) @@ -482,6 +570,8 @@ ctx = self.ctx if isinstance(ctx, 
rsre_core.BufMatchContext): return space.wrap(ctx._buffer.as_str()) + elif isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff -Nru pypy-4.0.1+dfsg/pypy/module/struct/formatiterator.py pypy-5.0.1+dfsg/pypy/module/struct/formatiterator.py --- pypy-4.0.1+dfsg/pypy/module/struct/formatiterator.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/struct/formatiterator.py 2016-03-19 16:40:12.000000000 +0000 @@ -149,3 +149,13 @@ @specialize.argtype(1) def appendobj(self, value): self.result_w.append(self.space.wrap(value)) + + def get_pos(self): + return self.pos + + def get_buffer_as_string_maybe(self): + string, pos = self.buf.as_str_and_offset_maybe() + return string, pos+self.pos + + def skip(self, size): + self.read(size) # XXX, could avoid taking the slice diff -Nru pypy-4.0.1+dfsg/pypy/module/struct/test/test_struct.py pypy-5.0.1+dfsg/pypy/module/struct/test/test_struct.py --- pypy-4.0.1+dfsg/pypy/module/struct/test/test_struct.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/struct/test/test_struct.py 2016-03-19 16:40:12.000000000 +0000 @@ -462,3 +462,29 @@ assert self.struct.unpack_from("ii", b, 2) == (17, 42) b[:sz] = self.struct.pack("ii", 18, 43) assert self.struct.unpack_from("ii", b) == (18, 43) + + +class AppTestFastPath(object): + spaceconfig = dict(usemodules=['struct', '__pypy__']) + + def setup_class(cls): + from rpython.rlib.rstruct import standardfmttable + standardfmttable.ALLOW_SLOWPATH = False + # + cls.w_struct = cls.space.appexec([], """(): + import struct + return struct + """) + cls.w_bytebuffer = cls.space.appexec([], """(): + import __pypy__ + return __pypy__.bytebuffer + """) + + def teardown_class(cls): + from rpython.rlib.rstruct import standardfmttable + standardfmttable.ALLOW_SLOWPATH = True + + def test_unpack_from(self): + buf = self.struct.pack("iii", 0, 42, 43) + offset = 
self.struct.calcsize("i") + assert self.struct.unpack_from("ii", buf, offset) == (42, 43) diff -Nru pypy-4.0.1+dfsg/pypy/module/sys/app.py pypy-5.0.1+dfsg/pypy/module/sys/app.py --- pypy-4.0.1+dfsg/pypy/module/sys/app.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/sys/app.py 2016-03-19 16:40:12.000000000 +0000 @@ -70,11 +70,11 @@ return None copyright_str = """ -Copyright 2003-2014 PyPy development team. +Copyright 2003-2016 PyPy development team. All Rights Reserved. For further information, see -Portions Copyright (c) 2001-2014 Python Software Foundation. +Portions Copyright (c) 2001-2016 Python Software Foundation. All Rights Reserved. Portions Copyright (c) 2000 BeOpen.com. diff -Nru pypy-4.0.1+dfsg/pypy/module/sys/__init__.py pypy-5.0.1+dfsg/pypy/module/sys/__init__.py --- pypy-4.0.1+dfsg/pypy/module/sys/__init__.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/sys/__init__.py 2016-03-19 16:40:12.000000000 +0000 @@ -77,7 +77,7 @@ 'meta_path' : 'space.wrap([])', 'path_hooks' : 'space.wrap([])', 'path_importer_cache' : 'space.wrap({})', - 'dont_write_bytecode' : 'space.w_False', + 'dont_write_bytecode' : 'space.wrap(space.config.translation.sandbox)', 'getdefaultencoding' : 'interp_encoding.getdefaultencoding', 'setdefaultencoding' : 'interp_encoding.setdefaultencoding', diff -Nru pypy-4.0.1+dfsg/pypy/module/sys/interp_encoding.py pypy-5.0.1+dfsg/pypy/module/sys/interp_encoding.py --- pypy-4.0.1+dfsg/pypy/module/sys/interp_encoding.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/sys/interp_encoding.py 2016-03-19 16:40:12.000000000 +0000 @@ -34,11 +34,15 @@ elif sys.platform == "darwin": base_encoding = "utf-8" else: - base_encoding = None + # In CPython, the default base encoding is NULL. This is paired with a + # comment that says "If non-NULL, this is different than the default + # encoding for strings". 
Therefore, the default filesystem encoding is the + # default encoding for strings, which is ASCII. + base_encoding = "ascii" def _getfilesystemencoding(space): encoding = base_encoding - if rlocale.HAVE_LANGINFO and rlocale.CODESET: + if rlocale.HAVE_LANGINFO: try: oldlocale = rlocale.setlocale(rlocale.LC_CTYPE, None) rlocale.setlocale(rlocale.LC_CTYPE, "") diff -Nru pypy-4.0.1+dfsg/pypy/module/sys/version.py pypy-5.0.1+dfsg/pypy/module/sys/version.py --- pypy-4.0.1+dfsg/pypy/module/sys/version.py 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/sys/version.py 2016-03-19 16:40:15.000000000 +0000 @@ -10,7 +10,8 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (4, 0, 1, "final", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 0, 1, "final", 0) #XXX # sync patchlevel.h + import pypy pypydir = os.path.dirname(os.path.abspath(pypy.__file__)) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py 2016-03-19 16:40:12.000000000 +0000 @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 4): - py.test.skip("re-enable me in version 1.4") + if __version_info__ < (1, 6): + py.test.skip("re-enable me in version 1.6") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " @@ -1810,3 +1810,45 @@ assert lib.EE1 == 0 assert lib.EE2 == 0 assert lib.EE3 == 1 + + def test_init_once(self): + def do_init(): + seen.append(1) + return 42 + ffi = FFI() + seen = [] + for i in range(3): + res = 
ffi.init_once(do_init, "tag1") + assert res == 42 + assert seen == [1] + for i in range(3): + res = ffi.init_once(do_init, "tag2") + assert res == 42 + assert seen == [1, 1] + + def test_init_once_multithread(self): + import sys, time + if sys.version_info < (3,): + import thread + else: + import _thread as thread + # + def do_init(): + seen.append('init!') + time.sleep(1) + seen.append('init done') + return 7 + ffi = FFI() + seen = [] + for i in range(6): + def f(): + res = ffi.init_once(do_init, "tag") + seen.append(res) + thread.start_new_thread(f, ()) + time.sleep(1.5) + assert seen == ['init!', 'init done'] + 6 * [7] + + def test_sizeof_struct_directly(self): + # only works with the Python FFI instances + ffi = FFI(backend=self.Backend()) + assert ffi.sizeof("struct{int a;}") == ffi.sizeof("int") diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py 2016-03-19 16:40:12.000000000 +0000 @@ -420,3 +420,7 @@ ]: x = ffi.sizeof(name) assert 1 <= x <= 16 + + def test_ffi_def_extern(self): + ffi = FFI() + py.test.raises(ValueError, ffi.def_extern) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py 2016-03-19 16:40:12.000000000 +0000 @@ -5,6 +5,7 @@ import ctypes.util from cffi.backend_ctypes import CTypesBackend from pypy.module.test_lib_pypy.cffi_tests.udir import udir +from pypy.module.test_lib_pypy.cffi_tests.support import FdWriteCapture try: from 
StringIO import StringIO @@ -12,29 +13,6 @@ from io import StringIO -class FdWriteCapture(object): - """xxx limited to capture at most 512 bytes of output, according - to the Posix manual.""" - - def __init__(self, capture_fd): - self.capture_fd = capture_fd - - def __enter__(self): - self.read_fd, self.write_fd = os.pipe() - self.copy_fd = os.dup(self.capture_fd) - os.dup2(self.write_fd, self.capture_fd) - return self - - def __exit__(self, *args): - os.dup2(self.copy_fd, self.capture_fd) - os.close(self.copy_fd) - os.close(self.write_fd) - self._value = os.read(self.read_fd, 512) - os.close(self.read_fd) - - def getvalue(self): - return self._value - lib_m = 'm' if sys.platform == 'win32': #there is a small chance this fails on Mingw via environ $CC @@ -136,7 +114,7 @@ """) ffi.C = ffi.dlopen(None) ffi.C.fputs # fetch before capturing, for easier debugging - with FdWriteCapture(2) as fd: + with FdWriteCapture() as fd: ffi.C.fputs(b"hello\n", ffi.C.stderr) ffi.C.fputs(b" world\n", ffi.C.stderr) res = fd.getvalue() @@ -152,7 +130,7 @@ """) ffi.C = ffi.dlopen(None) ffi.C.fputs # fetch before capturing, for easier debugging - with FdWriteCapture(2) as fd: + with FdWriteCapture() as fd: ffi.C.fputs(b"hello\n", ffi.C.stderr) ffi.C.fputs(b" world\n", ffi.C.stderr) res = fd.getvalue() @@ -167,7 +145,7 @@ void *stderr; """) ffi.C = ffi.dlopen(None) - with FdWriteCapture(2) as fd: + with FdWriteCapture() as fd: ffi.C.fprintf(ffi.C.stderr, b"hello with no arguments\n") ffi.C.fprintf(ffi.C.stderr, b"hello, %s!\n", ffi.new("char[]", b"world")) @@ -229,7 +207,7 @@ fptr = ffi.cast("int(*)(const char *txt, void *)", ffi.C.fputs) assert fptr == ffi.C.fputs assert repr(fptr).startswith("" % (stdcall, stdcall)) + +def test_extern_python(): + ffi = FFI() + ffi.cdef(""" + int bok(int, int); + extern "Python" int foobar(int, int); + int baz(int, int); + """) + assert sorted(ffi._parser._declarations) == [ + 'extern_python foobar', 'function baz', 'function bok'] + assert 
(ffi._parser._declarations['function bok'] == + ffi._parser._declarations['extern_python foobar'] == + ffi._parser._declarations['function baz']) + +def test_extern_python_group(): + ffi = FFI() + ffi.cdef(""" + int bok(int); + extern "Python" {int foobar(int, int);int bzrrr(int);} + int baz(int, int); + """) + assert sorted(ffi._parser._declarations) == [ + 'extern_python bzrrr', 'extern_python foobar', + 'function baz', 'function bok'] + assert (ffi._parser._declarations['function baz'] == + ffi._parser._declarations['extern_python foobar'] != + ffi._parser._declarations['function bok'] == + ffi._parser._declarations['extern_python bzrrr']) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py 2016-03-19 16:40:12.000000000 +0000 @@ -92,8 +92,8 @@ assert lib.sin(1.23) == math.sin(1.23) def _Wconversion(cdef, source, **kargs): - if sys.platform == 'win32': - py.test.skip("needs GCC or Clang") + if sys.platform in ('win32', 'darwin'): + py.test.skip("needs GCC") ffi = FFI() ffi.cdef(cdef) py.test.raises(VerificationError, ffi.verify, source, **kargs) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_version.py 2016-03-19 16:40:12.000000000 +0000 @@ -54,3 +54,10 @@ content = open(p).read() #v = BACKEND_VERSIONS.get(v, v) assert (('assert __version__ == "%s"' % v) in content) + +def test_embedding_h(): + parent = os.path.dirname(os.path.dirname(cffi.__file__)) + v = cffi.__version__ + p = 
os.path.join(parent, 'cffi', '_embedding.h') + content = open(p).read() + assert ('cffi version: %s"' % (v,)) in content diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py 2016-03-19 16:40:12.000000000 +0000 @@ -5,11 +5,16 @@ if sys.platform == 'win32': py.test.skip('snippets do not run on win32') +if sys.version_info < (2, 7): + py.test.skip('fails e.g. on a Debian/Ubuntu which patches virtualenv' + ' in a non-2.6-friendly way') def create_venv(name): tmpdir = udir.join(name) try: - subprocess.check_call(['virtualenv', '--distribute', + subprocess.check_call(['virtualenv', + #'--never-download', <= could be added, but causes failures + # in random cases on random machines '-p', os.path.abspath(sys.executable), str(tmpdir)]) except OSError as e: diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_commontypes.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_commontypes.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_commontypes.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_commontypes.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,35 @@ +# Generated by pypy/tool/import_cffi.py +import py, os, cffi, re +import _cffi_backend + + +def getlines(): + try: + f = open(os.path.join(os.path.dirname(cffi.__file__), + '..', 'c', 'commontypes.c')) + except IOError: + py.test.skip("cannot find ../c/commontypes.c") + lines = [line for line in f.readlines() if line.strip().startswith('EQ(')] + f.close() + return lines + +def test_alphabetical_order(): + lines = getlines() + assert lines == sorted(lines) + +def test_dependencies(): + r = 
re.compile(r'EQ[(]"([^"]+)",(?:\s*"([A-Z0-9_]+)\s*[*]*"[)])?') + lines = getlines() + d = {} + for line in lines: + match = r.search(line) + if match is not None: + d[match.group(1)] = match.group(2) + for value in d.values(): + if value: + assert value in d + +def test_get_common_types(): + d = {} + _cffi_backend._get_common_types(d) + assert d["bool"] == "_Bool" diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py 2016-03-19 16:40:12.000000000 +0000 @@ -8,6 +8,7 @@ p = ffi.new("int *") p[0] = -42 assert p[0] == -42 + assert type(ffi) is ffi.__class__ is _cffi1_backend.FFI def test_ffi_subclass(): class FOO(_cffi1_backend.FFI): @@ -17,6 +18,7 @@ assert foo.x == 42 p = foo.new("int *") assert p[0] == 0 + assert type(foo) is foo.__class__ is FOO def test_ffi_no_argument(): py.test.raises(TypeError, _cffi1_backend.FFI, 42) @@ -194,6 +196,11 @@ yp = ffi.new_handle([6, 4, 2]) assert ffi.from_handle(yp) == [6, 4, 2] +def test_handle_unique(): + ffi = _cffi1_backend.FFI() + assert ffi.new_handle(None) is not ffi.new_handle(None) + assert ffi.new_handle(None) != ffi.new_handle(None) + def test_ffi_cast(): ffi = _cffi1_backend.FFI() assert ffi.cast("int(*)(int)", 0) == ffi.NULL @@ -416,3 +423,76 @@ assert int(ffi.cast("_Bool", ffi.cast(type, 42))) == 1 assert int(ffi.cast("bool", ffi.cast(type, 42))) == 1 assert int(ffi.cast("_Bool", ffi.cast(type, 0))) == 0 + +def test_init_once(): + def do_init(): + seen.append(1) + return 42 + ffi = _cffi1_backend.FFI() + seen = [] + for i in range(3): + res = ffi.init_once(do_init, "tag1") + assert res == 42 + assert seen == [1] + for i in range(3): + res = ffi.init_once(do_init, "tag2") + assert res == 42 + assert seen == [1, 1] + +def 
test_init_once_multithread(): + if sys.version_info < (3,): + import thread + else: + import _thread as thread + import time + # + def do_init(): + print('init!') + seen.append('init!') + time.sleep(1) + seen.append('init done') + print('init done') + return 7 + ffi = _cffi1_backend.FFI() + seen = [] + for i in range(6): + def f(): + res = ffi.init_once(do_init, "tag") + seen.append(res) + thread.start_new_thread(f, ()) + time.sleep(1.5) + assert seen == ['init!', 'init done'] + 6 * [7] + +def test_init_once_failure(): + def do_init(): + seen.append(1) + raise ValueError + ffi = _cffi1_backend.FFI() + seen = [] + for i in range(5): + py.test.raises(ValueError, ffi.init_once, do_init, "tag") + assert seen == [1] * (i + 1) + +def test_init_once_multithread_failure(): + if sys.version_info < (3,): + import thread + else: + import _thread as thread + import time + def do_init(): + seen.append('init!') + time.sleep(1) + seen.append('oops') + raise ValueError + ffi = _cffi1_backend.FFI() + seen = [] + for i in range(3): + def f(): + py.test.raises(ValueError, ffi.init_once, do_init, "tag") + thread.start_new_thread(f, ()) + i = 0 + while len(seen) < 6: + i += 1 + assert i < 20 + time.sleep(0.51) + assert seen == ['init!', 'oops'] * 3 diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py 2016-03-19 16:40:12.000000000 +0000 @@ -1719,3 +1719,10 @@ exec("from _test_import_from_lib.lib import *", d) assert (set(key for key in d if not key.startswith('_')) == set(['myfunc', 'MYFOO'])) + # + # also test "import *" on the module itself, which should be + # equivalent to "import ffi, lib" + d = {} + exec("from _test_import_from_lib import *", d) + assert (sorted([x for x in 
d.keys() if not x.startswith('__')]) == + ['ffi', 'lib']) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,7 +4,8 @@ from cffi import FFI, VerificationError, FFIError from cffi import recompiler from pypy.module.test_lib_pypy.cffi_tests.udir import udir -from pypy.module.test_lib_pypy.cffi_tests.support import u +from pypy.module.test_lib_pypy.cffi_tests.support import u, long +from pypy.module.test_lib_pypy.cffi_tests.support import FdWriteCapture, StdErrCapture def check_type_table(input, expected_output, included=None): @@ -948,6 +949,19 @@ """, sources=[str(extra_c_source)]) assert lib.external_foo == 42 +def test_dotdot_in_source_file_names(): + extra_c_source = udir.join( + 'extra_test_dotdot_in_source_file_names.c') + extra_c_source.write('const int external_foo = 42;\n') + ffi = FFI() + ffi.cdef("const int external_foo;") + lib = verify(ffi, 'test_dotdot_in_source_file_names', """ + extern const int external_foo; + """, sources=[os.path.join(os.path.dirname(str(extra_c_source)), + 'foobar', '..', + os.path.basename(str(extra_c_source)))]) + assert lib.external_foo == 42 + def test_call_with_incomplete_structs(): ffi = FFI() ffi.cdef("typedef struct {...;} foo_t; " @@ -1143,6 +1157,7 @@ assert hasattr(lib, '__dict__') assert lib.__all__ == ['MYFOO', 'mybar'] # but not 'myvar' assert lib.__name__ == repr(lib) + assert lib.__class__ is type(lib) def test_macro_var_callback(): ffi = FFI() @@ -1485,3 +1500,247 @@ assert (pt.x, pt.y) == (99*500*999, -99*500*999) pt = ptr_call2(ffi.addressof(lib, 'cb2')) assert (pt.x, pt.y) == (99*500*999, -99*500*999) + +def test_extern_python_1(): + ffi = FFI() + ffi.cdef(""" 
+ extern "Python" { + int bar(int, int); + void baz(int, int); + int bok(void); + void boz(void); + } + """) + lib = verify(ffi, 'test_extern_python_1', "") + assert ffi.typeof(lib.bar) == ffi.typeof("int(*)(int, int)") + with FdWriteCapture() as f: + res = lib.bar(4, 5) + assert res == 0 + assert f.getvalue() == ( + b"extern \"Python\": function bar() called, but no code was attached " + b"to it yet with @ffi.def_extern(). Returning 0.\n") + + @ffi.def_extern("bar") + def my_bar(x, y): + seen.append(("Bar", x, y)) + return x * y + assert my_bar != lib.bar + seen = [] + res = lib.bar(6, 7) + assert seen == [("Bar", 6, 7)] + assert res == 42 + + def baz(x, y): + seen.append(("Baz", x, y)) + baz1 = ffi.def_extern()(baz) + assert baz1 is baz + seen = [] + baz(long(40), long(4)) + res = lib.baz(long(50), long(8)) + assert res is None + assert seen == [("Baz", 40, 4), ("Baz", 50, 8)] + assert type(seen[0][1]) is type(seen[0][2]) is long + assert type(seen[1][1]) is type(seen[1][2]) is int + + @ffi.def_extern(name="bok") + def bokk(): + seen.append("Bok") + return 42 + seen = [] + assert lib.bok() == 42 + assert seen == ["Bok"] + + @ffi.def_extern() + def boz(): + seen.append("Boz") + seen = [] + assert lib.boz() is None + assert seen == ["Boz"] + +def test_extern_python_bogus_name(): + ffi = FFI() + ffi.cdef("int abc;") + lib = verify(ffi, 'test_extern_python_bogus_name', "int abc;") + def fn(): + pass + py.test.raises(ffi.error, ffi.def_extern("unknown_name"), fn) + py.test.raises(ffi.error, ffi.def_extern("abc"), fn) + assert lib.abc == 0 + e = py.test.raises(ffi.error, ffi.def_extern("abc"), fn) + assert str(e.value) == ("ffi.def_extern('abc'): no 'extern \"Python\"' " + "function with this name") + e = py.test.raises(ffi.error, ffi.def_extern(), fn) + assert str(e.value) == ("ffi.def_extern('fn'): no 'extern \"Python\"' " + "function with this name") + # + py.test.raises(TypeError, ffi.def_extern(42), fn) + py.test.raises((TypeError, AttributeError), 
ffi.def_extern(), "foo") + class X: + pass + x = X() + x.__name__ = x + py.test.raises(TypeError, ffi.def_extern(), x) + +def test_extern_python_bogus_result_type(): + ffi = FFI() + ffi.cdef("""extern "Python" void bar(int);""") + lib = verify(ffi, 'test_extern_python_bogus_result_type', "") + # + @ffi.def_extern() + def bar(n): + return n * 10 + with StdErrCapture() as f: + res = lib.bar(321) + assert res is None + assert f.getvalue() == ( + "From cffi callback %r:\n" % (bar,) + + "Trying to convert the result back to C:\n" + "TypeError: callback with the return type 'void' must return None\n") + +def test_extern_python_redefine(): + ffi = FFI() + ffi.cdef("""extern "Python" int bar(int);""") + lib = verify(ffi, 'test_extern_python_redefine', "") + # + @ffi.def_extern() + def bar(n): + return n * 10 + assert lib.bar(42) == 420 + # + @ffi.def_extern() + def bar(n): + return -n + assert lib.bar(42) == -42 + +def test_extern_python_struct(): + ffi = FFI() + ffi.cdef(""" + struct foo_s { int a, b, c; }; + extern "Python" int bar(int, struct foo_s, int); + extern "Python" { struct foo_s baz(int, int); + struct foo_s bok(void); } + """) + lib = verify(ffi, 'test_extern_python_struct', + "struct foo_s { int a, b, c; };") + # + @ffi.def_extern() + def bar(x, s, z): + return x + s.a + s.b + s.c + z + res = lib.bar(1000, [1001, 1002, 1004], 1008) + assert res == 5015 + # + @ffi.def_extern() + def baz(x, y): + return [x + y, x - y, x * y] + res = lib.baz(1000, 42) + assert res.a == 1042 + assert res.b == 958 + assert res.c == 42000 + # + @ffi.def_extern() + def bok(): + return [10, 20, 30] + res = lib.bok() + assert [res.a, res.b, res.c] == [10, 20, 30] + +def test_extern_python_long_double(): + ffi = FFI() + ffi.cdef(""" + extern "Python" int bar(int, long double, int); + extern "Python" long double baz(int, int); + extern "Python" long double bok(void); + """) + lib = verify(ffi, 'test_extern_python_long_double', "") + # + @ffi.def_extern() + def bar(x, l, z): + 
seen.append((x, l, z)) + return 6 + seen = [] + lib.bar(10, 3.5, 20) + expected = ffi.cast("long double", 3.5) + assert repr(seen) == repr([(10, expected, 20)]) + # + @ffi.def_extern() + def baz(x, z): + assert x == 10 and z == 20 + return expected + res = lib.baz(10, 20) + assert repr(res) == repr(expected) + # + @ffi.def_extern() + def bok(): + return expected + res = lib.bok() + assert repr(res) == repr(expected) + +def test_extern_python_signature(): + ffi = FFI() + lib = verify(ffi, 'test_extern_python_signature', "") + py.test.raises(TypeError, ffi.def_extern(425), None) + py.test.raises(TypeError, ffi.def_extern, 'a', 'b', 'c', 'd') + +def test_extern_python_errors(): + ffi = FFI() + ffi.cdef(""" + extern "Python" int bar(int); + """) + lib = verify(ffi, 'test_extern_python_errors', "") + + seen = [] + def oops(*args): + seen.append(args) + + @ffi.def_extern(onerror=oops) + def bar(x): + return x + "" + assert lib.bar(10) == 0 + + @ffi.def_extern(name="bar", onerror=oops, error=-66) + def bar2(x): + return x + "" + assert lib.bar(10) == -66 + + assert len(seen) == 2 + exc, val, tb = seen[0] + assert exc is TypeError + assert isinstance(val, TypeError) + assert tb.tb_frame.f_code.co_name == "bar" + exc, val, tb = seen[1] + assert exc is TypeError + assert isinstance(val, TypeError) + assert tb.tb_frame.f_code.co_name == "bar2" + # + # a case where 'onerror' is not callable + py.test.raises(TypeError, ffi.def_extern(name='bar', onerror=42), + lambda x: x) + +def test_extern_python_stdcall(): + ffi = FFI() + ffi.cdef(""" + extern "Python" int __stdcall foo(int); + extern "Python" int WINAPI bar(int); + int (__stdcall * mycb1)(int); + int indirect_call(int); + """) + lib = verify(ffi, 'test_extern_python_stdcall', """ + #ifndef _MSC_VER + # define __stdcall + #endif + static int (__stdcall * mycb1)(int); + static int indirect_call(int x) { + return mycb1(x); + } + """) + # + @ffi.def_extern() + def foo(x): + return x + 42 + @ffi.def_extern() + def bar(x): + 
return x + 43 + assert lib.foo(100) == 142 + assert lib.bar(100) == 143 + lib.mycb1 = lib.foo + assert lib.mycb1(200) == 242 + assert lib.indirect_call(300) == 342 diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py 2016-03-19 16:40:12.000000000 +0000 @@ -72,8 +72,8 @@ assert lib.sin(1.23) == math.sin(1.23) def _Wconversion(cdef, source, **kargs): - if sys.platform == 'win32': - py.test.skip("needs GCC or Clang") + if sys.platform in ('win32', 'darwin'): + py.test.skip("needs GCC") ffi = FFI() ffi.cdef(cdef) py.test.raises(VerificationError, ffi.verify, source, **kargs) @@ -2092,20 +2092,20 @@ old = sys.getdlopenflags() try: ffi1 = FFI() - ffi1.cdef("int foo_verify_dlopen_flags;") + ffi1.cdef("int foo_verify_dlopen_flags_1;") sys.setdlopenflags(ffi1.RTLD_GLOBAL | ffi1.RTLD_NOW) - lib1 = ffi1.verify("int foo_verify_dlopen_flags;") + lib1 = ffi1.verify("int foo_verify_dlopen_flags_1;") finally: sys.setdlopenflags(old) ffi2 = FFI() ffi2.cdef("int *getptr(void);") lib2 = ffi2.verify(""" - extern int foo_verify_dlopen_flags; - static int *getptr(void) { return &foo_verify_dlopen_flags; } + extern int foo_verify_dlopen_flags_1; + static int *getptr(void) { return &foo_verify_dlopen_flags_1; } """) p = lib2.getptr() - assert ffi1.addressof(lib1, 'foo_verify_dlopen_flags') == p + assert ffi1.addressof(lib1, 'foo_verify_dlopen_flags_1') == p def test_consider_not_implemented_function_type(): ffi = FFI() diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py 2015-11-19 19:21:40.000000000 +0000 +++ 
pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py 2016-03-19 16:40:12.000000000 +0000 @@ -49,7 +49,8 @@ import setuptools except ImportError: py.test.skip("setuptools not found") - self.run(['setup.py', 'egg_info'], cwd=self.rootdir) + if os.path.exists(os.path.join(self.rootdir, 'setup.py')): + self.run(['setup.py', 'egg_info'], cwd=self.rootdir) TestDist._setuptools_ready = True def check_produced_files(self, content, curdir=None): @@ -58,13 +59,18 @@ found_so = None for name in os.listdir(curdir): if (name.endswith('.so') or name.endswith('.pyd') or - name.endswith('.dylib')): + name.endswith('.dylib') or name.endswith('.dll')): found_so = os.path.join(curdir, name) - # foo.cpython-34m.so => foo - name = name.split('.')[0] - # foo_d.so => foo (Python 2 debug builds) + # foo.so => foo + parts = name.split('.') + del parts[-1] + if len(parts) > 1 and parts[-1] != 'bar': + # foo.cpython-34m.so => foo, but foo.bar.so => foo.bar + del parts[-1] + name = '.'.join(parts) + # foo_d => foo (Python 2 debug builds) if name.endswith('_d') and hasattr(sys, 'gettotalrefcount'): - name = name.rsplit('_', 1)[0] + name = name[:-2] name += '.SO' if name.startswith('pycparser') and name.endswith('.egg'): continue # no clue why this shows up sometimes and not others @@ -209,6 +215,42 @@ 'Release': '?'}}) @chdir_to_tmp + def test_api_compile_explicit_target_1(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") + x = ffi.compile(target="foo.bar.*") + if sys.platform != 'win32': + sofile = self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None, + 'mymod.o': None}}) + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.SO': None, + 'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp + def test_api_compile_explicit_target_3(self): + ffi = cffi.FFI() + ffi.set_source("mod_name_in_package.mymod", "/*code 
would be here*/") + x = ffi.compile(target="foo.bar.baz") + if sys.platform != 'win32': + self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.baz': None, + 'mymod.c': None, + 'mymod.o': None}}) + sofile = os.path.join(str(self.udir), + 'mod_name_in_package', 'foo.bar.baz') + assert os.path.isabs(x) and os.path.samefile(x, sofile) + else: + self.check_produced_files({ + 'mod_name_in_package': {'foo.bar.baz': None, + 'mymod.c': None}, + 'Release': '?'}) + + @chdir_to_tmp def test_api_distutils_extension_1(self): ffi = cffi.FFI() ffi.set_source("mod_name_in_package.mymod", "/*code would be here*/") diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add1.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + import sys, time + sys.stdout.write("preparing") + for i in range(3): + sys.stdout.flush() + time.sleep(0.02) + sys.stdout.write(".") + sys.stdout.write("\n") + + from _add1_cffi import ffi + + int(ord("A")) # check that built-ins are there + + @ffi.def_extern() + def add1(x, y): + sys.stdout.write("adding %d and %d\n" % (x, y)) + sys.stdout.flush() + return x + y +""") + +ffi.set_source("_add1_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c 
2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,14 @@ +/* Generated by pypy/tool/import_cffi.py */ +#include + +extern int add1(int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add1(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add2.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,30 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add2(int, int, int); +""") + +ffi.embedding_init_code(r""" + import sys + sys.stdout.write("prepADD2\n") + + assert '_add2_cffi' in sys.modules + m = sys.modules['_add2_cffi'] + import _add2_cffi + ffi = _add2_cffi.ffi + + @ffi.def_extern() + def add2(x, y, z): + sys.stdout.write("adding %d and %d and %d\n" % (x, y, z)) + sys.stdout.flush() + return x + y + z +""") + +ffi.set_source("_add2_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,15 @@ +/* Generated by pypy/tool/import_cffi.py */ +#include + +extern int add1(int, int); +extern int add2(int, int, int); + + +int main(void) +{ + int x, y; + x = add1(40, 2); + y = add2(100, -5, -20); + printf("got: %d %d\n", x, y); + return 0; +} diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py 
pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add3.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,25 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add3(int, int, int, int); +""") + +ffi.embedding_init_code(r""" + from _add3_cffi import ffi + import sys + + @ffi.def_extern() + def add3(x, y, z, t): + sys.stdout.write("adding %d, %d, %d, %d\n" % (x, y, z, t)) + sys.stdout.flush() + return x + y + z + t +""") + +ffi.set_source("_add3_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int (*my_callback)(int); + int add_rec(int, int); +""") + +ffi.embedding_init_code(r""" + from _add_recursive_cffi import ffi, lib + import sys + print("preparing REC") + sys.stdout.flush() + + @ffi.def_extern() + def add_rec(x, y): + print("adding %d and %d" % (x, y)) + sys.stdout.flush() + return x + y + + x = lib.my_callback(400) + print('<<< %d >>>' % (x,)) +""") + +ffi.set_source("_add_recursive_cffi", """ +/* use CFFI_DLLEXPORT: on windows, it expands to __declspec(dllexport), + which is needed to export a variable from a dll */ +CFFI_DLLEXPORT int (*my_callback)(int); +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff -Nru 
pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,28 @@ +/* Generated by pypy/tool/import_cffi.py */ +#include + +#ifdef _MSC_VER +# define DLLIMPORT __declspec(dllimport) +#else +# define DLLIMPORT extern +#endif + +DLLIMPORT int add_rec(int, int); +DLLIMPORT int (*my_callback)(int); + +static int some_callback(int x) +{ + printf("some_callback(%d)\n", x); + fflush(stdout); + return add_rec(x, 9); +} + +int main(void) +{ + int x, y; + my_callback = some_callback; + x = add_rec(40, 2); + y = add_rec(100, -5); + printf("got: %d %d\n", x, y); + return 0; +} diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/__init__.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1 @@ +# Generated by pypy/tool/import_cffi.py diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/perf.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,22 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + from _perf_cffi import ffi + + @ffi.def_extern() + def add1(x, 
y): + return x + y +""") + +ffi.set_source("_perf_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,91 @@ +/* Generated by pypy/tool/import_cffi.py */ +#include +#include +#include +#ifdef PTEST_USE_THREAD +# include +static pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t cond1 = PTHREAD_COND_INITIALIZER; +static int remaining; +#endif + + +extern int add1(int, int); + + +static double time_delta(struct timeval *stop, struct timeval *start) +{ + return (stop->tv_sec - start->tv_sec) + + 1e-6 * (stop->tv_usec - start->tv_usec); +} + +static double measure(void) +{ + long long i, iterations; + int result; + struct timeval start, stop; + double elapsed; + + add1(0, 0); /* prepare off-line */ + + i = 0; + iterations = 1000; + result = gettimeofday(&start, NULL); + assert(result == 0); + + while (1) { + for (; i < iterations; i++) { + add1(((int)i) & 0xaaaaaa, ((int)i) & 0x555555); + } + result = gettimeofday(&stop, NULL); + assert(result == 0); + + elapsed = time_delta(&stop, &start); + assert(elapsed >= 0.0); + if (elapsed > 2.5) + break; + iterations = iterations * 3 / 2; + } + + return elapsed / (double)iterations; +} + +static void *start_routine(void *arg) +{ + double t = measure(); + printf("time per call: %.3g\n", t); + +#ifdef PTEST_USE_THREAD + pthread_mutex_lock(&mutex1); + remaining -= 1; + if (!remaining) + pthread_cond_signal(&cond1); + pthread_mutex_unlock(&mutex1); +#endif + + return arg; +} + + +int main(void) +{ +#ifndef PTEST_USE_THREAD + start_routine(0); +#else + pthread_t th; + int i, status; + + 
add1(0, 0); /* this is the main thread */ + + remaining = PTEST_USE_THREAD; + for (i = 0; i < PTEST_USE_THREAD; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + pthread_mutex_lock(&mutex1); + while (remaining) + pthread_cond_wait(&cond1, &mutex1); + pthread_mutex_unlock(&mutex1); +#endif + return 0; +} diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,186 @@ +# Generated by pypy/tool/import_cffi.py +import py +import sys, os, re +import shutil, subprocess, time +from pypy.module.test_lib_pypy.cffi_tests.udir import udir +import cffi + + +local_dir = os.path.dirname(os.path.abspath(__file__)) +_link_error = '?' 
+ +def check_lib_python_found(tmpdir): + global _link_error + if _link_error == '?': + ffi = cffi.FFI() + kwds = {} + ffi._apply_embedding_fix(kwds) + ffi.set_source("_test_lib_python_found", "", **kwds) + try: + ffi.compile(tmpdir=tmpdir, verbose=True) + except cffi.VerificationError as e: + _link_error = e + else: + _link_error = None + if _link_error: + py.test.skip(str(_link_error)) + + +def prefix_pythonpath(): + cffi_base = os.path.dirname(os.path.dirname(local_dir)) + pythonpath = org_env.get('PYTHONPATH', '').split(os.pathsep) + if cffi_base not in pythonpath: + pythonpath.insert(0, cffi_base) + return os.pathsep.join(pythonpath) + +def copy_away_env(): + global org_env + try: + org_env + except NameError: + org_env = os.environ.copy() + + +class EmbeddingTests: + _compiled_modules = {} + + def setup_method(self, meth): + check_lib_python_found(str(udir.ensure('embedding', dir=1))) + self._path = udir.join('embedding', meth.__name__) + if sys.platform == "win32" or sys.platform == "darwin": + self._compiled_modules.clear() # workaround + + def get_path(self): + return str(self._path.ensure(dir=1)) + + def _run_base(self, args, **kwds): + print('RUNNING:', args, kwds) + return subprocess.Popen(args, **kwds) + + def _run(self, args): + popen = self._run_base(args, cwd=self.get_path(), + stdout=subprocess.PIPE, + universal_newlines=True) + output = popen.stdout.read() + err = popen.wait() + if err: + raise OSError("popen failed with exit code %r: %r" % ( + err, args)) + print(output.rstrip()) + return output + + def prepare_module(self, name): + self.patch_environment() + if name not in self._compiled_modules: + path = self.get_path() + filename = '%s.py' % name + # NOTE: if you have an .egg globally installed with an older + # version of cffi, this will not work, because sys.path ends + # up with the .egg before the PYTHONPATH entries. 
I didn't + # find a solution to that: we could hack sys.path inside the + # script run here, but we can't hack it in the same way in + # execute(). + output = self._run([sys.executable, + os.path.join(local_dir, filename)]) + match = re.compile(r"\bFILENAME: (.+)").search(output) + assert match + dynamic_lib_name = match.group(1) + if sys.platform == 'win32': + assert dynamic_lib_name.endswith('_cffi.dll') + elif sys.platform == 'darwin': + assert dynamic_lib_name.endswith('_cffi.dylib') + else: + assert dynamic_lib_name.endswith('_cffi.so') + self._compiled_modules[name] = dynamic_lib_name + return self._compiled_modules[name] + + def compile(self, name, modules, opt=False, threads=False, defines={}): + path = self.get_path() + filename = '%s.c' % name + shutil.copy(os.path.join(local_dir, filename), path) + shutil.copy(os.path.join(local_dir, 'thread-test.h'), path) + import distutils.ccompiler + curdir = os.getcwd() + try: + os.chdir(self.get_path()) + c = distutils.ccompiler.new_compiler() + print('compiling %s with %r' % (name, modules)) + extra_preargs = [] + debug = True + if sys.platform == 'win32': + libfiles = [] + for m in modules: + m = os.path.basename(m) + assert m.endswith('.dll') + libfiles.append('Release\\%s.lib' % m[:-4]) + modules = libfiles + extra_preargs.append('/MANIFEST') + debug = False # you need to install extra stuff + # for this to work + elif threads: + extra_preargs.append('-pthread') + objects = c.compile([filename], macros=sorted(defines.items()), + debug=debug) + c.link_executable(objects + modules, name, extra_preargs=extra_preargs) + finally: + os.chdir(curdir) + + def patch_environment(self): + copy_away_env() + path = self.get_path() + # for libpypy-c.dll or Python27.dll + path = os.path.split(sys.executable)[0] + os.path.pathsep + path + env_extra = {'PYTHONPATH': prefix_pythonpath()} + if sys.platform == 'win32': + envname = 'PATH' + else: + envname = 'LD_LIBRARY_PATH' + libpath = org_env.get(envname) + if libpath: + libpath 
= path + os.path.pathsep + libpath + else: + libpath = path + env_extra[envname] = libpath + for key, value in sorted(env_extra.items()): + if os.environ.get(key) != value: + print '* setting env var %r to %r' % (key, value) + os.environ[key] = value + + def execute(self, name): + path = self.get_path() + print('running %r in %r' % (name, path)) + executable_name = name + if sys.platform == 'win32': + executable_name = os.path.join(path, executable_name + '.exe') + else: + executable_name = os.path.join('.', executable_name) + popen = self._run_base([executable_name], cwd=path, + stdout=subprocess.PIPE, + universal_newlines=True) + result = popen.stdout.read() + err = popen.wait() + if err: + raise OSError("%r failed with exit code %r" % (name, err)) + return result + + +class TestBasic(EmbeddingTests): + def test_basic(self): + add1_cffi = self.prepare_module('add1') + self.compile('add1-test', [add1_cffi]) + output = self.execute('add1-test') + assert output == ("preparing...\n" + "adding 40 and 2\n" + "adding 100 and -5\n" + "got: 42 95\n") + + def test_two_modules(self): + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('add2-test', [add1_cffi, add2_cffi]) + output = self.execute('add2-test') + assert output == ("preparing...\n" + "adding 40 and 2\n" + "prepADD2\n" + "adding 100 and -5 and -20\n" + "got: 42 75\n") diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_performance.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,53 @@ +# Generated by pypy/tool/import_cffi.py +import sys +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + +if sys.platform == 'win32': + 
import py + py.test.skip("written with POSIX functions") + + +class TestPerformance(EmbeddingTests): + def test_perf_single_threaded(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_1_thread(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '1'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_2_threads(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '2'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_4_threads(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '4'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) + + def test_perf_in_8_threads(self): + perf_cffi = self.prepare_module('perf') + self.compile('perf-test', [perf_cffi], opt=True, threads=True, + defines={'PTEST_USE_THREAD': '8'}) + output = self.execute('perf-test') + print('='*79) + print(output.rstrip()) + print('='*79) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_recursive.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_recursive.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_recursive.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_recursive.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,16 @@ +# Generated by pypy/tool/import_cffi.py +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + + +class 
TestRecursive(EmbeddingTests): + def test_recursive(self): + add_recursive_cffi = self.prepare_module('add_recursive') + self.compile('add_recursive-test', [add_recursive_cffi]) + output = self.execute('add_recursive-test') + assert output == ("preparing REC\n" + "some_callback(400)\n" + "adding 400 and 9\n" + "<<< 409 >>>\n" + "adding 40 and 2\n" + "adding 100 and -5\n" + "got: 42 95\n") diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_thread.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_thread.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_thread.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_thread.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,62 @@ +# Generated by pypy/tool/import_cffi.py +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + + +class TestThread(EmbeddingTests): + def test_first_calls_in_parallel(self): + add1_cffi = self.prepare_module('add1') + self.compile('thread1-test', [add1_cffi], threads=True) + for i in range(50): + output = self.execute('thread1-test') + assert output == ("starting\n" + "preparing...\n" + + "adding 40 and 2\n" * 10 + + "done\n") + + def _take_out(self, text, content): + assert content in text + i = text.index(content) + return text[:i] + text[i+len(content):] + + def test_init_different_modules_in_different_threads(self): + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('thread2-test', [add1_cffi, add2_cffi], threads=True) + output = self.execute('thread2-test') + output = self._take_out(output, "preparing") + output = self._take_out(output, ".") + output = self._take_out(output, ".") + # at least the 3rd dot should be after everything from ADD2 + assert output == ("starting\n" + "prepADD2\n" + "adding 1000 and 200 and 30\n" + ".\n" + "adding 40 and 2\n" + "done\n") + + def 
test_alt_issue(self): + add1_cffi = self.prepare_module('add1') + add2_cffi = self.prepare_module('add2') + self.compile('thread2-test', [add1_cffi, add2_cffi], + threads=True, defines={'T2TEST_AGAIN_ADD1': '1'}) + output = self.execute('thread2-test') + output = self._take_out(output, "adding 40 and 2\n") + assert output == ("starting\n" + "preparing...\n" + "adding -1 and -1\n" + "prepADD2\n" + "adding 1000 and 200 and 30\n" + "done\n") + + def test_load_in_parallel_more(self): + add2_cffi = self.prepare_module('add2') + add3_cffi = self.prepare_module('add3') + self.compile('thread3-test', [add2_cffi, add3_cffi], threads=True) + for i in range(150): + output = self.execute('thread3-test') + for j in range(10): + output = self._take_out(output, "adding 40 and 2 and 100\n") + output = self._take_out(output, "adding 1000, 200, 30, 4\n") + assert output == ("starting\n" + "prepADD2\n" + "done\n") diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_tlocal.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_tlocal.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_tlocal.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/test_tlocal.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,11 @@ +# Generated by pypy/tool/import_cffi.py +from pypy.module.test_lib_pypy.cffi_tests.embedding.test_basic import EmbeddingTests + + +class TestThreadLocal(EmbeddingTests): + def test_thread_local(self): + tlocal_cffi = self.prepare_module('tlocal') + self.compile('tlocal-test', [tlocal_cffi], threads=True) + for i in range(10): + output = self.execute('tlocal-test') + assert output == "done\n" diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c 1970-01-01 
00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,44 @@ +/* Generated by pypy/tool/import_cffi.py */ +#include +#include +#include "thread-test.h" + +#define NTHREADS 10 + + +extern int add1(int, int); + +static sem_t done; + + +static void *start_routine(void *arg) +{ + int x, status; + x = add1(40, 2); + assert(x == 42); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + fflush(stdout); + for (i = 0; i < NTHREADS; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < NTHREADS; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,58 @@ +/* Generated by pypy/tool/import_cffi.py */ +#include +#include +#include "thread-test.h" + +extern int add1(int, int); +extern int add2(int, int, int); + +static sem_t done; + + +static void *start_routine_1(void *arg) +{ + int x, status; + x = add1(40, 2); + assert(x == 42); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +static void *start_routine_2(void *arg) +{ + int x, status; +#ifdef T2TEST_AGAIN_ADD1 + add1(-1, -1); +#endif + x = add2(1000, 200, 30); + assert(x == 1230); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + 
printf("starting\n"); + fflush(stdout); + status = pthread_create(&th, NULL, start_routine_1, NULL); + assert(status == 0); + status = pthread_create(&th, NULL, start_routine_2, NULL); + assert(status == 0); + + for (i = 0; i < 2; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,56 @@ +/* Generated by pypy/tool/import_cffi.py */ +#include +#include +#include "thread-test.h" + +extern int add2(int, int, int); +extern int add3(int, int, int, int); + +static sem_t done; + + +static void *start_routine_2(void *arg) +{ + int x, status; + x = add2(40, 2, 100); + assert(x == 142); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +static void *start_routine_3(void *arg) +{ + int x, status; + x = add3(1000, 200, 30, 4); + assert(x == 1234); + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + printf("starting\n"); + fflush(stdout); + for (i = 0; i < 10; i++) { + status = pthread_create(&th, NULL, start_routine_2, NULL); + assert(status == 0); + status = pthread_create(&th, NULL, start_routine_3, NULL); + assert(status == 0); + } + for (i = 0; i < 20; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h --- 
pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,97 @@ +/* Generated by pypy/tool/import_cffi.py */ +/************************************************************/ +#ifndef _MSC_VER +/************************************************************/ + + +#include + +/* don't include , it is not available on OS/X */ + +typedef struct { + pthread_mutex_t mutex1; + pthread_cond_t cond1; + unsigned int value; +} sem_t; + +static int sem_init(sem_t *sem, int pshared, unsigned int value) +{ + assert(pshared == 0); + sem->value = value; + return (pthread_mutex_init(&sem->mutex1, NULL) || + pthread_cond_init(&sem->cond1, NULL)); +} + +static int sem_post(sem_t *sem) +{ + pthread_mutex_lock(&sem->mutex1); + sem->value += 1; + pthread_cond_signal(&sem->cond1); + pthread_mutex_unlock(&sem->mutex1); + return 0; +} + +static int sem_wait(sem_t *sem) +{ + pthread_mutex_lock(&sem->mutex1); + while (sem->value == 0) + pthread_cond_wait(&sem->cond1, &sem->mutex1); + sem->value -= 1; + pthread_mutex_unlock(&sem->mutex1); + return 0; +} + + +/************************************************************/ +#else +/************************************************************/ + + +/* Very quick and dirty, just what I need for these tests. + Don't use directly in any real code! +*/ + +#include +#include + +typedef HANDLE sem_t; +typedef HANDLE pthread_t; + +static int sem_init(sem_t *sem, int pshared, unsigned int value) +{ + assert(pshared == 0); + assert(value == 0); + *sem = CreateSemaphore(NULL, 0, 999, NULL); + return *sem ? 0 : -1; +} + +static int sem_post(sem_t *sem) +{ + return ReleaseSemaphore(*sem, 1, NULL) ? 
0 : -1; +} + +static int sem_wait(sem_t *sem) +{ + WaitForSingleObject(*sem, INFINITE); + return 0; +} + +static DWORD WINAPI myThreadProc(LPVOID lpParameter) +{ + void *(* start_routine)(void *) = (void *(*)(void *))lpParameter; + start_routine(NULL); + return 0; +} + +static int pthread_create(pthread_t *thread, void *attr, + void *start_routine(void *), void *arg) +{ + assert(arg == NULL); + *thread = CreateThread(NULL, 0, myThreadProc, start_routine, 0, NULL); + return *thread ? 0 : -1; +} + + +/************************************************************/ +#endif +/************************************************************/ diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,34 @@ +# Generated by pypy/tool/import_cffi.py +import cffi + +ffi = cffi.FFI() + +ffi.embedding_api(""" + int add1(int, int); +""") + +ffi.embedding_init_code(r""" + from _tlocal_cffi import ffi + import itertools + try: + import thread + g_seen = itertools.count().next + except ImportError: + import _thread as thread # py3 + g_seen = itertools.count().__next__ + tloc = thread._local() + + @ffi.def_extern() + def add1(x, y): + try: + num = tloc.num + except AttributeError: + num = tloc.num = g_seen() * 1000 + return x + y + num +""") + +ffi.set_source("_tlocal_cffi", """ +""") + +fn = ffi.compile(verbose=True) +print('FILENAME: %s' % (fn,)) diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c 1970-01-01 00:00:00.000000000 +0000 +++ 
pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,48 @@ +/* Generated by pypy/tool/import_cffi.py */ +#include +#include +#include "thread-test.h" + +#define NTHREADS 10 + + +extern int add1(int, int); + +static sem_t done; + + +static void *start_routine(void *arg) +{ + int i, x, expected, status; + + expected = add1(40, 2); + assert((expected % 1000) == 42); + + for (i=0; i<10; i++) { + x = add1(50, i); + assert(x == expected + 8 + i); + } + + status = sem_post(&done); + assert(status == 0); + + return arg; +} + +int main(void) +{ + pthread_t th; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); + + for (i = 0; i < NTHREADS; i++) { + status = pthread_create(&th, NULL, start_routine, NULL); + assert(status == 0); + } + for (i = 0; i < NTHREADS; i++) { + status = sem_wait(&done); + assert(status == 0); + } + printf("done\n"); + return 0; +} diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/support.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/support.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/support.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/cffi_tests/support.py 2016-03-19 16:40:12.000000000 +0000 @@ -9,6 +9,7 @@ return eval('u'+repr(other).replace(r'\\u', r'\u') .replace(r'\\U', r'\U')) u = U() + long = long # for further "from pypy.module.test_lib_pypy.cffi_tests.support import long" assert u+'a\x00b' == eval(r"u'a\x00b'") assert u+'a\u1234b' == eval(r"u'a\u1234b'") assert u+'a\U00012345b' == eval(r"u'a\U00012345b'") @@ -18,3 +19,46 @@ u = "" unicode = str long = int + + +class StdErrCapture(object): + """Capture writes to sys.stderr (not to the underlying file descriptor).""" + def __enter__(self): + try: + from StringIO import StringIO + except ImportError: + from io import StringIO + self.old_stderr = sys.stderr + sys.stderr = f = StringIO() + return f + def __exit__(self, *args): + 
sys.stderr = self.old_stderr + + +class FdWriteCapture(object): + """xxx limited to capture at most 512 bytes of output, according + to the Posix manual.""" + + def __init__(self, capture_fd=2): # stderr by default + if sys.platform == 'win32': + import py + py.test.skip("seems not to work, too bad") + self.capture_fd = capture_fd + + def __enter__(self): + import os + self.read_fd, self.write_fd = os.pipe() + self.copy_fd = os.dup(self.capture_fd) + os.dup2(self.write_fd, self.capture_fd) + return self + + def __exit__(self, *args): + import os + os.dup2(self.copy_fd, self.capture_fd) + os.close(self.copy_fd) + os.close(self.write_fd) + self._value = os.read(self.read_fd, 512) + os.close(self.read_fd) + + def getvalue(self): + return self._value diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/test_cPickle.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/test_cPickle.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/test_cPickle.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/test_cPickle.py 2016-03-19 16:40:12.000000000 +0000 @@ -5,3 +5,7 @@ def test_stack_underflow(): py.test.raises(cPickle.UnpicklingError, cPickle.loads, "a string") + +def test_bad_key(): + e = py.test.raises(cPickle.UnpicklingError, cPickle.loads, "v") + assert str(e.value) == "invalid load key, 'v'." 
diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/test_datetime.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/test_datetime.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/test_datetime.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/test_datetime.py 2016-03-19 16:40:12.000000000 +0000 @@ -170,14 +170,23 @@ self.value = value def __int__(self): return self.value + class SubInt(int): pass + class SubLong(long): pass + dt10 = datetime.datetime(10, 10, 10, 10, 10, 10, 10) for xx in [10L, decimal.Decimal(10), decimal.Decimal('10.9'), Number(10), - Number(10L)]: - assert datetime.datetime(10, 10, 10, 10, 10, 10, 10) == \ - datetime.datetime(xx, xx, xx, xx, xx, xx, xx) + Number(10L), + SubInt(10), + SubLong(10), + Number(SubInt(10)), + Number(SubLong(10))]: + dtxx = datetime.datetime(xx, xx, xx, xx, xx, xx, xx) + assert dt10 == dtxx + assert type(dtxx.month) is int + assert type(dtxx.second) is int with py.test.raises(TypeError) as e: datetime.datetime(10, 10, '10') @@ -242,6 +251,70 @@ naive == aware assert str(e.value) == "can't compare offset-naive and offset-aware times" + def test_future_types_newint(self): + try: + from future.types.newint import newint + except ImportError: + py.test.skip('requires future') + + dt_from_ints = datetime.datetime(2015, 12, 31, 12, 34, 56) + dt_from_newints = datetime.datetime(newint(2015), newint(12), newint(31), newint(12), newint(34), newint(56)) + dt_from_mixed = datetime.datetime(2015, newint(12), 31, newint(12), 34, newint(56)) + assert dt_from_ints == dt_from_newints + assert dt_from_newints == dt_from_mixed + assert dt_from_mixed == dt_from_ints + + d_from_int = datetime.date.fromtimestamp(1431216000) + d_from_newint = datetime.date.fromtimestamp(newint(1431216000)) + assert d_from_int == d_from_newint + + dt_from_int = datetime.datetime.fromtimestamp(1431216000) + dt_from_newint = datetime.datetime.fromtimestamp(newint(1431216000)) + assert dt_from_int == dt_from_newint + + dtu_from_int 
= datetime.datetime.utcfromtimestamp(1431216000) + dtu_from_newint = datetime.datetime.utcfromtimestamp(newint(1431216000)) + assert dtu_from_int == dtu_from_newint + + td_from_int = datetime.timedelta(16565) + tds_from_int = datetime.timedelta(seconds=1431216000) + td_from_newint = datetime.timedelta(newint(16565)) + tds_from_newint = datetime.timedelta(seconds=newint(1431216000)) + assert td_from_int == tds_from_int + assert td_from_int == td_from_newint + assert td_from_int == tds_from_newint + assert tds_from_int == td_from_newint + assert tds_from_int == tds_from_newint + assert td_from_newint == tds_from_newint + + td_mul_int_int = td_from_int * 2 + td_mul_int_newint = td_from_int * newint(2) + td_mul_newint_int = td_from_newint * 2 + td_mul_newint_newint = td_from_newint * newint(2) + assert td_mul_int_int == td_mul_int_newint + assert td_mul_int_int == td_mul_newint_int + assert td_mul_int_int == td_mul_newint_newint + assert td_mul_int_newint == td_mul_newint_int + assert td_mul_int_newint == td_mul_newint_newint + assert td_mul_newint_int == td_mul_newint_newint + + td_div_int_int = td_from_int / 3600 + td_div_int_newint = td_from_int / newint(3600) + td_div_newint_int = td_from_newint / 3600 + td_div_newint_newint = td_from_newint / newint(3600) + assert td_div_int_int == td_div_int_newint + assert td_div_int_int == td_div_newint_int + assert td_div_int_int == td_div_newint_newint + assert td_div_int_newint == td_div_newint_int + assert td_div_int_newint == td_div_newint_newint + assert td_div_newint_int == td_div_newint_newint + + def test_return_types(self): + td = datetime.timedelta(5) + assert type(td.total_seconds()) is float + class sub(datetime.timedelta): pass + assert type(+sub()) is datetime.timedelta + class TestDatetimeHost(BaseTestDatetime): def setup_class(cls): diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/test_greenlet_tracing.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/test_greenlet_tracing.py --- 
pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/test_greenlet_tracing.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/test_greenlet_tracing.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,53 @@ +import py +try: + from lib_pypy import greenlet +except ImportError, e: + py.test.skip(e) + +class SomeError(Exception): + pass + +class TestTracing: + def test_greenlet_tracing(self): + main = greenlet.getcurrent() + actions = [] + def trace(*args): + actions.append(args) + def dummy(): + pass + def dummyexc(): + raise SomeError() + oldtrace = greenlet.settrace(trace) + try: + g1 = greenlet.greenlet(dummy) + g1.switch() + g2 = greenlet.greenlet(dummyexc) + py.test.raises(SomeError, g2.switch) + finally: + greenlet.settrace(oldtrace) + assert actions == [ + ('switch', (main, g1)), + ('switch', (g1, main)), + ('switch', (main, g2)), + ('throw', (g2, main)), + ] + + def test_exception_disables_tracing(self): + main = greenlet.getcurrent() + actions = [] + def trace(*args): + actions.append(args) + raise SomeError() + def dummy(): + main.switch() + g = greenlet.greenlet(dummy) + g.switch() + oldtrace = greenlet.settrace(trace) + try: + py.test.raises(SomeError, g.switch) + assert greenlet.gettrace() is None + finally: + greenlet.settrace(oldtrace) + assert actions == [ + ('switch', (main, g)), + ] diff -Nru pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/test_json_extra.py pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/test_json_extra.py --- pypy-4.0.1+dfsg/pypy/module/test_lib_pypy/test_json_extra.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/test_lib_pypy/test_json_extra.py 2016-03-19 16:40:12.000000000 +0000 @@ -12,3 +12,6 @@ e = py.test.raises(UnicodeDecodeError, json.dumps, ("\xc0", u"\u1234"), ensure_ascii=False) assert str(e.value).startswith("'ascii' codec can't decode byte 0xc0 ") + +def test_issue2191(): + assert is_(json.dumps(u"xxx", ensure_ascii=False), u'"xxx"') diff -Nru pypy-4.0.1+dfsg/pypy/module/thread/gil.py 
pypy-5.0.1+dfsg/pypy/module/thread/gil.py --- pypy-4.0.1+dfsg/pypy/module/thread/gil.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/thread/gil.py 2016-03-19 16:40:12.000000000 +0000 @@ -11,7 +11,6 @@ from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import PeriodicAsyncAction from pypy.module.thread.threadlocals import OSThreadLocals -from rpython.rlib.objectmodel import invoke_around_extcall class GILThreadLocals(OSThreadLocals): """A version of OSThreadLocals that enforces a GIL.""" @@ -23,34 +22,21 @@ space.actionflag.register_periodic_action(GILReleaseAction(space), use_bytecode_counter=True) - def _initialize_gil(self, space): - rgil.gil_allocate() - def setup_threads(self, space): """Enable threads in the object space, if they haven't already been.""" if not self.gil_ready: - self._initialize_gil(space) + # Note: this is a quasi-immutable read by module/pypyjit/interp_jit + # It must be changed (to True) only if it was really False before + rgil.allocate() self.gil_ready = True result = True else: result = False # already set up - - # add the GIL-releasing callback around external function calls. - # - # XXX we assume a single space, but this is not quite true during - # testing; for example, if you run the whole of test_lock you get - # a deadlock caused by the first test's space being reused by - # test_lock_again after the global state was cleared by - # test_compile_lock. As a workaround, we repatch these global - # fields systematically. 
- invoke_around_extcall(before_external_call, after_external_call) return result - def reinit_threads(self, space): - "Called in the child process after a fork()" - OSThreadLocals.reinit_threads(self, space) - if self.gil_ready: # re-initialize the gil if needed - self._initialize_gil(space) + ## def reinit_threads(self, space): + ## "Called in the child process after a fork()" + ## OSThreadLocals.reinit_threads(self, space) class GILReleaseAction(PeriodicAsyncAction): @@ -59,43 +45,4 @@ """ def perform(self, executioncontext, frame): - do_yield_thread() - - -after_thread_switch = lambda: None # hook for signal.py - -def before_external_call(): - # this function must not raise, in such a way that the exception - # transformer knows that it cannot raise! - rgil.gil_release() -before_external_call._gctransformer_hint_cannot_collect_ = True -before_external_call._dont_reach_me_in_del_ = True - -def after_external_call(): - rgil.gil_acquire() - rthread.gc_thread_run() - after_thread_switch() -after_external_call._gctransformer_hint_cannot_collect_ = True -after_external_call._dont_reach_me_in_del_ = True - -# The _gctransformer_hint_cannot_collect_ hack is needed for -# translations in which the *_external_call() functions are not inlined. -# They tell the gctransformer not to save and restore the local GC -# pointers in the shadow stack. This is necessary because the GIL is -# not held after the call to before_external_call() or before the call -# to after_external_call(). - -def do_yield_thread(): - # explicitly release the gil, in a way that tries to give more - # priority to other threads (as opposed to continuing to run in - # the same thread). - if rgil.gil_yield_thread(): - rthread.gc_thread_run() - after_thread_switch() -do_yield_thread._gctransformer_hint_close_stack_ = True -do_yield_thread._dont_reach_me_in_del_ = True -do_yield_thread._dont_inline_ = True - -# do_yield_thread() needs a different hint: _gctransformer_hint_close_stack_. 
-# The *_external_call() functions are themselves called only from the rffi -# module from a helper function that also has this hint. + rgil.yield_thread() diff -Nru pypy-4.0.1+dfsg/pypy/module/thread/__init__.py pypy-5.0.1+dfsg/pypy/module/thread/__init__.py --- pypy-4.0.1+dfsg/pypy/module/thread/__init__.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/thread/__init__.py 2016-03-19 16:40:12.000000000 +0000 @@ -27,7 +27,7 @@ from pypy.module.thread import gil MixedModule.__init__(self, space, *args) prev_ec = space.threadlocals.get_ec() - space.threadlocals = gil.GILThreadLocals() + space.threadlocals = gil.GILThreadLocals(space) space.threadlocals.initialize(space) if prev_ec is not None: space.threadlocals._set_ec(prev_ec) diff -Nru pypy-4.0.1+dfsg/pypy/module/thread/test/support.py pypy-5.0.1+dfsg/pypy/module/thread/test/support.py --- pypy-4.0.1+dfsg/pypy/module/thread/test/support.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/thread/test/support.py 2016-03-19 16:40:12.000000000 +0000 @@ -5,7 +5,7 @@ import errno from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.module.thread import gil +from rpython.rlib import rgil NORMAL_TIMEOUT = 300.0 # 5 minutes @@ -15,9 +15,9 @@ adaptivedelay = 0.04 limit = time.time() + delay * NORMAL_TIMEOUT while time.time() <= limit: - gil.before_external_call() + rgil.release() time.sleep(adaptivedelay) - gil.after_external_call() + rgil.acquire() gc.collect() if space.is_true(space.call_function(w_condition)): return diff -Nru pypy-4.0.1+dfsg/pypy/module/thread/test/test_gil.py pypy-5.0.1+dfsg/pypy/module/thread/test/test_gil.py --- pypy-4.0.1+dfsg/pypy/module/thread/test/test_gil.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/thread/test/test_gil.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,6 @@ import time from pypy.module.thread import gil +from rpython.rlib import rgil from rpython.rlib.test import test_rthread from rpython.rlib 
import rthread as thread from rpython.rlib.objectmodel import we_are_translated @@ -55,7 +56,7 @@ assert state.datalen3 == len(state.data) assert state.datalen4 == len(state.data) debug_print(main, i, state.datalen4) - gil.do_yield_thread() + rgil.yield_thread() assert i == j j += 1 def bootstrap(): @@ -64,7 +65,7 @@ except Exception, e: assert 0 thread.gc_thread_die() - my_gil_threadlocals = gil.GILThreadLocals() + my_gil_threadlocals = gil.GILThreadLocals(space) def f(): state.data = [] state.datalen1 = 0 @@ -82,9 +83,9 @@ if not still_waiting: raise ValueError("time out") still_waiting -= 1 - if not we_are_translated(): gil.before_external_call() + if not we_are_translated(): rgil.release() time.sleep(0.01) - if not we_are_translated(): gil.after_external_call() + if not we_are_translated(): rgil.acquire() debug_print("leaving!") i1 = i2 = 0 for tid, i in state.data: diff -Nru pypy-4.0.1+dfsg/pypy/module/thread/test/test_thread.py pypy-5.0.1+dfsg/pypy/module/thread/test/test_thread.py --- pypy-4.0.1+dfsg/pypy/module/thread/test/test_thread.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/thread/test/test_thread.py 2016-03-19 16:40:12.000000000 +0000 @@ -239,14 +239,12 @@ if waiting: thread.interrupt_main() return - print 'tock...', x # <-force the GIL to be released, as - time.sleep(0.1) # time.sleep doesn't do non-translated + time.sleep(0.1) def busy_wait(): waiting.append(None) for x in range(50): - print 'tick...', x # <-force the GIL to be released, as - time.sleep(0.1) # time.sleep doesn't do non-translated + time.sleep(0.1) waiting.pop() # This is normally called by app_main.py diff -Nru pypy-4.0.1+dfsg/pypy/module/thread/threadlocals.py pypy-5.0.1+dfsg/pypy/module/thread/threadlocals.py --- pypy-4.0.1+dfsg/pypy/module/thread/threadlocals.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/thread/threadlocals.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,7 @@ -from rpython.rlib import rthread +import weakref 
+from rpython.rlib import rthread, rshrinklist from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.rarithmetic import r_ulonglong from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext @@ -13,15 +15,51 @@ a thread finishes. This works as long as the thread was started by os_thread.bootstrap().""" - def __init__(self): + def __init__(self, space): "NOT_RPYTHON" - self._valuedict = {} # {thread_ident: ExecutionContext()} + # + # This object tracks code that enters and leaves threads. + # There are two APIs. For Python-level threads, we know when + # the thread starts and ends, and we call enter_thread() and + # leave_thread(). In a few other cases, like callbacks, we + # might be running in some never-seen-before thread: in this + # case, the callback logic needs to call try_enter_thread() at + # the start, and if this returns True it needs to call + # leave_thread() at the end. + # + # We implement an optimization for the second case (which only + # works if we translate with a framework GC and with + # rweakref). If try_enter_thread() is called in a + # never-seen-before thread, it still returns False and + # remembers the ExecutionContext with 'self._weaklist'. The + # next time we call try_enter_thread() again in the same + # thread, the ExecutionContext is reused. The optimization is + # not completely invisible to the user: 'thread._local()' + # values will remain. We can argue that it is the correct + # behavior to do that, and the behavior we get if the + # optimization is disabled is buggy (but hard to do better + # then). + # + # 'self._valuedict' is a dict mapping the thread idents to + # ExecutionContexts; it does not list the ExecutionContexts + # which are in 'self._weaklist'. (The latter is more precisely + # a list of AutoFreeECWrapper objects, defined below, which + # each references the ExecutionContext.) 
+ # + self.space = space + self._valuedict = {} self._cleanup_() self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext, loop_invariant=True) + def can_optimize_with_weaklist(self): + config = self.space.config + return (config.translation.rweakref and + rthread.ThreadLocalReference.automatic_keepalive(config)) + def _cleanup_(self): self._valuedict.clear() + self._weaklist = None self._mainthreadident = 0 def enter_thread(self, space): @@ -29,19 +67,35 @@ self._set_ec(space.createexecutioncontext()) def try_enter_thread(self, space): - if rthread.get_ident() in self._valuedict: + # common case: the thread-local has already got a value + if self.raw_thread_local.get() is not None: return False - self.enter_thread(space) - return True - def _set_ec(self, ec): + # Else, make and attach a new ExecutionContext + ec = space.createexecutioncontext() + if not self.can_optimize_with_weaklist(): + self._set_ec(ec) + return True + + # If can_optimize_with_weaklist(), then 'rthread' keeps the + # thread-local values alive until the end of the thread. Use + # AutoFreeECWrapper as an object with a __del__; when this + # __del__ is called, it means the thread was really finished. + # In this case we don't want leave_thread() to be called + # explicitly, so we return False. + if self._weaklist is None: + self._weaklist = ListECWrappers() + self._weaklist.append(weakref.ref(AutoFreeECWrapper(ec))) + self._set_ec(ec, register_in_valuedict=False) + return False + + def _set_ec(self, ec, register_in_valuedict=True): ident = rthread.get_ident() if self._mainthreadident == 0 or self._mainthreadident == ident: ec._signals_enabled = 1 # the main thread is enabled self._mainthreadident = ident - self._valuedict[ident] = ec - # This logic relies on hacks and _make_sure_does_not_move(). - # It only works because we keep the 'ec' alive in '_valuedict' too. 
+ if register_in_valuedict: + self._valuedict[ident] = ec self.raw_thread_local.set(ec) def leave_thread(self, space): @@ -84,7 +138,23 @@ ec._signals_enabled = new def getallvalues(self): - return self._valuedict + if self._weaklist is None: + return self._valuedict + # This logic walks the 'self._weaklist' list and adds the + # ExecutionContexts to 'result'. We are careful in case there + # are two AutoFreeECWrappers in the list which have the same + # 'ident'; in this case we must keep the most recent one (the + # older one should be deleted soon). Moreover, entries in + # self._valuedict have priority because they are never + # outdated. + result = {} + for h in self._weaklist.items(): + wrapper = h() + if wrapper is not None and not wrapper.deleted: + result[wrapper.ident] = wrapper.ec + # ^^ this possibly overwrites an older ec + result.update(self._valuedict) + return result def reinit_threads(self, space): "Called in the child process after a fork()" @@ -94,7 +164,31 @@ old_sig = ec._signals_enabled if ident != self._mainthreadident: old_sig += 1 - self._cleanup_() + self._cleanup_() # clears self._valuedict self._mainthreadident = ident self._set_ec(ec) ec._signals_enabled = old_sig + + +class AutoFreeECWrapper(object): + deleted = False + + def __init__(self, ec): + # this makes a loop between 'self' and 'ec'. It should not prevent + # the __del__ method here from being called. + self.ec = ec + ec._threadlocals_auto_free = self + self.ident = rthread.get_ident() + + def __del__(self): + from pypy.module.thread.os_local import thread_is_stopping + # this is always called in another thread: the thread + # referenced by 'self.ec' has finished at that point, and + # we're just after the GC which finds no more references to + # 'ec' (and thus to 'self'). 
+ self.deleted = True + thread_is_stopping(self.ec) + +class ListECWrappers(rshrinklist.AbstractShrinkList): + def must_keep(self, wref): + return wref() is not None diff -Nru pypy-4.0.1+dfsg/pypy/module/time/interp_time.py pypy-5.0.1+dfsg/pypy/module/time/interp_time.py --- pypy-4.0.1+dfsg/pypy/module/time/interp_time.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/time/interp_time.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,7 +4,7 @@ from pypy.interpreter.gateway import unwrap_spec from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rarithmetic import intmask -from rpython.rlib import rposix +from rpython.rlib import rposix, rtime from rpython.translator.tool.cbuild import ExternalCompilationInfo import os import sys @@ -316,13 +316,13 @@ if secs < 0: raise OperationError(space.w_IOError, space.wrap("Invalid argument: negative time in sleep")) - pytime.sleep(secs) + rtime.sleep(secs) else: from rpython.rlib import rwin32 from errno import EINTR def _simple_sleep(space, secs, interruptible): if secs == 0.0 or not interruptible: - pytime.sleep(secs) + rtime.sleep(secs) else: millisecs = int(secs * 1000) interrupt_event = space.fromcache(State).get_interrupt_event() @@ -331,7 +331,7 @@ if rc == rwin32.WAIT_OBJECT_0: # Yield to make sure real Python signal handler # called. 
- pytime.sleep(0.001) + rtime.sleep(0.001) raise wrap_oserror(space, OSError(EINTR, "sleep() interrupted")) @unwrap_spec(secs=float) @@ -482,13 +482,6 @@ secs = pytime.time() return space.wrap(secs) -if _WIN: - class PCCache: - pass - pccache = PCCache() - pccache.divisor = 0.0 - pccache.ctrStart = 0 - def clock(space): """clock() -> floating point number diff -Nru pypy-4.0.1+dfsg/pypy/module/_vmprof/__init__.py pypy-5.0.1+dfsg/pypy/module/_vmprof/__init__.py --- pypy-4.0.1+dfsg/pypy/module/_vmprof/__init__.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_vmprof/__init__.py 2016-03-19 16:40:12.000000000 +0000 @@ -11,6 +11,7 @@ interpleveldefs = { 'enable': 'interp_vmprof.enable', 'disable': 'interp_vmprof.disable', + 'write_all_code_objects': 'interp_vmprof.write_all_code_objects', 'VMProfError': 'space.fromcache(interp_vmprof.Cache).w_VMProfError', } diff -Nru pypy-4.0.1+dfsg/pypy/module/_vmprof/interp_vmprof.py pypy-5.0.1+dfsg/pypy/module/_vmprof/interp_vmprof.py --- pypy-4.0.1+dfsg/pypy/module/_vmprof/interp_vmprof.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_vmprof/interp_vmprof.py 2016-03-19 16:40:12.000000000 +0000 @@ -59,11 +59,21 @@ 'interval' is a float representing the sampling interval, in seconds. Must be smaller than 1.0 """ + w_modules = space.sys.get('modules') + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: raise VMProfError(space, e) +def write_all_code_objects(space): + """ Needed on cpython, just empty function here + """ + pass + def disable(space): """Disable vmprof. Remember to close the file descriptor afterwards if necessary. 
diff -Nru pypy-4.0.1+dfsg/pypy/module/_vmprof/test/conftest.py pypy-5.0.1+dfsg/pypy/module/_vmprof/test/conftest.py --- pypy-4.0.1+dfsg/pypy/module/_vmprof/test/conftest.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_vmprof/test/conftest.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -import py -from rpython.jit.backend import detect_cpu - -cpu = detect_cpu.autodetect() -def pytest_runtest_setup(item): - if cpu != detect_cpu.MODEL_X86_64: - py.test.skip("x86_64 tests only") diff -Nru pypy-4.0.1+dfsg/pypy/module/_vmprof/test/test_direct.py pypy-5.0.1+dfsg/pypy/module/_vmprof/test/test_direct.py --- pypy-4.0.1+dfsg/pypy/module/_vmprof/test/test_direct.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_vmprof/test/test_direct.py 2016-03-19 16:40:12.000000000 +0000 @@ -19,6 +19,7 @@ lib = ffi.verify(""" #define PYPY_JIT_CODEMAP +#include "vmprof_stack.h" volatile int pypy_codemap_currently_invalid = 0; @@ -42,7 +43,7 @@ } -""" + open(str(srcdir.join("vmprof_get_custom_offset.h"))).read()) +""" + open(str(srcdir.join("vmprof_get_custom_offset.h"))).read(), include_dirs=[str(srcdir)]) class TestDirect(object): def test_infrastructure(self): @@ -67,8 +68,5 @@ buf = ffi.new("long[10]", [0] * 10) result = ffi.cast("void**", buf) res = lib.vmprof_write_header_for_jit_addr(result, 0, ffi.NULL, 100) - assert res == 6 - assert buf[0] == 2 - assert buf[1] == 16 - assert buf[2] == 12 - assert buf[3] == 8 + assert res == 10 + assert [x for x in buf] == [6, 0, 3, 16, 3, 12, 3, 8, 3, 4] diff -Nru pypy-4.0.1+dfsg/pypy/module/_vmprof/test/test__vmprof.py pypy-5.0.1+dfsg/pypy/module/_vmprof/test/test__vmprof.py --- pypy-4.0.1+dfsg/pypy/module/_vmprof/test/test__vmprof.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_vmprof/test/test__vmprof.py 2016-03-19 16:40:15.000000000 +0000 @@ -5,14 +5,15 @@ class AppTestVMProf(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['_vmprof', 'struct']) - 
cls.tmpfile = udir.join('test__vmprof.1').open('wb') - cls.w_tmpfileno = cls.space.wrap(cls.tmpfile.fileno()) - cls.w_tmpfilename = cls.space.wrap(cls.tmpfile.name) - cls.tmpfile2 = udir.join('test__vmprof.2').open('wb') - cls.w_tmpfileno2 = cls.space.wrap(cls.tmpfile2.fileno()) - cls.w_tmpfilename2 = cls.space.wrap(cls.tmpfile2.name) + cls.w_tmpfilename = cls.space.wrap(str(udir.join('test__vmprof.1'))) + cls.w_tmpfilename2 = cls.space.wrap(str(udir.join('test__vmprof.2'))) def test_import_vmprof(self): + tmpfile = open(self.tmpfilename, 'wb') + tmpfileno = tmpfile.fileno() + tmpfile2 = open(self.tmpfilename2, 'wb') + tmpfileno2 = tmpfile2.fileno() + import struct, sys WORD = struct.calcsize('l') @@ -23,7 +24,7 @@ i += 5 * WORD # header assert s[i ] == '\x05' # MARKER_HEADER assert s[i + 1] == '\x00' # 0 - assert s[i + 2] == '\x01' # VERSION_THREAD_ID + assert s[i + 2] == '\x02' # VERSION_THREAD_ID assert s[i + 3] == chr(4) # len('pypy') assert s[i + 4: i + 8] == 'pypy' i += 8 @@ -45,7 +46,7 @@ return count import _vmprof - _vmprof.enable(self.tmpfileno, 0.01) + _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() no_of_codes = count(s) @@ -56,7 +57,7 @@ pass """ in d - _vmprof.enable(self.tmpfileno2, 0.01) + _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): pass diff -Nru pypy-4.0.1+dfsg/pypy/module/_warnings/interp_warnings.py pypy-5.0.1+dfsg/pypy/module/_warnings/interp_warnings.py --- pypy-4.0.1+dfsg/pypy/module/_warnings/interp_warnings.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/module/_warnings/interp_warnings.py 2016-03-19 16:40:12.000000000 +0000 @@ -75,7 +75,7 @@ frame = ec.getnextframe_nohidden(frame) stacklevel -= 1 if frame: - w_globals = frame.w_globals + w_globals = frame.get_w_globals() lineno = frame.get_last_lineno() else: w_globals = space.sys.w_dict diff -Nru pypy-4.0.1+dfsg/pypy/objspace/fake/objspace.py pypy-5.0.1+dfsg/pypy/objspace/fake/objspace.py --- 
pypy-4.0.1+dfsg/pypy/objspace/fake/objspace.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/fake/objspace.py 2016-03-19 16:40:12.000000000 +0000 @@ -397,9 +397,14 @@ space.wrap(value) class FakeCompiler(object): - pass + def compile(self, code, name, mode, flags): + return FakePyCode() FakeObjSpace.default_compiler = FakeCompiler() +class FakePyCode(W_Root): + def exec_code(self, space, w_globals, w_locals): + return W_Root() + class FakeModule(W_Root): def __init__(self): diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/celldict.py pypy-5.0.1+dfsg/pypy/objspace/std/celldict.py --- pypy-4.0.1+dfsg/pypy/objspace/std/celldict.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/celldict.py 2016-03-19 16:40:12.000000000 +0000 @@ -64,6 +64,9 @@ def setitem_str(self, w_dict, key, w_value): cell = self.getdictvalue_no_unwrapping(w_dict, key) + return self._setitem_str_cell_known(cell, w_dict, key, w_value) + + def _setitem_str_cell_known(self, cell, w_dict, key, w_value): w_value = write_cell(self.space, cell, w_value) if w_value is None: return @@ -74,10 +77,11 @@ space = self.space if space.is_w(space.type(w_key), space.w_str): key = space.str_w(w_key) - w_result = self.getitem_str(w_dict, key) + cell = self.getdictvalue_no_unwrapping(w_dict, key) + w_result = unwrap_cell(self.space, cell) if w_result is not None: return w_result - self.setitem_str(w_dict, key, w_default) + self._setitem_str_cell_known(cell, w_dict, key, w_default) return w_default else: self.switch_to_object_strategy(w_dict) @@ -153,7 +157,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for key, cell in d.iteritems(): d_new[_wrapkey(space, key)] = unwrap_cell(self.space, cell) - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) def getiterkeys(self, w_dict): diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/complexobject.py pypy-5.0.1+dfsg/pypy/objspace/std/complexobject.py --- 
pypy-4.0.1+dfsg/pypy/objspace/std/complexobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/complexobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -266,11 +266,12 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_COMPLEX as tag + from pypy.objspace.std.util import IDTAG_SHIFT real = space.float_w(space.getattr(self, space.wrap("real"))) imag = space.float_w(space.getattr(self, space.wrap("imag"))) real_b = rbigint.fromrarith_int(float2longlong(real)) imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) - val = real_b.lshift(64).or_(imag_b).lshift(3).int_or_(tag) + val = real_b.lshift(64).or_(imag_b).lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(val) def int(self, space): diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/dictmultiobject.py pypy-5.0.1+dfsg/pypy/objspace/std/dictmultiobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/dictmultiobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/dictmultiobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -42,6 +42,16 @@ class W_DictMultiObject(W_Root): + """ Abstract base class that does not store a strategy. 
""" + __slots__ = ['space', 'dstorage'] + + def get_strategy(self): + raise NotImplementedError("abstract method") + + def set_strategy(self, strategy): + raise NotImplementedError("abstract method") + + @staticmethod def allocate_and_init_instance(space, w_type=None, module=False, instance=False, strdict=False, @@ -52,6 +62,10 @@ # every module needs its own strategy, because the strategy stores # the version tag strategy = ModuleDictStrategy(space) + storage = strategy.get_empty_storage() + w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict) + W_ModuleDictObject.__init__(w_obj, space, strategy, storage) + return w_obj elif space.config.objspace.std.withmapdict and instance: from pypy.objspace.std.mapdict import MapDictStrategy strategy = space.fromcache(MapDictStrategy) @@ -68,18 +82,17 @@ w_type = space.w_dict storage = strategy.get_empty_storage() - w_obj = space.allocate_instance(W_DictMultiObject, w_type) - W_DictMultiObject.__init__(w_obj, space, strategy, storage) + w_obj = space.allocate_instance(W_DictObject, w_type) + W_DictObject.__init__(w_obj, space, strategy, storage) return w_obj - def __init__(self, space, strategy, storage): + def __init__(self, space, storage): self.space = space - self.strategy = strategy self.dstorage = storage def __repr__(self): """representation for debugging purposes""" - return "%s(%s)" % (self.__class__.__name__, self.strategy) + return "%s(%s)" % (self.__class__.__name__, self.get_strategy()) def unwrap(w_dict, space): result = {} @@ -101,7 +114,7 @@ self.setitem(w_k, w_v) def setitem_str(self, key, w_value): - self.strategy.setitem_str(self, key, w_value) + self.get_strategy().setitem_str(self, key, w_value) @staticmethod def descr_new(space, w_dicttype, __args__): @@ -261,8 +274,9 @@ def nondescr_reversed_dict(self, space): """Not exposed directly to app-level, but via __pypy__.reversed_dict(). 
""" - if self.strategy.has_iterreversed: - it = self.strategy.iterreversed(self) + strategy = self.get_strategy() + if strategy.has_iterreversed: + it = strategy.iterreversed(self) return W_DictMultiIterKeysObject(space, it) else: # fall-back @@ -336,6 +350,46 @@ F: D[k] = F[k]""" init_or_update(space, self, __args__, 'dict.update') + def ensure_object_strategy(self): # for cpyext + object_strategy = self.space.fromcache(ObjectDictStrategy) + strategy = self.get_strategy() + if strategy is not object_strategy: + strategy.switch_to_object_strategy(self) + + +class W_DictObject(W_DictMultiObject): + """ a regular dict object """ + __slots__ = ['dstrategy'] + + def __init__(self, space, strategy, storage): + W_DictMultiObject.__init__(self, space, storage) + self.dstrategy = strategy + + def get_strategy(self): + return self.dstrategy + + def set_strategy(self, strategy): + self.dstrategy = strategy + + +class W_ModuleDictObject(W_DictMultiObject): + """ a dict object for a module, that is not expected to change. It stores + the strategy as a quasi-immutable field. 
""" + __slots__ = ['mstrategy'] + _immutable_fields_ = ['mstrategy?'] + + def __init__(self, space, strategy, storage): + W_DictMultiObject.__init__(self, space, storage) + self.mstrategy = strategy + + def get_strategy(self): + return self.mstrategy + + def set_strategy(self, strategy): + self.mstrategy = strategy + + + def _add_indirections(): dict_methods = "getitem getitem_str setitem setdefault \ @@ -347,7 +401,7 @@ def make_method(method): def f(self, *args): - return getattr(self.strategy, method)(self, *args) + return getattr(self.get_strategy(), method)(self, *args) f.func_name = method return f @@ -490,7 +544,7 @@ def clear(self, w_dict): strategy = self.space.fromcache(EmptyDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def listview_bytes(self, w_dict): @@ -556,32 +610,32 @@ def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(BytesDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_unicode_strategy(self, w_dict): strategy = self.space.fromcache(UnicodeDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_int_strategy(self, w_dict): strategy = self.space.fromcache(IntDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_identity_strategy(self, w_dict): from pypy.objspace.std.identitydict import IdentityDictStrategy strategy = self.space.fromcache(IdentityDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def switch_to_object_strategy(self, w_dict): strategy = self.space.fromcache(ObjectDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = 
strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def getitem(self, w_dict, w_key): @@ -662,7 +716,7 @@ if self.pos < self.len: result = getattr(self, 'next_' + TP + '_entry')() self.pos += 1 - if self.strategy is self.dictimplementation.strategy: + if self.strategy is self.dictimplementation.get_strategy(): return result # common case else: # waaa, obscure case: the strategy changed, but not the @@ -804,7 +858,7 @@ else: return # w_dict is completely empty, nothing to do count = w_dict.length() - 1 - w_updatedict.strategy.prepare_update(w_updatedict, count) + w_updatedict.get_strategy().prepare_update(w_updatedict, count) # If the strategy is still different, continue the slow way if not same_strategy(self, w_updatedict): for key, value, keyhash in iteritemsh: @@ -825,7 +879,7 @@ def same_strategy(self, w_otherdict): return (setitem_untyped is not None and - w_otherdict.strategy is self) + w_otherdict.get_strategy() is self) dictimpl.iterkeys = iterkeys dictimpl.itervalues = itervalues @@ -934,7 +988,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for key, value in d.iteritems(): d_new[self.wrap(key)] = value - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) # --------------- iterator interface ----------------- @@ -1178,7 +1232,7 @@ def update1_dict_dict(space, w_dict, w_data): - w_data.strategy.rev_update1_dict_dict(w_data, w_dict) + w_data.get_strategy().rev_update1_dict_dict(w_data, w_dict) def update1_pairs(space, w_dict, data_w): @@ -1371,9 +1425,8 @@ return space.len(self.w_dict) def _all_contained_in(space, w_dictview, w_other): - w_iter = space.iter(w_dictview) - for w_item in space.iteriterable(w_iter): - if not space.is_true(space.contains(w_other, w_item)): + for w_item in space.iteriterable(w_dictview): + if not space.contains_w(w_other, w_item): return space.w_False return space.w_True diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/floatobject.py 
pypy-5.0.1+dfsg/pypy/objspace/std/floatobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/floatobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/floatobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -160,15 +160,11 @@ return self.floatval def int(self, space): + # this is a speed-up only, for space.int(w_float). if (type(self) is not W_FloatObject and space.is_overloaded(self, space.w_float, '__int__')): return W_Root.int(self, space) - try: - value = ovfcheck_float_to_int(self.floatval) - except OverflowError: - return space.long(self) - else: - return space.newint(value) + return self.descr_trunc(space) def is_w(self, space, w_other): from rpython.rlib.longlong2float import float2longlong @@ -185,9 +181,10 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_FLOAT as tag + from pypy.objspace.std.util import IDTAG_SHIFT val = float2longlong(space.float_w(self)) b = rbigint.fromrarith_int(val) - b = b.lshift(3).int_or_(tag) + b = b.lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(b) def __repr__(self): @@ -424,9 +421,8 @@ "cannot convert float NaN to integer") def descr_trunc(self, space): - whole = math.modf(self.floatval)[1] try: - value = ovfcheck_float_to_int(whole) + value = ovfcheck_float_to_int(self.floatval) except OverflowError: return self.descr_long(space) else: @@ -661,7 +657,7 @@ __format__ = interp2app(W_FloatObject.descr_format), __coerce__ = interp2app(W_FloatObject.descr_coerce), __nonzero__ = interp2app(W_FloatObject.descr_nonzero), - __int__ = interp2app(W_FloatObject.int), + __int__ = interp2app(W_FloatObject.descr_trunc), __float__ = interp2app(W_FloatObject.descr_float), __long__ = interp2app(W_FloatObject.descr_long), __trunc__ = interp2app(W_FloatObject.descr_trunc), diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/intobject.py pypy-5.0.1+dfsg/pypy/objspace/std/intobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/intobject.py 2015-11-19 
19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/intobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -24,7 +24,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.objspace.std import newformat from pypy.objspace.std.util import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, wrap_parsestringerror) + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, IDTAG_SHIFT, wrap_parsestringerror) SENTINEL = object() @@ -46,7 +46,7 @@ if self.user_overridden_class: return None b = space.bigint_w(self) - b = b.lshift(3).int_or_(IDTAG_INT) + b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_INT) return space.newlong_from_rbigint(b) def int(self, space): diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/kwargsdict.py pypy-5.0.1+dfsg/pypy/objspace/std/kwargsdict.py --- pypy-4.0.1+dfsg/pypy/objspace/std/kwargsdict.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/kwargsdict.py 2016-03-19 16:40:12.000000000 +0000 @@ -18,7 +18,7 @@ def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage @@ -142,7 +142,7 @@ d_new = strategy.unerase(strategy.get_empty_storage()) for i in range(len(keys)): d_new[self.wrap(keys[i])] = values_w[i] - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(d_new) def switch_to_bytes_strategy(self, w_dict): @@ -152,7 +152,7 @@ d_new = strategy.unerase(storage) for i in range(len(keys)): d_new[keys[i]] = values_w[i] - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = storage def view_as_kwargs(self, w_dict): diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/listobject.py pypy-5.0.1+dfsg/pypy/objspace/std/listobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/listobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/listobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -222,6 
+222,10 @@ self.strategy = object_strategy object_strategy.init_from_list_w(self, list_w) + def ensure_object_strategy(self): # for cpyext + if self.strategy is not self.space.fromcache(ObjectListStrategy): + self.switch_to_object_strategy() + def _temporarily_as_objects(self): if self.strategy is self.space.fromcache(ObjectListStrategy): return self @@ -521,7 +525,6 @@ def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): - # XXX consider to extend rlist's functionality? length = self.length() start, stop, step, slicelength = w_index.indices4(space, length) assert slicelength >= 0 diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/longobject.py pypy-5.0.1+dfsg/pypy/objspace/std/longobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/longobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/longobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -16,7 +16,7 @@ from pypy.objspace.std import newformat from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.util import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, wrap_parsestringerror) + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, IDTAG_SHIFT, wrap_parsestringerror) def delegate_other(func): @@ -45,7 +45,7 @@ if self.user_overridden_class: return None b = space.bigint_w(self) - b = b.lshift(3).int_or_(IDTAG_LONG) + b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_LONG) return space.newlong_from_rbigint(b) def unwrap(self, space): diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/mapdict.py pypy-5.0.1+dfsg/pypy/objspace/std/mapdict.py --- pypy-4.0.1+dfsg/pypy/objspace/std/mapdict.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/mapdict.py 2016-03-19 16:40:15.000000000 +0000 @@ -1,4 +1,4 @@ -import weakref +import weakref, sys from rpython.rlib import jit, objectmodel, debug, rerased from rpython.rlib.rarithmetic import intmask, r_uint @@ -6,11 +6,17 @@ from pypy.interpreter.baseobjspace import W_Root from 
pypy.objspace.std.dictmultiobject import ( W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, - BaseValueIterator, BaseItemIterator, _never_equal_to_string + BaseValueIterator, BaseItemIterator, _never_equal_to_string, + W_DictObject, ) from pypy.objspace.std.typeobject import MutableCell +erase_item, unerase_item = rerased.new_erasing_pair("mapdict storage item") +erase_map, unerase_map = rerased.new_erasing_pair("map") +erase_list, unerase_list = rerased.new_erasing_pair("mapdict storage list") + + # ____________________________________________________________ # attribute shapes @@ -19,6 +25,7 @@ # note: we use "x * NUM_DIGITS_POW2" instead of "x << NUM_DIGITS" because # we want to propagate knowledge that the result cannot be negative + class AbstractAttribute(object): _immutable_fields_ = ['terminator'] cache_attrs = None @@ -29,10 +36,10 @@ assert isinstance(terminator, Terminator) self.terminator = terminator - def read(self, obj, selector): - attr = self.find_map_attr(selector) + def read(self, obj, name, index): + attr = self.find_map_attr(name, index) if attr is None: - return self.terminator._read_terminator(obj, selector) + return self.terminator._read_terminator(obj, name, index) if ( jit.isconstant(attr.storageindex) and jit.isconstant(obj) and @@ -46,39 +53,26 @@ def _pure_mapdict_read_storage(self, obj, storageindex): return obj._mapdict_read_storage(storageindex) - def write(self, obj, selector, w_value): - attr = self.find_map_attr(selector) + def write(self, obj, name, index, w_value): + attr = self.find_map_attr(name, index) if attr is None: - return self.terminator._write_terminator(obj, selector, w_value) + return self.terminator._write_terminator(obj, name, index, w_value) if not attr.ever_mutated: attr.ever_mutated = True obj._mapdict_write_storage(attr.storageindex, w_value) return True - def delete(self, obj, selector): + def delete(self, obj, name, index): pass - def find_map_attr(self, selector): - if 
jit.we_are_jitted(): - # hack for the jit: - # the _find_map_attr method is pure too, but its argument is never - # constant, because it is always a new tuple - return self._find_map_attr_jit_pure(selector[0], selector[1]) - else: - return self._find_map_attr_indirection(selector) - @jit.elidable - def _find_map_attr_jit_pure(self, name, index): - return self._find_map_attr_indirection((name, index)) - - @jit.dont_look_inside - def _find_map_attr_indirection(self, selector): + def find_map_attr(self, name, index): if (self.space.config.objspace.std.withmethodcache): - return self._find_map_attr_cache(selector) - return self._find_map_attr(selector) + return self._find_map_attr_cache(name, index) + return self._find_map_attr(name, index) @jit.dont_look_inside - def _find_map_attr_cache(self, selector): + def _find_map_attr_cache(self, name, index): space = self.space cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp @@ -86,31 +80,36 @@ attrs_as_int = objectmodel.current_object_addr_as_int(self) # ^^^Note: see comment in typeobject.py for # _pure_lookup_where_with_method_cache() - hash_selector = objectmodel.compute_hash(selector) + + # unrolled hash computation for 2-tuple + c1 = 0x345678 + c2 = 1000003 + hash_name = objectmodel.compute_hash(name) + hash_selector = intmask((c2 * ((c2 * c1) ^ hash_name)) ^ index) product = intmask(attrs_as_int * hash_selector) attr_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 # ^^^Note2: same comment too cached_attr = cache.attrs[attr_hash] if cached_attr is self: - cached_selector = cache.selectors[attr_hash] - if cached_selector == selector: + cached_name = cache.names[attr_hash] + cached_index = cache.indexes[attr_hash] + if cached_name == name and cached_index == index: attr = cache.cached_attrs[attr_hash] if space.config.objspace.std.withmethodcachecounter: - name = selector[0] cache.hits[name] = cache.hits.get(name, 0) + 1 return attr - attr = 
self._find_map_attr(selector) + attr = self._find_map_attr(name, index) cache.attrs[attr_hash] = self - cache.selectors[attr_hash] = selector + cache.names[attr_hash] = name + cache.indexes[attr_hash] = index cache.cached_attrs[attr_hash] = attr if space.config.objspace.std.withmethodcachecounter: - name = selector[0] cache.misses[name] = cache.misses.get(name, 0) + 1 return attr - def _find_map_attr(self, selector): + def _find_map_attr(self, name, index): while isinstance(self, PlainAttribute): - if selector == self.selector: + if index == self.index and name == self.name: return self self = self.back return None @@ -136,40 +135,133 @@ @jit.elidable def _get_new_attr(self, name, index): - selector = name, index cache = self.cache_attrs if cache is None: cache = self.cache_attrs = {} - attr = cache.get(selector, None) + attr = cache.get((name, index), None) if attr is None: - attr = PlainAttribute(selector, self) - cache[selector] = attr + attr = PlainAttribute(name, index, self) + cache[name, index] = attr return attr - @jit.look_inside_iff(lambda self, obj, selector, w_value: - jit.isconstant(self) and - jit.isconstant(selector[0]) and - jit.isconstant(selector[1])) - def add_attr(self, obj, selector, w_value): - # grumble, jit needs this - attr = self._get_new_attr(selector[0], selector[1]) - oldattr = obj._get_mapdict_map() + @jit.elidable + def _get_cache_attr(self, name, index): + key = name, index + # this method is not actually elidable, but it's fine anyway + if self.cache_attrs is not None: + return self.cache_attrs.get(key, None) + return None + + def add_attr(self, obj, name, index, w_value): + self._reorder_and_add(obj, name, index, w_value) if not jit.we_are_jitted(): + oldattr = self + attr = obj._get_mapdict_map() size_est = (oldattr._size_estimate + attr.size_estimate() - oldattr.size_estimate()) assert size_est >= (oldattr.length() * NUM_DIGITS_POW2) oldattr._size_estimate = size_est - if attr.length() > obj._mapdict_storage_length(): - # note 
that attr.size_estimate() is always at least attr.length() - new_storage = [None] * attr.size_estimate() + + def _add_attr_without_reordering(self, obj, name, index, w_value): + attr = self._get_new_attr(name, index) + attr._switch_map_and_write_storage(obj, w_value) + + @jit.unroll_safe + def _switch_map_and_write_storage(self, obj, w_value): + if self.length() > obj._mapdict_storage_length(): + # note that self.size_estimate() is always at least self.length() + new_storage = [None] * self.size_estimate() for i in range(obj._mapdict_storage_length()): new_storage[i] = obj._mapdict_read_storage(i) - obj._set_mapdict_storage_and_map(new_storage, attr) + obj._set_mapdict_storage_and_map(new_storage, self) # the order is important here: first change the map, then the storage, # for the benefit of the special subclasses - obj._set_mapdict_map(attr) - obj._mapdict_write_storage(attr.storageindex, w_value) + obj._set_mapdict_map(self) + obj._mapdict_write_storage(self.storageindex, w_value) + + + @jit.elidable + def _find_branch_to_move_into(self, name, index): + # walk up the map chain to find an ancestor with lower order that + # already has the current name as a child inserted + current_order = sys.maxint + number_to_readd = 0 + current = self + key = (name, index) + while True: + attr = None + if current.cache_attrs is not None: + attr = current.cache_attrs.get(key, None) + if attr is None or attr.order > current_order: + # we reached the top, so we didn't find it anywhere, + # just add it to the top attribute + if not isinstance(current, PlainAttribute): + return 0, self._get_new_attr(name, index) + + else: + return number_to_readd, attr + # if not found try parent + number_to_readd += 1 + current_order = current.order + current = current.back + + @jit.look_inside_iff(lambda self, obj, name, index, w_value: + jit.isconstant(self) and + jit.isconstant(name) and + jit.isconstant(index)) + def _reorder_and_add(self, obj, name, index, w_value): + # the idea is as 
follows: the subtrees of any map are ordered by + # insertion. the invariant is that subtrees that are inserted later + # must not contain the name of the attribute of any earlier inserted + # attribute anywhere + # m______ + # inserted first / \ ... \ further attributes + # attrname a 0/ 1\ n\ + # m a must not appear here anywhere + # + # when inserting a new attribute in an object we check whether any + # parent of lower order has seen that attribute yet. if yes, we follow + # that branch. if not, we normally append that attribute. When we + # follow a prior branch, we necessarily remove some attributes to be + # able to do that. They need to be re-added, which has to follow the + # reordering procedure recusively. + + # we store the to-be-readded attribute in the stack, with the map and + # the value paired up those are lazily initialized to a list large + # enough to store all current attributes + stack = None + stack_index = 0 + while True: + current = self + number_to_readd = 0 + number_to_readd, attr = self._find_branch_to_move_into(name, index) + # we found the attributes further up, need to save the + # previous values of the attributes we passed + if number_to_readd: + if stack is None: + stack = [erase_map(None)] * (self.length() * 2) + current = self + for i in range(number_to_readd): + assert isinstance(current, PlainAttribute) + w_self_value = obj._mapdict_read_storage( + current.storageindex) + stack[stack_index] = erase_map(current) + stack[stack_index + 1] = erase_item(w_self_value) + stack_index += 2 + current = current.back + attr._switch_map_and_write_storage(obj, w_value) + + if not stack_index: + return + + # readd the current top of the stack + stack_index -= 2 + next_map = unerase_map(stack[stack_index]) + w_value = unerase_item(stack[stack_index + 1]) + name = next_map.name + index = next_map.index + self = obj._get_mapdict_map() def materialize_r_dict(self, space, obj, dict_w): raise NotImplementedError("abstract base class") @@ -188,11 
+280,11 @@ AbstractAttribute.__init__(self, space, self) self.w_cls = w_cls - def _read_terminator(self, obj, selector): + def _read_terminator(self, obj, name, index): return None - def _write_terminator(self, obj, selector, w_value): - obj._get_mapdict_map().add_attr(obj, selector, w_value) + def _write_terminator(self, obj, name, index, w_value): + obj._get_mapdict_map().add_attr(obj, name, index, w_value) return True def copy(self, obj): @@ -230,40 +322,40 @@ class NoDictTerminator(Terminator): - def _write_terminator(self, obj, selector, w_value): - if selector[1] == DICT: + def _write_terminator(self, obj, name, index, w_value): + if index == DICT: return False - return Terminator._write_terminator(self, obj, selector, w_value) + return Terminator._write_terminator(self, obj, name, index, w_value) class DevolvedDictTerminator(Terminator): - def _read_terminator(self, obj, selector): - if selector[1] == DICT: + def _read_terminator(self, obj, name, index): + if index == DICT: space = self.space w_dict = obj.getdict(space) - return space.finditem_str(w_dict, selector[0]) - return Terminator._read_terminator(self, obj, selector) + return space.finditem_str(w_dict, name) + return Terminator._read_terminator(self, obj, name, index) - def _write_terminator(self, obj, selector, w_value): - if selector[1] == DICT: + def _write_terminator(self, obj, name, index, w_value): + if index == DICT: space = self.space w_dict = obj.getdict(space) - space.setitem_str(w_dict, selector[0], w_value) + space.setitem_str(w_dict, name, w_value) return True - return Terminator._write_terminator(self, obj, selector, w_value) + return Terminator._write_terminator(self, obj, name, index, w_value) - def delete(self, obj, selector): + def delete(self, obj, name, index): from pypy.interpreter.error import OperationError - if selector[1] == DICT: + if index == DICT: space = self.space w_dict = obj.getdict(space) try: - space.delitem(w_dict, space.wrap(selector[0])) + space.delitem(w_dict, 
space.wrap(name)) except OperationError, ex: if not ex.match(space, space.w_KeyError): raise return Terminator.copy(self, obj) - return Terminator.delete(self, obj, selector) + return Terminator.delete(self, obj, name, index) def remove_dict_entries(self, obj): assert 0, "should be unreachable" @@ -275,27 +367,29 @@ return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'storageindex', 'back', 'ever_mutated?'] + _immutable_fields_ = ['name', 'index', 'storageindex', 'back', 'ever_mutated?', 'order'] - def __init__(self, selector, back): + def __init__(self, name, index, back): AbstractAttribute.__init__(self, back.space, back.terminator) - self.selector = selector + self.name = name + self.index = index self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 self.ever_mutated = False + self.order = len(back.cache_attrs) if back.cache_attrs else 0 def _copy_attr(self, obj, new_obj): - w_value = self.read(obj, self.selector) - new_obj._get_mapdict_map().add_attr(new_obj, self.selector, w_value) + w_value = self.read(obj, self.name, self.index) + new_obj._get_mapdict_map().add_attr(new_obj, self.name, self.index, w_value) - def delete(self, obj, selector): - if selector == self.selector: + def delete(self, obj, name, index): + if index == self.index and name == self.name: # ok, attribute is deleted if not self.ever_mutated: self.ever_mutated = True return self.back.copy(obj) - new_obj = self.back.delete(obj, selector) + new_obj = self.back.delete(obj, name, index) if new_obj is not None: self._copy_attr(obj, new_obj) return new_obj @@ -314,14 +408,14 @@ return new_obj def search(self, attrtype): - if self.selector[1] == attrtype: + if self.index == attrtype: return self return self.back.search(attrtype) def materialize_r_dict(self, space, obj, dict_w): new_obj = self.back.materialize_r_dict(space, obj, dict_w) - if self.selector[1] == DICT: - 
w_attr = space.wrap(self.selector[0]) + if self.index == DICT: + w_attr = space.wrap(self.name) dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) else: self._copy_attr(obj, new_obj) @@ -329,12 +423,12 @@ def remove_dict_entries(self, obj): new_obj = self.back.remove_dict_entries(obj) - if self.selector[1] != DICT: + if self.index != DICT: self._copy_attr(obj, new_obj) return new_obj def __repr__(self): - return "" % (self.selector, self.storageindex, self.back) + return "" % (self.name, self.index, self.storageindex, self.back) def _become(w_obj, new_obj): # this is like the _become method, really, but we cannot use that due to @@ -346,8 +440,8 @@ assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE - self._empty_selector = (None, INVALID) - self.selectors = [self._empty_selector] * SIZE + self.names = [None] * SIZE + self.indexes = [INVALID] * SIZE self.cached_attrs = [None] * SIZE if space.config.objspace.std.withmethodcachecounter: self.hits = {} @@ -356,8 +450,9 @@ def clear(self): for i in range(len(self.attrs)): self.attrs[i] = None - for i in range(len(self.selectors)): - self.selectors[i] = self._empty_selector + for i in range(len(self.names)): + self.names[i] = None + self.indexes[i] = INVALID for i in range(len(self.cached_attrs)): self.cached_attrs[i] = None @@ -387,28 +482,28 @@ # objspace interface def getdictvalue(self, space, attrname): - return self._get_mapdict_map().read(self, (attrname, DICT)) + return self._get_mapdict_map().read(self, attrname, DICT) def setdictvalue(self, space, attrname, w_value): - return self._get_mapdict_map().write(self, (attrname, DICT), w_value) + return self._get_mapdict_map().write(self, attrname, DICT, w_value) def deldictvalue(self, space, attrname): - new_obj = self._get_mapdict_map().delete(self, (attrname, DICT)) + new_obj = self._get_mapdict_map().delete(self, attrname, DICT) if new_obj is None: return False 
self._become(new_obj) return True def getdict(self, space): - w_dict = self._get_mapdict_map().read(self, ("dict", SPECIAL)) + w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) if w_dict is not None: assert isinstance(w_dict, W_DictMultiObject) return w_dict strategy = space.fromcache(MapDictStrategy) storage = strategy.erase(self) - w_dict = W_DictMultiObject(space, strategy, storage) - flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) + w_dict = W_DictObject(space, strategy, storage) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) assert flag return w_dict @@ -417,9 +512,14 @@ w_dict = check_new_dictionary(space, w_dict) w_olddict = self.getdict(space) assert isinstance(w_dict, W_DictMultiObject) - if type(w_olddict.strategy) is not ObjectDictStrategy: - w_olddict.strategy.switch_to_object_strategy(w_olddict) - flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict) + # The old dict has got 'self' as dstorage, but we are about to + # change self's ("dict", SPECIAL) attribute to point to the + # new dict. If the old dict was using the MapDictStrategy, we + # have to force it now: otherwise it would remain an empty + # shell that continues to delegate to 'self'. 
+ if type(w_olddict.get_strategy()) is MapDictStrategy: + w_olddict.get_strategy().switch_to_object_strategy(w_olddict) + flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) assert flag def getclass(self, space): @@ -437,16 +537,16 @@ self._init_empty(w_subtype.terminator) def getslotvalue(self, slotindex): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - return self._get_mapdict_map().read(self, key) + index = SLOTS_STARTING_FROM + slotindex + return self._get_mapdict_map().read(self, "slot", index) def setslotvalue(self, slotindex, w_value): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - self._get_mapdict_map().write(self, key, w_value) + index = SLOTS_STARTING_FROM + slotindex + self._get_mapdict_map().write(self, "slot", index, w_value) def delslotvalue(self, slotindex): - key = ("slot", SLOTS_STARTING_FROM + slotindex) - new_obj = self._get_mapdict_map().delete(self, key) + index = SLOTS_STARTING_FROM + slotindex + new_obj = self._get_mapdict_map().delete(self, "slot", index) if new_obj is None: return False self._become(new_obj) @@ -456,7 +556,7 @@ def getweakref(self): from pypy.module._weakref.interp__weakref import WeakrefLifeline - lifeline = self._get_mapdict_map().read(self, ("weakref", SPECIAL)) + lifeline = self._get_mapdict_map().read(self, "weakref", SPECIAL) if lifeline is None: return None assert isinstance(lifeline, WeakrefLifeline) @@ -466,11 +566,11 @@ def setweakref(self, space, weakreflifeline): from pypy.module._weakref.interp__weakref import WeakrefLifeline assert isinstance(weakreflifeline, WeakrefLifeline) - self._get_mapdict_map().write(self, ("weakref", SPECIAL), weakreflifeline) + self._get_mapdict_map().write(self, "weakref", SPECIAL, weakreflifeline) setweakref._cannot_really_call_random_things_ = True def delweakref(self): - self._get_mapdict_map().write(self, ("weakref", SPECIAL), None) + self._get_mapdict_map().write(self, "weakref", SPECIAL, None) delweakref._cannot_really_call_random_things_ = True class 
ObjectMixin(object): @@ -531,9 +631,6 @@ memo_get_subclass_of_correct_size._annspecialcase_ = "specialize:memo" _subclass_cache = {} -erase_item, unerase_item = rerased.new_erasing_pair("mapdict storage item") -erase_list, unerase_list = rerased.new_erasing_pair("mapdict storage list") - def _make_subclass_size_n(supercls, n): from rpython.rlib import unroll rangen = unroll.unrolling_iterable(range(n)) @@ -636,7 +733,7 @@ w_obj = self.unerase(w_dict.dstorage) strategy = self.space.fromcache(ObjectDictStrategy) dict_w = strategy.unerase(strategy.get_empty_storage()) - w_dict.strategy = strategy + w_dict.set_strategy(strategy) w_dict.dstorage = strategy.erase(dict_w) assert w_obj.getdict(self.space) is w_dict or w_obj._get_mapdict_map().terminator.w_cls is None materialize_r_dict(self.space, w_obj, dict_w) @@ -715,7 +812,7 @@ curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT) if curr is None: raise KeyError - key = curr.selector[0] + key = curr.name w_value = self.getitem_str(w_dict, key) w_key = self.space.wrap(key) self.delitem(w_dict, w_key) @@ -745,14 +842,14 @@ def next_key_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None if self.curr_map: curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name w_attr = self.space.wrap(attr) return w_attr return None @@ -767,14 +864,14 @@ def next_value_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None if self.curr_map: curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = 
curr_map.name return self.w_obj.getdictvalue(self.space, attr) return None @@ -788,14 +885,14 @@ def next_item_entry(self): implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) + assert isinstance(implementation.get_strategy(), MapDictStrategy) if self.orig_map is not self.w_obj._get_mapdict_map(): return None, None if self.curr_map: curr_map = self.curr_map.search(DICT) if curr_map: self.curr_map = curr_map.back - attr = curr_map.selector[0] + attr = curr_map.name w_attr = self.space.wrap(attr) return w_attr, self.w_obj.getdictvalue(self.space, attr) return None, None @@ -878,9 +975,9 @@ _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache( name, version_tag) # - selector = ("", INVALID) + attrname, index = ("", INVALID) if w_descr is None: - selector = (name, DICT) # common case: no such attr in the class + attrname, index = (name, DICT) # common case: no such attr in the class elif isinstance(w_descr, MutableCell): pass # we have a MutableCell in the class: give up elif space.is_data_descr(w_descr): @@ -888,20 +985,21 @@ # (if any) has no relevance. from pypy.interpreter.typedef import Member if isinstance(w_descr, Member): # it is a slot -- easy case - selector = ("slot", SLOTS_STARTING_FROM + w_descr.index) + attrname, index = ("slot", SLOTS_STARTING_FROM + w_descr.index) else: # There is a non-data descriptor in the class. If there is # also a dict attribute, use the latter, caching its storageindex. # If not, we loose. We could do better in this case too, # but we don't care too much; the common case of a method # invocation is handled by LOOKUP_METHOD_xxx below. 
- selector = (name, DICT) + attrname = name + index = DICT # - if selector[1] != INVALID: - attr = map.find_map_attr(selector) + if index != INVALID: + attr = map.find_map_attr(attrname, index) if attr is not None: # Note that if map.terminator is a DevolvedDictTerminator, - # map.find_map_attr will always return None if selector[1]==DICT. + # map.find_map_attr will always return None if index==DICT. _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) return w_obj._mapdict_read_storage(attr.storageindex) if space.config.objspace.std.withmethodcachecounter: diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/objspace.py pypy-5.0.1+dfsg/pypy/objspace/std/objspace.py --- pypy-4.0.1+dfsg/pypy/objspace/std/objspace.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/objspace.py 2016-03-19 16:40:12.000000000 +0000 @@ -18,7 +18,7 @@ from pypy.objspace.std.bytearrayobject import W_BytearrayObject from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject, wrapstr from pypy.objspace.std.complexobject import W_ComplexObject -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.dictmultiobject import W_DictMultiObject, W_DictObject from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject, setup_prebuilt, wrapint from pypy.objspace.std.iterobject import W_AbstractSeqIterObject, W_SeqIterObject @@ -130,6 +130,7 @@ def wrapbytes(self, x): return wrapstr(self, x) + @specialize.argtype(1) def wrap(self, x): "Wraps the Python value 'x' into one of the wrapper classes." 
# You might notice that this function is rather conspicuously @@ -172,7 +173,6 @@ else: return W_LongObject.fromrarith_int(x) return self._wrap_not_rpython(x) - wrap._annspecialcase_ = "specialize:wrap" def _wrap_not_rpython(self, x): "NOT_RPYTHON" @@ -359,7 +359,8 @@ subcls = get_subclass_of_correct_size(self, cls, w_subtype) else: subcls = get_unique_interplevel_subclass( - self.config, cls, w_subtype.hasdict, w_subtype.nslots != 0, + self.config, cls, w_subtype.hasdict, + w_subtype.layout.nslots != 0, w_subtype.needsdel, w_subtype.weakrefable) instance = instantiate(subcls) assert isinstance(instance, cls) @@ -439,7 +440,7 @@ # and isinstance() for others. See test_listobject.test_uses_custom... if type(w_obj) is W_ListObject: return w_obj.getitems_bytes() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_bytes() @@ -454,7 +455,7 @@ # and isinstance() for others. See test_listobject.test_uses_custom... 
if type(w_obj) is W_ListObject: return w_obj.getitems_unicode() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() @@ -467,7 +468,7 @@ def listview_int(self, w_obj): if type(w_obj) is W_ListObject: return w_obj.getitems_int() - if type(w_obj) is W_DictMultiObject: + if type(w_obj) is W_DictObject: return w_obj.listview_int() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_int() @@ -485,7 +486,7 @@ return None def view_as_kwargs(self, w_dict): - if type(w_dict) is W_DictMultiObject: + if type(w_dict) is W_DictObject: return w_dict.view_as_kwargs() return (None, None) diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/setobject.py pypy-5.0.1+dfsg/pypy/objspace/std/setobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/setobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/setobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -942,7 +942,7 @@ return False if w_set.length() == 0: return True - # it's possible to have 0-lenght strategy that's not empty + # it's possible to have 0-length strategy that's not empty if w_set.strategy is w_other.strategy: return self._issubset_unwrapped(w_set, w_other) if not self.may_contain_equal_elements(w_other.strategy): @@ -1076,7 +1076,7 @@ if self is w_other.strategy: strategy = self if w_set.length() > w_other.length(): - # swap operants + # swap operands storage = self._intersect_unwrapped(w_other, w_set) else: storage = self._intersect_unwrapped(w_set, w_other) @@ -1086,7 +1086,7 @@ else: strategy = self.space.fromcache(ObjectSetStrategy) if w_set.length() > w_other.length(): - # swap operants + # swap operands storage = w_other.strategy._intersect_wrapped(w_other, w_set) else: storage = self._intersect_wrapped(w_set, w_other) diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/test/test_celldict.py 
pypy-5.0.1+dfsg/pypy/objspace/std/test/test_celldict.py --- pypy-4.0.1+dfsg/pypy/objspace/std/test/test_celldict.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/test/test_celldict.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,7 @@ import py from pypy.objspace.std.celldict import ModuleDictStrategy -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.dictmultiobject import W_DictObject, W_ModuleDictObject from pypy.objspace.std.test.test_dictmultiobject import ( BaseTestRDictImplementation, BaseTestDevolvedDictImplementation, FakeSpace, FakeString) @@ -14,7 +14,7 @@ def test_basic_property_cells(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_ModuleDictObject(space, strategy, storage) v1 = strategy.version key = "a" @@ -23,30 +23,30 @@ v2 = strategy.version assert v1 is not v2 assert d.getitem(w_key) == 1 - assert d.strategy.getdictvalue_no_unwrapping(d, key) == 1 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key) == 1 d.setitem(w_key, 2) v3 = strategy.version assert v2 is not v3 assert d.getitem(w_key) == 2 - assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 2 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key).w_value == 2 d.setitem(w_key, 3) v4 = strategy.version assert v3 is v4 assert d.getitem(w_key) == 3 - assert d.strategy.getdictvalue_no_unwrapping(d, key).w_value == 3 + assert d.get_strategy().getdictvalue_no_unwrapping(d, key).w_value == 3 d.delitem(w_key) v5 = strategy.version assert v5 is not v4 assert d.getitem(w_key) is None - assert d.strategy.getdictvalue_no_unwrapping(d, key) is None + assert d.get_strategy().getdictvalue_no_unwrapping(d, key) is None def test_same_key_set_twice(self): strategy = ModuleDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_ModuleDictObject(space, strategy, 
storage) v1 = strategy.version x = object() @@ -108,22 +108,11 @@ class TestModuleDictImplementation(BaseTestRDictImplementation): StrategyClass = ModuleDictStrategy - -class TestModuleDictImplementationWithBuiltinNames(BaseTestRDictImplementation): - StrategyClass = ModuleDictStrategy - - string = "int" - string2 = "isinstance" - + setdefault_hash_count = 2 class TestDevolvedModuleDictImplementation(BaseTestDevolvedDictImplementation): StrategyClass = ModuleDictStrategy - -class TestDevolvedModuleDictImplementationWithBuiltinNames(BaseTestDevolvedDictImplementation): - StrategyClass = ModuleDictStrategy - - string = "int" - string2 = "isinstance" + setdefault_hash_count = 2 class AppTestCellDict(object): @@ -134,7 +123,7 @@ py.test.skip("__repr__ doesn't work on appdirect") strategy = ModuleDictStrategy(cls.space) storage = strategy.get_empty_storage() - cls.w_d = W_DictMultiObject(cls.space, strategy, storage) + cls.w_d = W_ModuleDictObject(cls.space, strategy, storage) def test_popitem(self): import __pypy__ diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/test/test_dictmultiobject.py pypy-5.0.1+dfsg/pypy/objspace/std/test/test_dictmultiobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/test/test_dictmultiobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/test/test_dictmultiobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -2,14 +2,14 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - BytesDictStrategy, ObjectDictStrategy) + W_DictObject, BytesDictStrategy, ObjectDictStrategy) class TestW_DictObject(object): def test_empty(self): d = self.space.newdict() assert not self.space.is_true(d) - assert type(d.strategy) is not ObjectDictStrategy + assert type(d.get_strategy()) is not ObjectDictStrategy def test_nonempty(self): space = self.space @@ -1050,7 +1050,7 @@ return l def newlist_bytes(self, l): return l - DictObjectCls = W_DictMultiObject + DictObjectCls = W_DictObject def type(self, w_obj): if 
isinstance(w_obj, FakeString): return str @@ -1076,7 +1076,7 @@ return tuple(l) def newdict(self, module=False, instance=False): - return W_DictMultiObject.allocate_and_init_instance( + return W_DictObject.allocate_and_init_instance( self, module=module, instance=instance) def view_as_kwargs(self, w_d): @@ -1105,7 +1105,7 @@ w_float = float StringObjectCls = FakeString UnicodeObjectCls = FakeUnicode - w_dict = W_DictMultiObject + w_dict = W_DictObject iter = iter fixedview = list listview = list @@ -1149,8 +1149,8 @@ def get_impl(self): strategy = self.StrategyClass(self.fakespace) storage = strategy.get_empty_storage() - w_dict = self.fakespace.allocate_instance(W_DictMultiObject, None) - W_DictMultiObject.__init__(w_dict, self.fakespace, strategy, storage) + w_dict = self.fakespace.allocate_instance(W_DictObject, None) + W_DictObject.__init__(w_dict, self.fakespace, strategy, storage) return w_dict def fill_impl(self): @@ -1159,7 +1159,7 @@ def check_not_devolved(self): #XXX check if strategy changed!? 
- assert type(self.impl.strategy) is self.StrategyClass + assert type(self.impl.get_strategy()) is self.StrategyClass #assert self.impl.r_dict_content is None def test_popitem(self): @@ -1246,7 +1246,10 @@ for x in xrange(100): impl.setitem(self.fakespace.str_w(str(x)), x) impl.setitem(x, x) - assert type(impl.strategy) is ObjectDictStrategy + assert type(impl.get_strategy()) is ObjectDictStrategy + + + setdefault_hash_count = 1 def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names @@ -1255,11 +1258,11 @@ x = impl.setdefault(key, 1) assert x == 1 if on_pypy: - assert key.hash_count == 1 + assert key.hash_count == self.setdefault_hash_count x = impl.setdefault(key, 2) assert x == 1 if on_pypy: - assert key.hash_count == 2 + assert key.hash_count == self.setdefault_hash_count + 1 def test_fallback_evil_key(self): class F(object): @@ -1308,7 +1311,7 @@ class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): BaseTestRDictImplementation.fill_impl(self) - self.impl.strategy.switch_to_object_strategy(self.impl) + self.impl.get_strategy().switch_to_object_strategy(self.impl) def check_not_devolved(self): pass @@ -1320,5 +1323,5 @@ def test_module_uses_strdict(): fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is BytesDictStrategy + assert type(d.get_strategy()) is BytesDictStrategy diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/test/test_kwargsdict.py pypy-5.0.1+dfsg/pypy/objspace/std/test/test_kwargsdict.py --- pypy-4.0.1+dfsg/pypy/objspace/std/test/test_kwargsdict.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/test/test_kwargsdict.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,5 @@ import py -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.kwargsdict import * space = FakeSpace() @@ -9,7 +9,7 @@ keys = 
["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -23,7 +23,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -52,7 +52,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) assert d.getitem_str("a") == 1 assert d.getitem_str("b") == 2 assert d.getitem_str("c") == 3 @@ -69,11 +69,11 @@ def test_limit_size(): storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) for i in range(100): assert d.setitem_str("d%s" % i, 4) is None - assert d.strategy is not strategy - assert "BytesDictStrategy" == d.strategy.__class__.__name__ + assert d.get_strategy() is not strategy + assert "BytesDictStrategy" == d.get_strategy().__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() @@ -82,7 +82,7 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) w_l = d.w_keys() # does not crash def test_view_as_kwargs(): @@ -91,36 +91,43 @@ keys = ["a", "b", "c"] values = [1, 2, 3] storage = strategy.erase((keys, values)) - d = W_DictMultiObject(space, strategy, storage) - assert (space.view_as_kwargs(d) == keys, values) + d = W_DictObject(space, strategy, storage) + assert space.view_as_kwargs(d) == (keys, values) strategy = EmptyDictStrategy(space) storage = strategy.get_empty_storage() - d = 
W_DictMultiObject(space, strategy, storage) - assert (space.view_as_kwargs(d) == [], []) + d = W_DictObject(space, strategy, storage) + assert space.view_as_kwargs(d) == ([], []) def test_from_empty_to_kwargs(): strategy = EmptyKwargsDictStrategy(space) storage = strategy.get_empty_storage() - d = W_DictMultiObject(space, strategy, storage) + d = W_DictObject(space, strategy, storage) d.setitem_str("a", 3) - assert isinstance(d.strategy, KwargsDictStrategy) + assert isinstance(d.get_strategy(), KwargsDictStrategy) from pypy.objspace.std.test.test_dictmultiobject import BaseTestRDictImplementation, BaseTestDevolvedDictImplementation def get_impl(self): storage = strategy.erase(([], [])) - return W_DictMultiObject(space, strategy, storage) + return W_DictObject(space, strategy, storage) + class TestKwargsDictImplementation(BaseTestRDictImplementation): StrategyClass = KwargsDictStrategy get_impl = get_impl def test_delitem(self): pass # delitem devolves for now + def test_setdefault_fast(self): + pass # not based on hashing at all + class TestDevolvedKwargsDictImplementation(BaseTestDevolvedDictImplementation): get_impl = get_impl StrategyClass = KwargsDictStrategy + def test_setdefault_fast(self): + pass # not based on hashing at all + class AppTestKwargsDictStrategy(object): def setup_class(cls): diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/test/test_longobject.py pypy-5.0.1+dfsg/pypy/objspace/std/test/test_longobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/test/test_longobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/test/test_longobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -358,3 +358,10 @@ assert 3L.__coerce__(4L) == (3L, 4L) assert 3L.__coerce__(4) == (3, 4) assert 3L.__coerce__(object()) == NotImplemented + + def test_linear_long_base_16(self): + # never finishes if long(_, 16) is not linear-time + size = 100000 + n = "a" * size + expected = (2 << (size * 4)) // 3 + assert long(n, 16) == expected diff -Nru 
pypy-4.0.1+dfsg/pypy/objspace/std/test/test_mapdict.py pypy-5.0.1+dfsg/pypy/objspace/std/test/test_mapdict.py --- pypy-4.0.1+dfsg/pypy/objspace/std/test/test_mapdict.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/test/test_mapdict.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,4 +1,4 @@ -from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictMultiObject +from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.mapdict import * class Config: @@ -34,8 +34,8 @@ def test_plain_attribute(): w_cls = "class" - aa = PlainAttribute(("b", DICT), - PlainAttribute(("a", DICT), + aa = PlainAttribute("b", DICT, + PlainAttribute("a", DICT, Terminator(space, w_cls))) assert aa.space is space assert aa.terminator.w_cls is w_cls @@ -63,16 +63,16 @@ def test_huge_chain(): current = Terminator(space, "cls") for i in range(20000): - current = PlainAttribute((str(i), DICT), current) - assert current.find_map_attr(("0", DICT)).storageindex == 0 + current = PlainAttribute(str(i), DICT, current) + assert current.find_map_attr("0", DICT).storageindex == 0 def test_search(): - aa = PlainAttribute(("b", DICT), PlainAttribute(("a", DICT), Terminator(None, None))) + aa = PlainAttribute("b", DICT, PlainAttribute("a", DICT, Terminator(None, None))) assert aa.search(DICT) is aa assert aa.search(SLOTS_STARTING_FROM) is None assert aa.search(SPECIAL) is None - bb = PlainAttribute(("C", SPECIAL), PlainAttribute(("A", SLOTS_STARTING_FROM), aa)) + bb = PlainAttribute("C", SPECIAL, PlainAttribute("A", SLOTS_STARTING_FROM, aa)) assert bb.search(DICT) is aa assert bb.search(SLOTS_STARTING_FROM) is bb.back assert bb.search(SPECIAL) is bb @@ -107,6 +107,153 @@ assert obj2.getdictvalue(space, "b") == 60 assert obj2.map is obj.map +def test_insert_different_orders(): + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", 10) + obj.setdictvalue(space, "b", 20) + + obj2 = cls.instantiate() + 
obj2.setdictvalue(space, "b", 30) + obj2.setdictvalue(space, "a", 40) + + assert obj.map is obj2.map + +def test_insert_different_orders_2(): + cls = Class() + obj = cls.instantiate() + obj2 = cls.instantiate() + + obj.setdictvalue(space, "a", 10) + + obj2.setdictvalue(space, "b", 20) + obj2.setdictvalue(space, "a", 30) + + obj.setdictvalue(space, "b", 40) + assert obj.map is obj2.map + +def test_insert_different_orders_3(): + cls = Class() + obj = cls.instantiate() + obj2 = cls.instantiate() + obj3 = cls.instantiate() + obj4 = cls.instantiate() + obj5 = cls.instantiate() + obj6 = cls.instantiate() + + obj.setdictvalue(space, "a", 10) + obj.setdictvalue(space, "b", 20) + obj.setdictvalue(space, "c", 30) + + obj2.setdictvalue(space, "a", 30) + obj2.setdictvalue(space, "c", 40) + obj2.setdictvalue(space, "b", 50) + + obj3.setdictvalue(space, "c", 30) + obj3.setdictvalue(space, "a", 40) + obj3.setdictvalue(space, "b", 50) + + obj4.setdictvalue(space, "c", 30) + obj4.setdictvalue(space, "b", 40) + obj4.setdictvalue(space, "a", 50) + + obj5.setdictvalue(space, "b", 30) + obj5.setdictvalue(space, "a", 40) + obj5.setdictvalue(space, "c", 50) + + obj6.setdictvalue(space, "b", 30) + obj6.setdictvalue(space, "c", 40) + obj6.setdictvalue(space, "a", 50) + + assert obj.map is obj2.map + assert obj.map is obj3.map + assert obj.map is obj4.map + assert obj.map is obj5.map + assert obj.map is obj6.map + + +def test_insert_different_orders_4(): + cls = Class() + obj = cls.instantiate() + obj2 = cls.instantiate() + + obj.setdictvalue(space, "a", 10) + obj.setdictvalue(space, "b", 20) + obj.setdictvalue(space, "c", 30) + obj.setdictvalue(space, "d", 40) + + obj2.setdictvalue(space, "d", 50) + obj2.setdictvalue(space, "c", 50) + obj2.setdictvalue(space, "b", 50) + obj2.setdictvalue(space, "a", 50) + + assert obj.map is obj2.map + +def test_insert_different_orders_5(): + cls = Class() + obj = cls.instantiate() + obj2 = cls.instantiate() + + obj.setdictvalue(space, "a", 10) + 
obj.setdictvalue(space, "b", 20) + obj.setdictvalue(space, "c", 30) + obj.setdictvalue(space, "d", 40) + + obj2.setdictvalue(space, "d", 50) + obj2.setdictvalue(space, "c", 50) + obj2.setdictvalue(space, "b", 50) + obj2.setdictvalue(space, "a", 50) + + obj3 = cls.instantiate() + obj3.setdictvalue(space, "d", 50) + obj3.setdictvalue(space, "c", 50) + obj3.setdictvalue(space, "b", 50) + obj3.setdictvalue(space, "a", 50) + + assert obj.map is obj3.map + + +def test_bug_stack_overflow_insert_attributes(): + cls = Class() + obj = cls.instantiate() + + for i in range(1000): + obj.setdictvalue(space, str(i), i) + + +def test_insert_different_orders_perm(): + from itertools import permutations + cls = Class() + seen_maps = {} + for preexisting in ['', 'x', 'xy']: + for i, attributes in enumerate(permutations("abcdef")): + obj = cls.instantiate() + for i, attr in enumerate(preexisting): + obj.setdictvalue(space, attr, i*1000) + key = preexisting + for j, attr in enumerate(attributes): + obj.setdictvalue(space, attr, i*10+j) + key = "".join(sorted(key+attr)) + if key in seen_maps: + assert obj.map is seen_maps[key] + else: + seen_maps[key] = obj.map + + print len(seen_maps) + + +def test_bug_infinite_loop(): + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "e", 1) + obj2 = cls.instantiate() + obj2.setdictvalue(space, "f", 2) + obj3 = cls.instantiate() + obj3.setdictvalue(space, "a", 3) + obj3.setdictvalue(space, "e", 4) + obj3.setdictvalue(space, "f", 5) + + def test_attr_immutability(monkeypatch): cls = Class() obj = cls.instantiate() @@ -309,7 +456,7 @@ obj.setdictvalue(space, "c", 7) assert obj.storage == [50, 60, 70, 5, 6, 7] - class FakeDict(W_DictMultiObject): + class FakeDict(W_DictObject): def __init__(self, d): self.dstorage = d @@ -320,7 +467,7 @@ d = {} w_d = FakeDict(d) - flag = obj.map.write(obj, ("dict", SPECIAL), w_d) + flag = obj.map.write(obj, "dict", SPECIAL, w_d) assert flag materialize_r_dict(space, obj, d) assert d == {"a": 5, "b": 6, 
"c": 7} @@ -359,16 +506,22 @@ class TestMapDictImplementation(BaseTestRDictImplementation): StrategyClass = MapDictStrategy get_impl = get_impl + def test_setdefault_fast(self): + # mapdict can't pass this, which is fine + pass class TestDevolvedMapDictImplementation(BaseTestDevolvedDictImplementation): get_impl = get_impl StrategyClass = MapDictStrategy + def test_setdefault_fast(self): + # mapdict can't pass this, which is fine + pass # ___________________________________________________________ # tests that check the obj interface after the dict has devolved def devolve_dict(space, obj): w_d = obj.getdict(space) - w_d.strategy.switch_to_object_strategy(w_d) + w_d.get_strategy().switch_to_object_strategy(w_d) def test_get_setdictvalue_after_devolve(): cls = Class() @@ -1127,8 +1280,12 @@ def test_newdict_instance(): w_dict = space.newdict(instance=True) - assert type(w_dict.strategy) is MapDictStrategy + assert type(w_dict.get_strategy()) is MapDictStrategy class TestMapDictImplementationUsingnewdict(BaseTestRDictImplementation): StrategyClass = MapDictStrategy # NB: the get_impl method is not overwritten here, as opposed to above + + def test_setdefault_fast(self): + # mapdict can't pass this, which is fine + pass diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/test/test_obj.py pypy-5.0.1+dfsg/pypy/objspace/std/test/test_obj.py --- pypy-4.0.1+dfsg/pypy/objspace/std/test/test_obj.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/test/test_obj.py 2016-03-19 16:40:15.000000000 +0000 @@ -172,15 +172,15 @@ def test_id_on_primitives(self): if self.cpython_apptest: skip("cpython behaves differently") - assert id(1) == (1 << 3) + 1 - assert id(1l) == (1 << 3) + 3 + assert id(1) == (1 << 4) + 1 + assert id(1l) == (1 << 4) + 3 class myint(int): pass assert id(myint(1)) != id(1) assert id(1.0) & 7 == 5 assert id(-0.0) != id(0.0) - assert hex(id(2.0)) == '0x20000000000000005L' + assert hex(id(2.0)) == '0x40000000000000005L' assert id(0.0) == 5 def 
test_id_on_strs(self): diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/test/test_tupleobject.py pypy-5.0.1+dfsg/pypy/objspace/std/test/test_tupleobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/test/test_tupleobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/test/test_tupleobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -237,8 +237,8 @@ class AppTestW_TupleObject: def test_is_true(self): assert not () - assert (5,) - assert (5, 3) + assert bool((5,)) + assert bool((5, 3)) def test_len(self): assert len(()) == 0 diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/transparent.py pypy-5.0.1+dfsg/pypy/objspace/std/transparent.py --- pypy-4.0.1+dfsg/pypy/objspace/std/transparent.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/transparent.py 2016-03-19 16:40:12.000000000 +0000 @@ -62,7 +62,7 @@ return W_TransparentGenerator(space, w_type, w_controller) if space.is_true(space.issubtype(w_type, space.gettypeobject(PyCode.typedef))): return W_TransparentCode(space, w_type, w_controller) - if w_type.instancetypedef is space.w_object.instancetypedef: + if w_type.layout.typedef is space.w_object.layout.typedef: return W_Transparent(space, w_type, w_controller) else: raise OperationError(space.w_TypeError, space.wrap("type expected as first argument")) diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/tupleobject.py pypy-5.0.1+dfsg/pypy/objspace/std/tupleobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/tupleobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/tupleobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -30,6 +30,11 @@ contains_jmp = jit.JitDriver(greens = ['tp'], reds = 'auto', name = 'tuple.contains') +hash_driver = jit.JitDriver( + name='tuple.hash', + greens=['w_type'], + reds='auto') + class W_AbstractTupleObject(W_Root): __slots__ = () @@ -262,12 +267,32 @@ def length(self): return len(self.wrappeditems) - @jit.look_inside_iff(lambda self, _1: _unroll_condition(self)) def descr_hash(self, 
space): + if _unroll_condition(self): + return self._descr_hash_unroll(space) + else: + return self._descr_hash_jitdriver(space) + + @jit.unroll_safe + def _descr_hash_unroll(self, space): + mult = 1000003 + x = 0x345678 + z = len(self.wrappeditems) + for w_item in self.wrappeditems: + y = space.hash_w(w_item) + x = (x ^ y) * mult + z -= 1 + mult += 82520 + z + z + x += 97531 + return space.wrap(intmask(x)) + + def _descr_hash_jitdriver(self, space): mult = 1000003 x = 0x345678 z = len(self.wrappeditems) + w_type = space.type(self.wrappeditems[0]) for w_item in self.wrappeditems: + hash_driver.jit_merge_point(w_type=w_type) y = space.hash_w(w_item) x = (x ^ y) * mult z -= 1 diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/typeobject.py pypy-5.0.1+dfsg/pypy/objspace/std/typeobject.py --- pypy-4.0.1+dfsg/pypy/objspace/std/typeobject.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/typeobject.py 2016-03-19 16:40:12.000000000 +0000 @@ -7,7 +7,7 @@ from pypy.interpreter.astcompiler.misc import mangle from rpython.rlib.jit import (promote, elidable_promote, we_are_jitted, - promote_string, elidable, dont_look_inside, unroll_safe) + elidable, dont_look_inside, unroll_safe) from rpython.rlib.objectmodel import current_object_addr_as_int, compute_hash from rpython.rlib.rarithmetic import intmask, r_uint @@ -87,6 +87,29 @@ for i in range(len(self.lookup_where)): self.lookup_where[i] = None_None + +class Layout(object): + """A Layout is attached to every W_TypeObject to represent the + layout of instances. Some W_TypeObjects share the same layout. + If a W_TypeObject is a base of another, then the layout of + the first is either the same or a parent layout of the second. + The Layouts have single inheritance, unlike W_TypeObjects. 
+ """ + _immutable_ = True + + def __init__(self, typedef, nslots, base_layout=None): + self.typedef = typedef + self.nslots = nslots + self.base_layout = base_layout + + def issublayout(self, parent): + while self is not parent: + self = self.base_layout + if self is None: + return False + return True + + # possible values of compares_by_identity_status UNKNOWN = 0 COMPARES_BY_IDENTITY = 1 @@ -106,8 +129,7 @@ 'needsdel', 'weakrefable', 'hasdict', - 'nslots', - 'instancetypedef', + 'layout', 'terminator', '_version_tag?', 'name?', @@ -126,12 +148,11 @@ @dont_look_inside def __init__(w_self, space, name, bases_w, dict_w, - overridetypedef=None): + overridetypedef=None, force_new_layout=False): w_self.space = space w_self.name = name w_self.bases_w = bases_w w_self.dict_w = dict_w - w_self.nslots = 0 w_self.hasdict = False w_self.needsdel = False w_self.weakrefable = False @@ -141,13 +162,13 @@ w_self.flag_cpytype = False w_self.flag_abstract = False w_self.flag_sequence_bug_compat = False - w_self.instancetypedef = overridetypedef if overridetypedef is not None: - setup_builtin_type(w_self) + assert not force_new_layout + layout = setup_builtin_type(w_self, overridetypedef) else: - setup_user_defined_type(w_self) - w_self.w_same_layout_as = get_parent_layout(w_self) + layout = setup_user_defined_type(w_self, force_new_layout) + w_self.layout = layout if space.config.objspace.std.withtypeversion: if not is_mro_purely_of_types(w_self.mro_w): @@ -164,6 +185,10 @@ else: w_self.terminator = NoDictTerminator(space, w_self) + def __repr__(self): + "NOT_RPYTHON" + return '' % (self.name, id(self)) + def mutated(w_self, key): """ The type is being mutated. 
key is either the string containing the @@ -264,8 +289,8 @@ # compute a tuple that fully describes the instance layout def get_full_instance_layout(w_self): - w_layout = w_self.w_same_layout_as or w_self - return (w_layout, w_self.hasdict, w_self.needsdel, w_self.weakrefable) + layout = w_self.layout + return (layout, w_self.hasdict, w_self.needsdel, w_self.weakrefable) def compute_default_mro(w_self): return compute_C3_mro(w_self.space, w_self) @@ -402,7 +427,6 @@ if version_tag is None: tup = w_self._lookup_where(name) return tup - name = promote_string(name) tup_w = w_self._pure_lookup_where_with_method_cache(name, version_tag) w_class, w_value = tup_w if (space.config.objspace.std.withtypeversion and @@ -463,7 +487,7 @@ raise oefmt(space.w_TypeError, "%N.__new__(%N): %N is not a subtype of %N", w_self, w_subtype, w_subtype, w_self) - if w_self.instancetypedef is not w_subtype.instancetypedef: + if w_self.layout.typedef is not w_subtype.layout.typedef: raise oefmt(space.w_TypeError, "%N.__new__(%N) is not safe, use %N.__new__()", w_self, w_subtype, w_subtype) @@ -478,12 +502,12 @@ def getdict(w_self, space): # returning a dict-proxy! 
from pypy.objspace.std.dictproxyobject import DictProxyStrategy - from pypy.objspace.std.dictmultiobject import W_DictMultiObject + from pypy.objspace.std.dictmultiobject import W_DictObject if w_self.lazyloaders: w_self._cleanup_() # force un-lazification strategy = space.fromcache(DictProxyStrategy) storage = strategy.erase(w_self) - return W_DictMultiObject(space, strategy, storage) + return W_DictObject(space, strategy, storage) def is_heaptype(w_self): return w_self.flag_heaptype @@ -817,11 +841,10 @@ for w_subclass in w_type.get_subclasses(): if isinstance(w_subclass, W_TypeObject): w_subclass._version_tag = None - assert w_type.w_same_layout_as is get_parent_layout(w_type) # invariant def descr__base(space, w_type): w_type = _check(space, w_type) - return find_best_base(space, w_type.bases_w) + return find_best_base(w_type.bases_w) def descr__doc(space, w_type): if space.is_w(w_type, space.w_type): @@ -924,48 +947,7 @@ # ____________________________________________________________ # Initialization of type objects -def get_parent_layout(w_type): - """Compute the most parent class of 'w_type' whose layout - is the same as 'w_type', or None if all parents of 'w_type' - have a different layout than 'w_type'. 
- """ - w_starttype = w_type - while len(w_type.bases_w) > 0: - w_bestbase = find_best_base(w_type.space, w_type.bases_w) - if w_type.instancetypedef is not w_bestbase.instancetypedef: - break - if w_type.nslots != w_bestbase.nslots: - break - w_type = w_bestbase - if w_type is not w_starttype: - return w_type - else: - return None - -def issublayout(w_layout1, w_layout2): - space = w_layout2.space - while w_layout1 is not w_layout2: - w_layout1 = find_best_base(space, w_layout1.bases_w) - if w_layout1 is None: - return False - w_layout1 = w_layout1.w_same_layout_as or w_layout1 - return True - -@unroll_safe -def issubtypedef(a, b): - from pypy.objspace.std.objectobject import W_ObjectObject - if b is W_ObjectObject.typedef: - return True - if a is None: - return False - if a is b: - return True - for a1 in a.bases: - if issubtypedef(a1, b): - return True - return False - -def find_best_base(space, bases_w): +def find_best_base(bases_w): """The best base is one of the bases in the given list: the one whose layout a new type should use as a starting point. """ @@ -976,14 +958,10 @@ if w_bestbase is None: w_bestbase = w_candidate # for now continue - candtypedef = w_candidate.instancetypedef - besttypedef = w_bestbase.instancetypedef - if candtypedef is besttypedef: - # two candidates with the same typedef are equivalent unless - # one has extra slots over the other - if w_candidate.nslots > w_bestbase.nslots: - w_bestbase = w_candidate - elif issubtypedef(candtypedef, besttypedef): + cand_layout = w_candidate.layout + best_layout = w_bestbase.layout + if (cand_layout is not best_layout and + cand_layout.issublayout(best_layout)): w_bestbase = w_candidate return w_bestbase @@ -992,20 +970,21 @@ whose layout a new type should use as a starting point. This version checks that bases_w is an acceptable tuple of bases. 
""" - w_bestbase = find_best_base(space, bases_w) + w_bestbase = find_best_base(bases_w) if w_bestbase is None: raise oefmt(space.w_TypeError, "a new-style class can't have only classic bases") - if not w_bestbase.instancetypedef.acceptable_as_base_class: + if not w_bestbase.layout.typedef.acceptable_as_base_class: raise oefmt(space.w_TypeError, "type '%N' is not an acceptable base class", w_bestbase) - # check that all other bases' layouts are superclasses of the bestbase - w_bestlayout = w_bestbase.w_same_layout_as or w_bestbase + # check that all other bases' layouts are "super-layouts" of the + # bestbase's layout + best_layout = w_bestbase.layout for w_base in bases_w: if isinstance(w_base, W_TypeObject): - w_layout = w_base.w_same_layout_as or w_base - if not issublayout(w_bestlayout, w_layout): + layout = w_base.layout + if not best_layout.issublayout(layout): raise oefmt(space.w_TypeError, "instance layout conflicts in multiple inheritance") return w_bestbase @@ -1019,10 +998,11 @@ w_self.hasdict = w_self.hasdict or w_base.hasdict w_self.needsdel = w_self.needsdel or w_base.needsdel w_self.weakrefable = w_self.weakrefable or w_base.weakrefable - w_self.nslots = w_bestbase.nslots return hasoldstylebase -def create_all_slots(w_self, hasoldstylebase, w_bestbase): +def create_all_slots(w_self, hasoldstylebase, w_bestbase, force_new_layout): + base_layout = w_bestbase.layout + index_next_extra_slot = base_layout.nslots space = w_self.space dict_w = w_self.dict_w if '__slots__' not in dict_w: @@ -1050,7 +1030,8 @@ "__weakref__ slot disallowed: we already got one") wantweakref = True else: - create_slot(w_self, slot_name) + index_next_extra_slot = create_slot(w_self, slot_name, + index_next_extra_slot) wantdict = wantdict or hasoldstylebase if wantdict: create_dict_slot(w_self) @@ -1058,8 +1039,14 @@ create_weakref_slot(w_self) if '__del__' in dict_w: w_self.needsdel = True + # + if index_next_extra_slot == base_layout.nslots and not force_new_layout: + return 
base_layout + else: + return Layout(base_layout.typedef, index_next_extra_slot, + base_layout=base_layout) -def create_slot(w_self, slot_name): +def create_slot(w_self, slot_name, index_next_extra_slot): space = w_self.space if not valid_slot_name(slot_name): raise oefmt(space.w_TypeError, "__slots__ must be identifiers") @@ -1069,9 +1056,10 @@ # Force interning of slot names. slot_name = space.str_w(space.new_interned_str(slot_name)) # in cpython it is ignored less, but we probably don't care - member = Member(w_self.nslots, slot_name, w_self) + member = Member(index_next_extra_slot, slot_name, w_self) + index_next_extra_slot += 1 w_self.dict_w[slot_name] = space.wrap(member) - w_self.nslots += 1 + return index_next_extra_slot def create_dict_slot(w_self): if not w_self.hasdict: @@ -1093,11 +1081,10 @@ return False return True -def setup_user_defined_type(w_self): +def setup_user_defined_type(w_self, force_new_layout): if len(w_self.bases_w) == 0: w_self.bases_w = [w_self.space.w_object] w_bestbase = check_and_find_best_base(w_self.space, w_self.bases_w) - w_self.instancetypedef = w_bestbase.instancetypedef w_self.flag_heaptype = True for w_base in w_self.bases_w: if not isinstance(w_base, W_TypeObject): @@ -1106,16 +1093,29 @@ w_self.flag_abstract |= w_base.flag_abstract hasoldstylebase = copy_flags_from_bases(w_self, w_bestbase) - create_all_slots(w_self, hasoldstylebase, w_bestbase) + layout = create_all_slots(w_self, hasoldstylebase, w_bestbase, + force_new_layout) ensure_common_attributes(w_self) + return layout -def setup_builtin_type(w_self): - w_self.hasdict = w_self.instancetypedef.hasdict - w_self.weakrefable = w_self.instancetypedef.weakrefable - w_self.w_doc = w_self.space.wrap(w_self.instancetypedef.doc) +def setup_builtin_type(w_self, instancetypedef): + w_self.hasdict = instancetypedef.hasdict + w_self.weakrefable = instancetypedef.weakrefable + w_self.w_doc = w_self.space.wrap(instancetypedef.doc) ensure_common_attributes(w_self) - 
w_self.flag_heaptype = w_self.instancetypedef.heaptype + w_self.flag_heaptype = instancetypedef.heaptype + # + # usually 'instancetypedef' is new, i.e. not seen in any base, + # but not always (see Exception class) + w_bestbase = find_best_base(w_self.bases_w) + if w_bestbase is None: + parent_layout = None + else: + parent_layout = w_bestbase.layout + if parent_layout.typedef is instancetypedef: + return parent_layout + return Layout(instancetypedef, 0, base_layout=parent_layout) def ensure_common_attributes(w_self): ensure_static_new(w_self) @@ -1139,7 +1139,7 @@ space = w_self.space caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: - w_globals = caller.w_globals + w_globals = caller.get_w_globals() w_name = space.finditem(w_globals, space.wrap('__name__')) if w_name is not None: w_self.dict_w['__module__'] = w_name diff -Nru pypy-4.0.1+dfsg/pypy/objspace/std/util.py pypy-5.0.1+dfsg/pypy/objspace/std/util.py --- pypy-4.0.1+dfsg/pypy/objspace/std/util.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/objspace/std/util.py 2016-03-19 16:40:15.000000000 +0000 @@ -2,11 +2,13 @@ from pypy.interpreter.error import OperationError, oefmt +IDTAG_SHIFT = 4 IDTAG_INT = 1 IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 +IDTAG_METHOD = 9 CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', diff -Nru pypy-4.0.1+dfsg/pypy/test_all.py pypy-5.0.1+dfsg/pypy/test_all.py --- pypy-4.0.1+dfsg/pypy/test_all.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/test_all.py 2016-03-19 16:40:12.000000000 +0000 @@ -26,11 +26,10 @@ #Add toplevel repository dir to sys.path sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) import pytest - import pytest_cov if sys.platform == 'win32': #Try to avoid opeing a dialog box if one of the tests causes a system error # We do this in runner.py, but buildbots run twisted which 
ruins inheritance - # in windows subprocesses. + # in windows subprocesses. import ctypes winapi = ctypes.windll.kernel32 SetErrorMode = winapi.SetErrorMode @@ -44,4 +43,4 @@ old_mode = SetErrorMode(flags) SetErrorMode(old_mode | flags) - sys.exit(pytest.main(plugins=[pytest_cov])) + sys.exit(pytest.main()) diff -Nru pypy-4.0.1+dfsg/pypy/tool/ann_override.py pypy-5.0.1+dfsg/pypy/tool/ann_override.py --- pypy-4.0.1+dfsg/pypy/tool/ann_override.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/tool/ann_override.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,16 +1,7 @@ # overrides for annotation specific to PyPy codebase from rpython.annotator.policy import AnnotatorPolicy from rpython.flowspace.model import Constant -from rpython.annotator import specialize -from rpython.annotator.classdesc import InstanceSource, ClassDef - - - -def isidentifier(s): - if not s: - return False - s = s.replace('_', 'x') - return s[0].isalpha() and s.isalnum() +from rpython.annotator.classdesc import InstanceSource class PyPyAnnotatorPolicy(AnnotatorPolicy): @@ -19,37 +10,6 @@ self.lookups_where = {} self.pypytypes = {} - def specialize__wrap(self, funcdesc, args_s): - from pypy.interpreter.baseobjspace import W_Root - W_Root_def = funcdesc.bookkeeper.getuniqueclassdef(W_Root) - typ = args_s[1].knowntype - if isinstance(typ, ClassDef): - assert typ.issubclass(W_Root_def) - typ = W_Root - else: - assert not issubclass(typ, W_Root) - assert typ != tuple, "space.wrap(tuple) forbidden; use newtuple()" - assert typ != list, "space.wrap(list) forbidden; use newlist()" - assert typ != dict, "space.wrap(dict) forbidden; use newdict()" - assert typ != object, "degenerated space.wrap(object)" - if args_s[0].is_constant() and args_s[1].is_constant(): - if typ in (str, bool, int, float): - space = args_s[0].const - x = args_s[1].const - - def fold(): - if typ is str and isidentifier(x): - return space.new_interned_str(x) - else: - return space.wrap(x) - builder = 
specialize.make_constgraphbuilder(2, factory=fold, - srcmodule='') - return funcdesc.cachedgraph((typ, x), builder=builder) - if typ is str: - if args_s[1].can_be_None: - typ = (None, str) - return funcdesc.cachedgraph(typ) - def consider_lookup(self, bookkeeper, attr): assert attr not in self.lookups from pypy.objspace.std import typeobject diff -Nru pypy-4.0.1+dfsg/pypy/tool/import_cffi.py pypy-5.0.1+dfsg/pypy/tool/import_cffi.py --- pypy-4.0.1+dfsg/pypy/tool/import_cffi.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/tool/import_cffi.py 2016-03-19 16:40:12.000000000 +0000 @@ -7,11 +7,18 @@ import sys, py -def mangle(lines): - yield "# Generated by pypy/tool/import_cffi.py\n" - for line in lines: - line = line.replace('from testing', 'from pypy.module.test_lib_pypy.cffi_tests') - yield line +def mangle(lines, ext): + if ext == '.py': + yield "# Generated by pypy/tool/import_cffi.py\n" + for line in lines: + line = line.replace('from testing', 'from pypy.module.test_lib_pypy.cffi_tests') + yield line + elif ext in ('.c', '.h'): + yield "/* Generated by pypy/tool/import_cffi.py */\n" + for line in lines: + yield line + else: + raise AssertionError(ext) def main(cffi_dir): cffi_dir = py.path.local(cffi_dir) @@ -23,10 +30,12 @@ for p in (list(cffi_dir.join('cffi').visit(fil='*.py')) + list(cffi_dir.join('cffi').visit(fil='*.h'))): cffi_dest.join('..', p.relto(cffi_dir)).write(p.read()) - for p in cffi_dir.join('testing').visit(fil='*.py'): + for p in (list(cffi_dir.join('testing').visit(fil='*.py')) + + list(cffi_dir.join('testing').visit(fil='*.h')) + + list(cffi_dir.join('testing').visit(fil='*.c'))): path = test_dest.join(p.relto(cffi_dir.join('testing'))) path.join('..').ensure(dir=1) - path.write(''.join(mangle(p.readlines()))) + path.write(''.join(mangle(p.readlines(), p.ext))) if __name__ == '__main__': if len(sys.argv) != 2: diff -Nru pypy-4.0.1+dfsg/pypy/tool/pytest/appsupport.py pypy-5.0.1+dfsg/pypy/tool/pytest/appsupport.py --- 
pypy-4.0.1+dfsg/pypy/tool/pytest/appsupport.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/tool/pytest/appsupport.py 2016-03-19 16:40:12.000000000 +0000 @@ -58,12 +58,15 @@ self.w_locals = space.getattr(pyframe, space.wrap('f_locals')) self.f_locals = self.w_locals # for py.test's recursion detection + def get_w_globals(self): + return self.w_globals + def eval(self, code, **vars): space = self.space for key, w_value in vars.items(): space.setitem(self.w_locals, space.wrap(key), w_value) if isinstance(code, str): - return space.eval(code, self.w_globals, self.w_locals) + return space.eval(code, self.get_w_globals(), self.w_locals) pyc = pycode.PyCode._from_code(space, code) return pyc.exec_host_bytecode(self.w_globals, self.w_locals) exec_ = eval @@ -248,7 +251,7 @@ #if filename.endswith("pyc"): # filename = filename[:-1] try: - space.exec_(str(source), frame.w_globals, w_locals, + space.exec_(str(source), frame.get_w_globals(), w_locals, filename=filename) except OperationError, e: if e.match(space, w_ExpectedException): diff -Nru pypy-4.0.1+dfsg/pypy/tool/release/force-builds.py pypy-5.0.1+dfsg/pypy/tool/release/force-builds.py --- pypy-4.0.1+dfsg/pypy/tool/release/force-builds.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/tool/release/force-builds.py 2016-03-19 16:40:12.000000000 +0000 @@ -9,7 +9,7 @@ modified by PyPy team """ -import os, sys, urllib +import os, sys, urllib, subprocess from twisted.internet import reactor, defer from twisted.python import log @@ -83,4 +83,9 @@ (options, args) = parser.parse_args() if not options.branch: parser.error("branch option required") + try: + subprocess.check_call(['hg','id','-r', options.branch]) + except subprocess.CalledProcessError: + print 'branch', options.branch, 'could not be found in local repository' + sys.exit(-1) main(options.branch, options.server, user=options.user) diff -Nru pypy-4.0.1+dfsg/pypy/tool/release/package.py pypy-5.0.1+dfsg/pypy/tool/release/package.py --- 
pypy-4.0.1+dfsg/pypy/tool/release/package.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/tool/release/package.py 2016-03-19 16:40:12.000000000 +0000 @@ -108,13 +108,8 @@ # builddir = py.path.local(options.builddir) pypydir = builddir.ensure(name, dir=True) + includedir = basedir.join('include') - # Recursively copy all headers, shutil has only ignore - # so we do a double-negative to include what we want - def copyonly(dirpath, contents): - return set(contents) - set( # XXX function not used? - shutil.ignore_patterns('*.h', '*.incl')(dirpath, contents), - ) shutil.copytree(str(includedir), str(pypydir.join('include'))) pypydir.ensure('include', dir=True) @@ -139,22 +134,27 @@ continue print "Picking %s" % p binaries.append((p, p.basename)) - importlib_name = 'python27.lib' - if pypy_c.dirpath().join(importlib_name).check(): - shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), - str(pypydir.join('include/python27.lib'))) - print "Picking %s as %s" % (pypy_c.dirpath().join(importlib_name), - pypydir.join('include/python27.lib')) + libsdir = basedir.join('libs') + if libsdir.exists(): + print 'Picking %s (and contents)' % libsdir + shutil.copytree(str(libsdir), str(pypydir.join('libs'))) else: - pass - # XXX users will complain that they cannot compile cpyext - # modules for windows, has the lib moved or are there no - # exported functions in the dll so no import library is created? + print '"libs" dir with import library not found.' + print 'You have to create %r' % (str(libsdir),) + print 'and copy libpypy-c.lib in there, renamed to python27.lib' + # XXX users will complain that they cannot compile capi (cpyext) + # modules for windows, also embedding pypy (i.e. in cffi) + # will fail. + # Has the lib moved, was translation not 'shared', or are + # there no exported functions in the dll so no import + # library was created? 
if not options.no_tk: try: p = pypy_c.dirpath().join('tcl85.dll') if not p.check(): p = py.path.local.sysfind('tcl85.dll') + if p is None: + raise WindowsError("tcl85.dll not found") tktcldir = p.dirpath().join('..').join('lib') shutil.copytree(str(tktcldir), str(pypydir.join('tcl'))) except WindowsError: diff -Nru pypy-4.0.1+dfsg/pypy/tool/release/repackage.sh pypy-5.0.1+dfsg/pypy/tool/release/repackage.sh --- pypy-4.0.1+dfsg/pypy/tool/release/repackage.sh 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/tool/release/repackage.sh 2016-03-19 16:40:15.000000000 +0000 @@ -1,10 +1,11 @@ # Edit these appropriately before running this script -maj=2 -min=6 -rev=1 +maj=5 +min=0 +rev=0 # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. +# The script should be run in an empty directory, i.e. /tmp/release_xxx for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 freebsd64 do diff -Nru pypy-4.0.1+dfsg/pypy/tool/test/test_tab.py pypy-5.0.1+dfsg/pypy/tool/test/test_tab.py --- pypy-4.0.1+dfsg/pypy/tool/test/test_tab.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pypy/tool/test/test_tab.py 2016-03-19 16:40:12.000000000 +0000 @@ -6,7 +6,8 @@ from pypy.conftest import pypydir ROOT = os.path.abspath(os.path.join(pypydir, '..')) -EXCLUDE = {} +RPYTHONDIR = os.path.join(ROOT, "rpython") +EXCLUDE = {'/virt_test/lib/python2.7/site-packages/setuptools'} def test_no_tabs(): @@ -28,3 +29,27 @@ if not entry.startswith('.'): walk('%s/%s' % (reldir, entry)) walk('') + +def test_no_pypy_import_in_rpython(): + def walk(reldir): + print reldir + if reldir: + path = os.path.join(RPYTHONDIR, *reldir.split('/')) + else: + path = RPYTHONDIR + if os.path.isfile(path): + if not path.lower().endswith('.py'): + return + with file(path) as f: + for line in f: 
+ if "import" not in line: + continue + assert "from pypy." not in line + assert "import pypy." not in line + elif os.path.isdir(path) and not os.path.islink(path): + for entry in os.listdir(path): + if not entry.startswith('.'): + walk('%s/%s' % (reldir, entry)) + + walk('') + diff -Nru pypy-4.0.1+dfsg/pytest_cov.py pypy-5.0.1+dfsg/pytest_cov.py --- pypy-4.0.1+dfsg/pytest_cov.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/pytest_cov.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,353 +0,0 @@ -"""produce code coverage reports using the 'coverage' package, including support for distributed testing. - -This plugin produces coverage reports. It supports centralised testing and distributed testing in -both load and each modes. It also supports coverage of subprocesses. - -All features offered by the coverage package should be available, either through pytest-cov or -through coverage's config file. - - -Installation ------------- - -The `pytest-cov`_ package may be installed with pip or easy_install:: - - pip install pytest-cov - easy_install pytest-cov - -.. _`pytest-cov`: http://pypi.python.org/pypi/pytest-cov/ - - -Uninstallation --------------- - -Uninstalling packages is supported by pip:: - - pip uninstall pytest-cov - -However easy_install does not provide an uninstall facility. - -.. IMPORTANT:: - - Ensure that you manually delete the init_cov_core.pth file in your site-packages directory. - - This file starts coverage collection of subprocesses if appropriate during site initialisation - at python startup. - - -Usage ------ - -Centralised Testing -~~~~~~~~~~~~~~~~~~~ - -Centralised testing will report on the combined coverage of the main process and all of it's -subprocesses. 
- -Running centralised testing:: - - py.test --cov myproj tests/ - -Shows a terminal report:: - - -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- - Name Stmts Miss Cover - ---------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% - myproj/feature4286 94 7 92% - ---------------------------------------- - TOTAL 353 20 94% - - -Distributed Testing: Load -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Distributed testing with dist mode set to load will report on the combined coverage of all slaves. -The slaves may be spread out over any number of hosts and each slave may be located anywhere on the -file system. Each slave will have it's subprocesses measured. - -Running distributed testing with dist mode set to load:: - - py.test --cov myproj -n 2 tests/ - -Shows a terminal report:: - - -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- - Name Stmts Miss Cover - ---------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% - myproj/feature4286 94 7 92% - ---------------------------------------- - TOTAL 353 20 94% - - -Again but spread over different hosts and different directories:: - - py.test --cov myproj --dist load - --tx ssh=memedough@host1//chdir=testenv1 - --tx ssh=memedough@host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python - --rsyncdir myproj --rsyncdir tests --rsync examples - tests/ - -Shows a terminal report:: - - -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- - Name Stmts Miss Cover - ---------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% - myproj/feature4286 94 7 92% - ---------------------------------------- - TOTAL 353 20 94% - - -Distributed Testing: Each -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Distributed testing with dist mode set to each will report on the combined coverage of all slaves. 
-Since each slave is running all tests this allows generating a combined coverage report for multiple -environments. - -Running distributed testing with dist mode set to each:: - - py.test --cov myproj --dist each - --tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python - --tx ssh=memedough@host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python - --rsyncdir myproj --rsyncdir tests --rsync examples - tests/ - -Shows a terminal report:: - - ---------------------------------------- coverage ---------------------------------------- - platform linux2, python 2.6.5-final-0 - platform linux2, python 2.7.0-final-0 - Name Stmts Miss Cover - ---------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% - myproj/feature4286 94 7 92% - ---------------------------------------- - TOTAL 353 20 94% - - -Reporting ---------- - -It is possible to generate any combination of the reports for a single test run. - -The available reports are terminal (with or without missing line numbers shown), HTML, XML and -annotated source code. 
- -The terminal report without line numbers (default):: - - py.test --cov-report term --cov myproj tests/ - - -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- - Name Stmts Miss Cover - ---------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% - myproj/feature4286 94 7 92% - ---------------------------------------- - TOTAL 353 20 94% - - -The terminal report with line numbers:: - - py.test --cov-report term-missing --cov myproj tests/ - - -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- - Name Stmts Miss Cover Missing - -------------------------------------------------- - myproj/__init__ 2 0 100% - myproj/myproj 257 13 94% 24-26, 99, 149, 233-236, 297-298, 369-370 - myproj/feature4286 94 7 92% 183-188, 197 - -------------------------------------------------- - TOTAL 353 20 94% - - -The remaining three reports output to files without showing anything on the terminal (useful for -when the output is going to a continuous integration server):: - - py.test --cov-report html - --cov-report xml - --cov-report annotate - --cov myproj tests/ - - -Coverage Data File ------------------- - -The data file is erased at the beginning of testing to ensure clean data for each test run. - -The data file is left at the end of testing so that it is possible to use normal coverage tools to -examine it. - - -Coverage Config File --------------------- - -This plugin provides a clean minimal set of command line options that are added to pytest. For -further control of coverage use a coverage config file. 
- -For example if tests are contained within the directory tree being measured the tests may be -excluded if desired by using a .coveragerc file with the omit option set:: - - py.test --cov-config .coveragerc - --cov myproj - myproj/tests/ - -Where the .coveragerc file contains file globs:: - - [run] - omit = tests/* - -For full details refer to the `coverage config file`_ documentation. - -.. _`coverage config file`: http://nedbatchelder.com/code/coverage/config.html - -Note that this plugin controls some options and setting the option in the config file will have no -effect. These include specifying source to be measured (source option) and all data file handling -(data_file and parallel options). - - -Limitations ------------ - -For distributed testing the slaves must have the pytest-cov package installed. This is needed since -the plugin must be registered through setuptools / distribute for pytest to start the plugin on the -slave. - -For subprocess measurement environment variables must make it from the main process to the -subprocess. The python used by the subprocess must have pytest-cov installed. The subprocess must -do normal site initialisation so that the environment variables can be detected and coverage -started. - - -Acknowledgements ----------------- - -Whilst this plugin has been built fresh from the ground up it has been influenced by the work done -on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and nose-cover (Jason Pellerin) which are -other coverage plugins. - -Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs. - -Holger Krekel for pytest with its distributed testing support. - -Jason Pellerin for nose. - -Michael Foord for unittest2. - -No doubt others have contributed to these tools as well. 
-""" - - -def pytest_addoption(parser): - """Add options to control coverage.""" - - group = parser.getgroup('coverage reporting with distributed testing support') - group.addoption('--cov', action='append', default=[], metavar='path', - dest='cov_source', - help='measure coverage for filesystem path (multi-allowed)') - group.addoption('--cov-report', action='append', default=[], metavar='type', - choices=['term', 'term-missing', 'annotate', 'html', 'xml'], - dest='cov_report', - help='type of report to generate: term, term-missing, annotate, html, xml (multi-allowed)') - group.addoption('--cov-config', action='store', default='.coveragerc', metavar='path', - dest='cov_config', - help='config file for coverage, default: .coveragerc') - - -def pytest_configure(config): - """Activate coverage plugin if appropriate.""" - - if config.getvalue('cov_source'): - config.pluginmanager.register(CovPlugin(), '_cov') - - -class CovPlugin(object): - """Use coverage package to produce code coverage reports. - - Delegates all work to a particular implementation based on whether - this test process is centralised, a distributed master or a - distributed slave. - """ - - def __init__(self): - """Creates a coverage pytest plugin. - - We read the rc file that coverage uses to get the data file - name. This is needed since we give coverage through it's API - the data file name. - """ - - # Our implementation is unknown at this time. 
- self.cov_controller = None - - def pytest_sessionstart(self, session): - """At session start determine our implementation and delegate to it.""" - - import cov_core - - cov_source = session.config.getvalue('cov_source') - cov_report = session.config.getvalue('cov_report') or ['term'] - cov_config = session.config.getvalue('cov_config') - - session_name = session.__class__.__name__ - is_master = (session.config.pluginmanager.hasplugin('dsession') or - session_name == 'DSession') - is_slave = (hasattr(session.config, 'slaveinput') or - session_name == 'SlaveSession') - nodeid = None - - if is_master: - controller_cls = cov_core.DistMaster - elif is_slave: - controller_cls = cov_core.DistSlave - nodeid = session.config.slaveinput.get('slaveid', getattr(session, 'nodeid')) - else: - controller_cls = cov_core.Central - - self.cov_controller = controller_cls(cov_source, - cov_report, - cov_config, - session.config, - nodeid) - - self.cov_controller.start() - - def pytest_configure_node(self, node): - """Delegate to our implementation.""" - - self.cov_controller.configure_node(node) - pytest_configure_node.optionalhook = True - - def pytest_testnodedown(self, node, error): - """Delegate to our implementation.""" - - self.cov_controller.testnodedown(node, error) - pytest_testnodedown.optionalhook = True - - def pytest_sessionfinish(self, session, exitstatus): - """Delegate to our implementation.""" - - self.cov_controller.finish() - - def pytest_terminal_summary(self, terminalreporter): - """Delegate to our implementation.""" - - self.cov_controller.summary(terminalreporter._tw) - - -def pytest_funcarg__cov(request): - """A pytest funcarg that provides access to the underlying coverage object.""" - - # Check with hasplugin to avoid getplugin exception in older pytest. 
- if request.config.pluginmanager.hasplugin('_cov'): - plugin = request.config.pluginmanager.getplugin('_cov') - if plugin.cov_controller: - return plugin.cov_controller.cov - return None diff -Nru pypy-4.0.1+dfsg/requirements.txt pypy-5.0.1+dfsg/requirements.txt --- pypy-4.0.1+dfsg/requirements.txt 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/requirements.txt 2016-03-19 16:40:15.000000000 +0000 @@ -0,0 +1,3 @@ +# hypothesis is used for test generation on untranslated jit tests +hypothesis +enum>=0.4.6 # is a dependency, but old pip does not pick it up diff -Nru pypy-4.0.1+dfsg/rpython/annotator/annrpython.py pypy-5.0.1+dfsg/rpython/annotator/annrpython.py --- pypy-4.0.1+dfsg/rpython/annotator/annrpython.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/annrpython.py 2016-03-19 16:40:15.000000000 +0000 @@ -1,15 +1,17 @@ from __future__ import absolute_import import types +from collections import defaultdict from rpython.tool.ansi_print import ansi_log from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, gather_error, source_lines) -from rpython.flowspace.model import ( - Variable, Constant, FunctionGraph, checkgraph) +from rpython.flowspace.model import Variable, Constant, checkgraph from rpython.translator import simplify, transform from rpython.annotator import model as annmodel, signature +from rpython.annotator.model import ( + typeof, s_ImpossibleValue, SomeInstance, intersection, difference) from rpython.annotator.bookkeeper import Bookkeeper from rpython.rtyper.normalizecalls import perform_normalizations @@ -209,7 +211,7 @@ for graph in newgraphs: v = graph.getreturnvar() if v.annotation is None: - self.setbinding(v, annmodel.s_ImpossibleValue) + self.setbinding(v, s_ImpossibleValue) def validate(self): """Check that the annotation results are valid""" @@ -281,7 +283,7 @@ except KeyError: # the function didn't reach any return statement so far. 
# (some functions actually never do, they always raise exceptions) - return annmodel.s_ImpossibleValue + return s_ImpossibleValue def reflowfromposition(self, position_key): graph, block, index = position_key @@ -387,6 +389,34 @@ if unions != oldcells: self.bindinputargs(graph, block, unions) + def apply_renaming(self, s_out, renaming): + if hasattr(s_out, 'is_type_of'): + renamed_is_type_of = [] + for v in s_out.is_type_of: + renamed_is_type_of += renaming[v] + assert s_out.knowntype is type + newcell = typeof(renamed_is_type_of) + if s_out.is_constant(): + newcell.const = s_out.const + s_out = newcell + + if hasattr(s_out, 'knowntypedata'): + renamed_knowntypedata = {} + for value, constraints in s_out.knowntypedata.items(): + renamed_knowntypedata[value] = {} + for v, s in constraints.items(): + new_vs = renaming.get(v, []) + for new_v in new_vs: + renamed_knowntypedata[value][new_v] = s + assert isinstance(s_out, annmodel.SomeBool) + newcell = annmodel.SomeBool() + if s_out.is_constant(): + newcell.const = s_out.const + s_out = newcell + s_out.set_knowntypedata(renamed_knowntypedata) + return s_out + + def whereami(self, position_key): graph, block, i = position_key blk = "" @@ -456,33 +486,31 @@ exits = [link for link in exits if link.exitcase == s_exitswitch.const] - # filter out those exceptions which cannot - # occour for this specific, typed operation. 
if block.canraise: op = block.raising_op - can_only_throw = op.get_can_only_throw(self) - if can_only_throw is not None: - candidates = can_only_throw - candidate_exits = exits - exits = [] - for link in candidate_exits: - case = link.exitcase - if case is None: - exits.append(link) - continue - covered = [c for c in candidates if issubclass(c, case)] - if covered: - exits.append(link) - candidates = [c for c in candidates if c not in covered] - - # mapping (exitcase, variable) -> s_annotation - # that can be attached to booleans, exitswitches - knowntypedata = {} - if isinstance(block.exitswitch, Variable): - knowntypedata = getattr(self.binding(block.exitswitch), - "knowntypedata", {}) - for link in exits: - self.follow_link(graph, link, knowntypedata) + s_exception = self.get_exception(op) + for link in exits: + case = link.exitcase + if case is None: + self.follow_link(graph, link, {}) + continue + if s_exception == s_ImpossibleValue: + break + s_case = SomeInstance(self.bookkeeper.getuniqueclassdef(case)) + s_matching_exc = intersection(s_exception, s_case) + if s_matching_exc != s_ImpossibleValue: + self.follow_raise_link(graph, link, s_matching_exc) + s_exception = difference(s_exception, s_case) + else: + if isinstance(block.exitswitch, Variable): + knowntypedata = getattr( + block.exitswitch.annotation, "knowntypedata", {}) + else: + knowntypedata = {} + for link in exits: + constraints = knowntypedata.get(link.exitcase, {}) + self.follow_link(graph, link, constraints) + if block in self.notify: # reflow from certain positions when this block is done for callback in self.notify[block]: @@ -491,84 +519,66 @@ else: callback() - def follow_link(self, graph, link, knowntypedata): - in_except_block = False - v_last_exc_type = link.last_exception # may be None for non-exception link - v_last_exc_value = link.last_exc_value # may be None for non-exception link - - if (isinstance(link.exitcase, (types.ClassType, type)) and - issubclass(link.exitcase, 
BaseException)): - assert v_last_exc_type and v_last_exc_value - s_last_exc_value = self.bookkeeper.valueoftype(link.exitcase) - s_last_exc_type = annmodel.SomeType() - if isinstance(v_last_exc_type, Constant): - s_last_exc_type.const = v_last_exc_type.value - s_last_exc_type.is_type_of = [v_last_exc_value] - - if isinstance(v_last_exc_type, Variable): - self.setbinding(v_last_exc_type, s_last_exc_type) - if isinstance(v_last_exc_value, Variable): - self.setbinding(v_last_exc_value, s_last_exc_value) - - s_last_exc_type = annmodel.SomeType() - if isinstance(v_last_exc_type, Constant): - s_last_exc_type.const = v_last_exc_type.value - last_exc_value_vars = [] - in_except_block = True + + def follow_link(self, graph, link, constraints): + assert not (isinstance(link.exitcase, (types.ClassType, type)) and + issubclass(link.exitcase, BaseException)) ignore_link = False inputs_s = [] - renaming = {} + renaming = defaultdict(list) + for v_out, v_input in zip(link.args, link.target.inputargs): + renaming[v_out].append(v_input) + + for v_out in link.args: + s_out = self.annotation(v_out) + if v_out in constraints: + s_constraint = constraints[v_out] + s_out = pair(s_out, s_constraint).improve() + # ignore links that try to pass impossible values + if s_out == s_ImpossibleValue: + ignore_link = True + s_out = self.apply_renaming(s_out, renaming) + inputs_s.append(s_out) + if ignore_link: + return + + self.links_followed[link] = True + self.addpendingblock(graph, link.target, inputs_s) + + def follow_raise_link(self, graph, link, s_last_exc_value): + v_last_exc_type = link.last_exception + v_last_exc_value = link.last_exc_value + + assert (isinstance(link.exitcase, (types.ClassType, type)) and + issubclass(link.exitcase, BaseException)) + + assert v_last_exc_type and v_last_exc_value + + if isinstance(v_last_exc_value, Variable): + self.setbinding(v_last_exc_value, s_last_exc_value) + + if isinstance(v_last_exc_type, Variable): + self.setbinding(v_last_exc_type, 
typeof([v_last_exc_value])) + + inputs_s = [] + renaming = defaultdict(list) for v_out, v_input in zip(link.args, link.target.inputargs): - renaming.setdefault(v_out, []).append(v_input) + renaming[v_out].append(v_input) + for v_out, v_input in zip(link.args, link.target.inputargs): if v_out == v_last_exc_type: - assert in_except_block - inputs_s.append(s_last_exc_type) - elif v_out == v_last_exc_value: - assert in_except_block - inputs_s.append(s_last_exc_value) - last_exc_value_vars.append(v_input) + s_out = typeof(renaming[v_last_exc_value]) + if isinstance(v_last_exc_type, Constant): + s_out.const = v_last_exc_type.value + elif v_last_exc_type.annotation.is_constant(): + s_out.const = v_last_exc_type.annotation.const + inputs_s.append(s_out) else: s_out = self.annotation(v_out) - if (link.exitcase, v_out) in knowntypedata: - knownvarvalue = knowntypedata[(link.exitcase, v_out)] - s_out = pair(s_out, knownvarvalue).improve() - # ignore links that try to pass impossible values - if s_out == annmodel.s_ImpossibleValue: - ignore_link = True - - if hasattr(s_out,'is_type_of'): - renamed_is_type_of = [] - for v in s_out.is_type_of: - new_vs = renaming.get(v, []) - renamed_is_type_of += new_vs - assert s_out.knowntype is type - newcell = annmodel.SomeType() - if s_out.is_constant(): - newcell.const = s_out.const - s_out = newcell - s_out.is_type_of = renamed_is_type_of - - if hasattr(s_out, 'knowntypedata'): - renamed_knowntypedata = {} - for (value, v), s in s_out.knowntypedata.items(): - new_vs = renaming.get(v, []) - for new_v in new_vs: - renamed_knowntypedata[value, new_v] = s - assert isinstance(s_out, annmodel.SomeBool) - newcell = annmodel.SomeBool() - if s_out.is_constant(): - newcell.const = s_out.const - s_out = newcell - s_out.set_knowntypedata(renamed_knowntypedata) - + s_out = self.apply_renaming(s_out, renaming) inputs_s.append(s_out) - if ignore_link: - return - if in_except_block: - s_last_exc_type.is_type_of = last_exc_value_vars 
self.links_followed[link] = True self.addpendingblock(graph, link.target, inputs_s) @@ -586,13 +596,23 @@ raise BlockedInference(self, op, -1) resultcell = op.consider(self) if resultcell is None: - resultcell = annmodel.s_ImpossibleValue - elif resultcell == annmodel.s_ImpossibleValue: + resultcell = s_ImpossibleValue + elif resultcell == s_ImpossibleValue: raise BlockedInference(self, op, -1) # the operation cannot succeed assert isinstance(resultcell, annmodel.SomeObject) assert isinstance(op.result, Variable) self.setbinding(op.result, resultcell) # bind resultcell to op.result + def get_exception(self, operation): + """ + Return the annotation for all exceptions that `operation` may raise. + """ + can_only_throw = operation.get_can_only_throw(self) + if can_only_throw is None: + return SomeInstance(self.bookkeeper.getuniqueclassdef(Exception)) + else: + return self.bookkeeper.new_exception(can_only_throw) + class BlockedInference(Exception): """This exception signals the type inference engine that the situation diff -Nru pypy-4.0.1+dfsg/rpython/annotator/binaryop.py pypy-5.0.1+dfsg/rpython/annotator/binaryop.py --- pypy-4.0.1+dfsg/rpython/annotator/binaryop.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/binaryop.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,18 +1,19 @@ """ Binary operations between SomeValues. 
""" +from collections import defaultdict from rpython.tool.pairtype import pair, pairtype from rpython.annotator.model import ( SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList, - SomeDict, SomeUnicodeCodePoint, SomeUnicodeString, + SomeDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeException, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, SomeBuiltinMethod, SomeIterator, SomePBC, SomeNone, SomeFloat, s_None, SomeByteArray, SomeWeakRef, SomeSingleFloat, - SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, + SomeLongFloat, SomeType, SomeTypeOf, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) -from rpython.annotator.bookkeeper import immutablevalue +from rpython.annotator.bookkeeper import immutablevalue, getbookkeeper from rpython.flowspace.model import Variable, Constant, const from rpython.flowspace.operation import op from rpython.rlib import rarithmetic @@ -35,7 +36,7 @@ elif s_obj1.is_constant(): if s_obj1.const is None and not s_obj2.can_be_none(): r.const = False - knowntypedata = {} + knowntypedata = defaultdict(dict) bk = annotator.bookkeeper def bind(src_obj, tgt_obj): @@ -70,6 +71,22 @@ for cmp_op in [op.lt, op.le, op.eq, op.ne, op.gt, op.ge]: _make_cmp_annotator_default(cmp_op) +@op.getitem.register(SomeObject, SomeObject) +def getitem_default(ann, v_obj, v_index): + return s_ImpossibleValue + +def _getitem_can_only_throw(s_c1, s_o2): + impl = op.getitem.get_specialization(s_c1, s_o2) + return read_can_only_throw(impl, s_c1, s_o2) + +@op.getitem_idx.register(SomeObject, SomeObject) +def getitem_idx(ann, v_obj, v_index): + s_obj = ann.annotation(v_obj) + s_index = ann.annotation(v_index) + impl = op.getitem.get_specialization(s_obj, s_index) + return impl(ann, v_obj, v_index) +getitem_idx.can_only_throw = _getitem_can_only_throw + class __extend__(pairtype(SomeObject, SomeObject)): def union((obj1, obj2)): @@ -110,10 +127,10 @@ def 
coerce((obj1, obj2)): return pair(obj1, obj2).union() # reasonable enough - def getitem((obj1, obj2)): + def add((obj1, obj2)): return s_ImpossibleValue - add = sub = mul = truediv = floordiv = div = mod = getitem - lshift = rshift = and_ = or_ = xor = delitem = getitem + sub = mul = truediv = floordiv = div = mod = add + lshift = rshift = and_ = or_ = xor = delitem = add def setitem((obj1, obj2), _): return s_ImpossibleValue @@ -126,17 +143,6 @@ else: return obj - # checked getitems - - def _getitem_can_only_throw(s_c1, s_o2): - impl = pair(s_c1, s_o2).getitem - return read_can_only_throw(impl, s_c1, s_o2) - - def getitem_idx((s_c1, s_o2)): - impl = pair(s_c1, s_o2).getitem - return impl() - getitem_idx.can_only_throw = _getitem_can_only_throw - class __extend__(pairtype(SomeType, SomeType), @@ -145,24 +151,18 @@ def union((obj1, obj2)): result = SomeType() - is_type_of1 = getattr(obj1, 'is_type_of', None) - is_type_of2 = getattr(obj2, 'is_type_of', None) if obj1.is_immutable_constant() and obj2.is_immutable_constant() and obj1.const == obj2.const: result.const = obj1.const - is_type_of = {} - if is_type_of1: - for v in is_type_of1: - is_type_of[v] = True - if is_type_of2: - for v in is_type_of2: - is_type_of[v] = True - if is_type_of: - result.is_type_of = is_type_of.keys() - else: - if is_type_of1 and is_type_of1 == is_type_of2: - result.is_type_of = is_type_of1 return result +class __extend__(pairtype(SomeTypeOf, SomeTypeOf)): + def union((s_obj1, s_obj2)): + vars = list(set(s_obj1.is_type_of) | set(s_obj2.is_type_of)) + result = SomeTypeOf(vars) + if (s_obj1.is_immutable_constant() and s_obj2.is_immutable_constant() + and s_obj1.const == s_obj2.const): + result.const = obj1.const + return result # cloning a function with identical code, for the can_only_throw attribute def _clone(f, can_only_throw = None): @@ -263,7 +263,7 @@ if not (rarithmetic.signedtype(s_int1.knowntype) and rarithmetic.signedtype(s_int2.knowntype)): return r - knowntypedata = {} + 
knowntypedata = defaultdict(dict) def tointtype(s_int0): if s_int0.knowntype is bool: return int @@ -465,10 +465,11 @@ return SomeList(lst1.listdef.union(lst2.listdef)) def add((lst1, lst2)): - return lst1.listdef.offspring(lst2.listdef) + bk = getbookkeeper() + return lst1.listdef.offspring(bk, lst2.listdef) def eq((lst1, lst2)): - lst1.listdef.agree(lst2.listdef) + lst1.listdef.agree(getbookkeeper(), lst2.listdef) return s_Bool ne = eq @@ -531,13 +532,17 @@ return None # r_dict: can throw anything return [] # else: no possible exception +@op.getitem.register(SomeDict, SomeObject) +def getitem_SomeDict(annotator, v_dict, v_key): + s_dict = annotator.annotation(v_dict) + s_key = annotator.annotation(v_key) + s_dict.dictdef.generalize_key(s_key) + position = annotator.bookkeeper.position_key + return s_dict.dictdef.read_value(position) +getitem_SomeDict.can_only_throw = _dict_can_only_throw_keyerror -class __extend__(pairtype(SomeDict, SomeObject)): - def getitem((dic1, obj2)): - dic1.dictdef.generalize_key(obj2) - return dic1.dictdef.read_value() - getitem.can_only_throw = _dict_can_only_throw_keyerror +class __extend__(pairtype(SomeDict, SomeObject)): def setitem((dic1, obj2), s_value): dic1.dictdef.generalize_key(obj2) @@ -565,14 +570,17 @@ class __extend__(pairtype(SomeList, SomeInteger)): def mul((lst1, int2)): - return lst1.listdef.offspring() + bk = getbookkeeper() + return lst1.listdef.offspring(bk) def getitem((lst1, int2)): - return lst1.listdef.read_item() + position = getbookkeeper().position_key + return lst1.listdef.read_item(position) getitem.can_only_throw = [] def getitem_idx((lst1, int2)): - return lst1.listdef.read_item() + position = getbookkeeper().position_key + return lst1.listdef.read_item(position) getitem_idx.can_only_throw = [IndexError] def setitem((lst1, int2), s_value): @@ -633,7 +641,8 @@ class __extend__(pairtype(SomeInteger, SomeList)): def mul((int1, lst2)): - return lst2.listdef.offspring() + bk = getbookkeeper() + return 
lst2.listdef.offspring(bk) class __extend__(pairtype(SomeInstance, SomeInstance)): @@ -682,6 +691,22 @@ thistype = pairtype(SomeInstance, SomeInstance) return super(thistype, pair(ins1, ins2)).improve() +class __extend__( + pairtype(SomeException, SomeInstance), + pairtype(SomeException, SomeNone)): + def union((s_exc, s_inst)): + return unionof(s_exc.as_SomeInstance(), s_inst) + +class __extend__( + pairtype(SomeInstance, SomeException), + pairtype(SomeNone, SomeException)): + def union((s_inst, s_exc)): + return unionof(s_exc.as_SomeInstance(), s_inst) + +class __extend__(pairtype(SomeException, SomeException)): + def union((s_exc1, s_exc2)): + return SomeException(s_exc1.classdefs | s_exc2.classdefs) + @op.getitem.register_transform(SomeInstance, SomeObject) def getitem_SomeInstance(annotator, v_ins, v_idx): @@ -771,6 +796,7 @@ class __extend__(pairtype(SomeNone, SomeObject)): def getitem((none, o)): return s_ImpossibleValue + getitem.can_only_throw = [] def setitem((none, o), s_value): return None diff -Nru pypy-4.0.1+dfsg/rpython/annotator/bookkeeper.py pypy-5.0.1+dfsg/rpython/annotator/bookkeeper.py --- pypy-4.0.1+dfsg/rpython/annotator/bookkeeper.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/bookkeeper.py 2016-03-19 16:40:12.000000000 +0000 @@ -12,7 +12,7 @@ from rpython.annotator.model import ( SomeOrderedDict, SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, - s_None, s_ImpossibleValue, SomeBool, SomeTuple, + s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeException, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty) from rpython.annotator.classdesc import ClassDef, ClassDesc @@ -167,6 +167,10 @@ desc = self.getdesc(cls) return desc.getuniqueclassdef() + def new_exception(self, exc_classes): + clsdefs = {self.getuniqueclassdef(cls) for cls in exc_classes} + return 
SomeException(clsdefs) + def getlistdef(self, **flags_if_new): """Get the ListDef associated with the current position.""" try: @@ -349,8 +353,9 @@ # * a user-defined bound or unbound method object # * a frozen pre-built constant (with _freeze_() == True) # * a bound method of a frozen pre-built constant + obj_key = Constant(pyobj) try: - return self.descs[pyobj] + return self.descs[obj_key] except KeyError: if isinstance(pyobj, types.FunctionType): result = description.FunctionDesc(self, pyobj) @@ -395,7 +400,7 @@ msg = "unexpected prebuilt constant" raise Exception("%s: %r" % (msg, pyobj)) result = self.getfrozen(pyobj) - self.descs[pyobj] = result + self.descs[obj_key] = result return result def getfrozen(self, pyobj): @@ -546,20 +551,6 @@ emulated = callback return self.pbc_call(pbc, args, emulated=emulated) - def _find_current_op(self, opname=None, arity=None, pos=None, s_type=None): - """ Find operation that is currently being annotated. Do some - sanity checks to see whether the correct op was found.""" - # XXX XXX HACK HACK HACK - fn, block, i = self.position_key - op = block.operations[i] - if opname is not None: - assert op.opname == opname - if arity is not None: - assert len(op.args) == arity - if pos is not None: - assert self.annotator.binding(op.args[pos]) == s_type - return op - def whereami(self): return self.annotator.whereami(self.position_key) diff -Nru pypy-4.0.1+dfsg/rpython/annotator/builtin.py pypy-5.0.1+dfsg/rpython/annotator/builtin.py --- pypy-4.0.1+dfsg/rpython/annotator/builtin.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/builtin.py 2016-03-19 16:40:12.000000000 +0000 @@ -2,7 +2,7 @@ Built-in functions. 
""" import sys -from collections import OrderedDict +from collections import OrderedDict, defaultdict from rpython.annotator.model import ( SomeInteger, SomeChar, SomeBool, SomeString, SomeTuple, @@ -39,8 +39,9 @@ return s_result s_realresult = immutablevalue(realresult) if not s_result.contains(s_realresult): - raise Exception("%s%r returned %r, which is not contained in %s" % ( - func, args, realresult, s_result)) + raise AnnotatorError( + "%s%r returned %r, which is not contained in %s" % ( + func, args, realresult, s_result)) return s_realresult # ____________________________________________________________ @@ -56,14 +57,14 @@ s_start, s_stop = args[:2] s_step = args[2] else: - raise Exception("range() takes 1 to 3 arguments") + raise AnnotatorError("range() takes 1 to 3 arguments") empty = False # so far if not s_step.is_constant(): step = 0 # this case signals a variable step else: step = s_step.const if step == 0: - raise Exception("range() with step zero") + raise AnnotatorError("range() with step zero") if s_start.is_constant() and s_stop.is_constant(): try: if len(xrange(s_start.const, s_stop.const, step)) == 0: @@ -123,77 +124,6 @@ def builtin_bytearray(s_str): return SomeByteArray() -def our_issubclass(cls1, cls2): - """ we're going to try to be less silly in the face of old-style classes""" - if cls2 is object: - return True - def classify(cls): - if isinstance(cls, ClassDef): - return 'def' - if cls.__module__ == '__builtin__': - return 'builtin' - else: - return 'cls' - kind1 = classify(cls1) - kind2 = classify(cls2) - if kind1 != 'def' and kind2 != 'def': - return issubclass(cls1, cls2) - if kind1 == 'builtin' and kind2 == 'def': - return False - elif kind1 == 'def' and kind2 == 'builtin': - return issubclass(object, cls2) - else: - bk = getbookkeeper() - def toclassdef(kind, cls): - if kind != 'def': - return bk.getuniqueclassdef(cls) - else: - return cls - return toclassdef(kind1, cls1).issubclass(toclassdef(kind2, cls2)) - - -def 
builtin_isinstance(s_obj, s_type, variables=None): - r = SomeBool() - if s_type.is_constant(): - typ = s_type.const - if issubclass(typ, rpython.rlib.rarithmetic.base_int): - try: - r.const = issubclass(s_obj.knowntype, typ) - except TypeError: # s_obj.knowntype is not a Python type at all - r.const = False - else: - if typ == long: - getbookkeeper().warning("isinstance(., long) is not RPython") - r.const = False - return r - - assert not issubclass(typ, (int, long)) or typ in (bool, int, long), ( - "for integers only isinstance(.,int|r_uint) are supported") - - if s_obj.is_constant(): - r.const = isinstance(s_obj.const, typ) - elif our_issubclass(s_obj.knowntype, typ): - if not s_obj.can_be_none(): - r.const = True - elif not our_issubclass(typ, s_obj.knowntype): - r.const = False - elif s_obj.knowntype == int and typ == bool: # xxx this will explode in case of generalisation - # from bool to int, notice that isinstance( , bool|int) - # is quite border case for RPython - r.const = False - bk = getbookkeeper() - if variables is None: - op = bk._find_current_op("simple_call", 3) - assert op.args[0] == Constant(isinstance) - variables = [op.args[1]] - for variable in variables: - assert bk.annotator.binding(variable) == s_obj - knowntypedata = {} - if not hasattr(typ, '_freeze_') and isinstance(s_type, SomePBC): - add_knowntypedata(knowntypedata, True, variables, bk.valueoftype(typ)) - r.set_knowntypedata(knowntypedata) - return r - # note that this one either needs to be constant, or we will create SomeObject def builtin_hasattr(s_obj, s_attr): if not s_attr.is_constant() or not isinstance(s_attr.const, str): @@ -219,10 +149,11 @@ raise AnnotatorError("tuple(): argument must be another tuple") def builtin_list(s_iterable): + bk = getbookkeeper() if isinstance(s_iterable, SomeList): - return s_iterable.listdef.offspring() + return s_iterable.listdef.offspring(bk) s_iter = s_iterable.iter() - return getbookkeeper().newlist(s_iter.next()) + return 
bk.newlist(s_iter.next()) def builtin_zip(s_iterable1, s_iterable2): # xxx not actually implemented s_iter1 = s_iterable1.iter() @@ -355,7 +286,8 @@ else: @analyzer_for(unicodedata.decimal) def unicodedata_decimal(s_uchr): - raise TypeError("unicodedate.decimal() calls should not happen at interp-level") + raise AnnotatorError( + "unicodedate.decimal() calls should not happen at interp-level") @analyzer_for(OrderedDict) def analyze(): @@ -369,9 +301,9 @@ @analyzer_for(weakref.ref) def weakref_ref(s_obj): if not isinstance(s_obj, SomeInstance): - raise Exception("cannot take a weakref to %r" % (s_obj,)) + raise AnnotatorError("cannot take a weakref to %r" % (s_obj,)) if s_obj.can_be_None: - raise Exception("should assert that the instance we take " + raise AnnotatorError("should assert that the instance we take " "a weakref to cannot be None") return SomeWeakRef(s_obj.classdef) @@ -381,3 +313,14 @@ @analyzer_for(rpython.rlib.objectmodel.free_non_gc_object) def robjmodel_free_non_gc_object(obj): pass + +#________________________________ +# pdb + +import pdb + +@analyzer_for(pdb.set_trace) +def pdb_set_trace(*args_s): + raise AnnotatorError( + "you left pdb.set_trace() in your interpreter! 
" + "If you want to attach a gdb instead, call rlib.debug.attach_gdb()") diff -Nru pypy-4.0.1+dfsg/rpython/annotator/classdesc.py pypy-5.0.1+dfsg/rpython/annotator/classdesc.py --- pypy-4.0.1+dfsg/rpython/annotator/classdesc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/classdesc.py 2016-03-19 16:40:12.000000000 +0000 @@ -258,8 +258,8 @@ yield self self = self.basedef - def issubclass(self, otherclsdef): - return otherclsdef in self.parentdefs + def issubclass(self, other): + return self.classdesc.issubclass(other.classdesc) def getallsubdefs(self): pending = [self] @@ -471,6 +471,19 @@ def is_mixin(cls): return cls.__dict__.get('_mixin_', False) +def is_primitive_type(cls): + from rpython.rlib.rarithmetic import base_int + return cls.__module__ == '__builtin__' or issubclass(cls, base_int) + + +class BuiltinTypeDesc(object): + """Represents a primitive or builtin type object""" + def __init__(self, cls): + self.pyobj = cls + + def issubclass(self, other): + return issubclass(self.pyobj, other.pyobj) + class ClassDesc(Desc): knowntype = type @@ -722,6 +735,9 @@ return True return False + def issubclass(self, other): + return issubclass(self.pyobj, other.pyobj) + def lookup(self, name): cdesc = self while name not in cdesc.classdict: @@ -811,7 +827,7 @@ if immutable_fields: if (search1 in immutable_fields or search2 in immutable_fields): s_result.listdef.never_resize() - s_copy = s_result.listdef.offspring() + s_copy = s_result.listdef.offspring(self.bookkeeper) s_copy.listdef.mark_as_immutable() # cdesc = cdesc.basedesc diff -Nru pypy-4.0.1+dfsg/rpython/annotator/dictdef.py pypy-5.0.1+dfsg/rpython/annotator/dictdef.py --- pypy-4.0.1+dfsg/rpython/annotator/dictdef.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/dictdef.py 2016-03-19 16:40:12.000000000 +0000 @@ -90,26 +90,13 @@ self.dictkey.itemof[self] = True self.dictvalue = DictValue(bookkeeper, s_value) self.dictvalue.itemof[self] = True - 
self.bookkeeper = bookkeeper self.force_non_null = force_non_null - def read_key(self, position_key=None): - if position_key is None: - if self.bookkeeper is None: # for tests - from rpython.annotator.bookkeeper import getbookkeeper - position_key = getbookkeeper().position_key - else: - position_key = self.bookkeeper.position_key + def read_key(self, position_key): self.dictkey.read_locations[position_key] = True return self.dictkey.s_value - def read_value(self, position_key=None): - if position_key is None: - if self.bookkeeper is None: # for tests - from rpython.annotator.bookkeeper import getbookkeeper - position_key = getbookkeeper().position_key - else: - position_key = self.bookkeeper.position_key + def read_value(self, position_key): self.dictvalue.read_locations[position_key] = True return self.dictvalue.s_value diff -Nru pypy-4.0.1+dfsg/rpython/annotator/listdef.py pypy-5.0.1+dfsg/rpython/annotator/listdef.py --- pypy-4.0.1+dfsg/rpython/annotator/listdef.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/listdef.py 2016-03-19 16:40:12.000000000 +0000 @@ -128,18 +128,8 @@ self.listitem.mutated = mutated | resized self.listitem.resized = resized self.listitem.itemof[self] = True - self.bookkeeper = bookkeeper - def getbookkeeper(self): - if self.bookkeeper is None: - from rpython.annotator.bookkeeper import getbookkeeper - return getbookkeeper() - else: - return self.bookkeeper - - def read_item(self, position_key=None): - if position_key is None: - position_key = self.getbookkeeper().position_key + def read_item(self, position_key): self.listitem.read_locations[position_key] = True return self.listitem.s_value @@ -150,9 +140,10 @@ self.listitem.merge(other.listitem) return self - def agree(self, other): - s_self_value = self.read_item() - s_other_value = other.read_item() + def agree(self, bookkeeper, other): + position = bookkeeper.position_key + s_self_value = self.read_item(position) + s_other_value = other.read_item(position) 
self.generalize(s_other_value) other.generalize(s_self_value) if self.listitem.range_step is not None: @@ -160,13 +151,14 @@ if other.listitem.range_step is not None: other.generalize_range_step(self.listitem.range_step) - def offspring(self, *others): - s_self_value = self.read_item() + def offspring(self, bookkeeper, *others): + position = bookkeeper.position_key + s_self_value = self.read_item(position) s_other_values = [] for other in others: - s_other_values.append(other.read_item()) - s_newlst = self.getbookkeeper().newlist(s_self_value, *s_other_values) - s_newvalue = s_newlst.listdef.read_item() + s_other_values.append(other.read_item(position)) + s_newlst = bookkeeper.newlist(s_self_value, *s_other_values) + s_newvalue = s_newlst.listdef.read_item(position) self.generalize(s_newvalue) for other in others: other.generalize(s_newvalue) diff -Nru pypy-4.0.1+dfsg/rpython/annotator/model.py pypy-5.0.1+dfsg/rpython/annotator/model.py --- pypy-4.0.1+dfsg/rpython/annotator/model.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/model.py 2016-03-19 16:40:15.000000000 +0000 @@ -32,11 +32,11 @@ import inspect import weakref from types import BuiltinFunctionType, MethodType -from collections import OrderedDict +from collections import OrderedDict, defaultdict import rpython from rpython.tool import descriptor -from rpython.tool.pairtype import pair, extendabletype +from rpython.tool.pairtype import pair, extendabletype, doubledispatch from rpython.rlib.rarithmetic import r_uint, base_int, r_singlefloat, r_longfloat @@ -129,6 +129,16 @@ def nonnoneify(self): return self +@doubledispatch +def intersection(s_obj1, s_obj2): + """Return the intersection of two annotations, or an over-approximation thereof""" + raise NotImplementedError + +@doubledispatch +def difference(s_obj1, s_obj2): + """Return the set difference of two annotations, or an over-approximation thereof""" + raise NotImplementedError + class SomeType(SomeObject): "Stands for a 
type. We might not be sure which one it is." @@ -138,6 +148,23 @@ def can_be_none(self): return False +class SomeTypeOf(SomeType): + """The type of a variable""" + def __init__(self, args_v): + self.is_type_of = args_v + +def typeof(args_v): + if args_v: + result = SomeTypeOf(args_v) + if len(args_v) == 1: + s_arg = args_v[0].annotation + if isinstance(s_arg, SomeException) and len(s_arg.classdefs) == 1: + cdef, = s_arg.classdefs + result.const = cdef.classdesc.pyobj + return result + else: + return SomeType() + class SomeFloat(SomeObject): "Stands for a float or an integer." @@ -214,6 +241,9 @@ def set_knowntypedata(self, knowntypedata): assert not hasattr(self, 'knowntypedata') + for key, value in knowntypedata.items(): + if not value: + del knowntypedata[key] if knowntypedata: self.knowntypedata = knowntypedata @@ -409,7 +439,7 @@ def __init__(self, classdef, can_be_None=False, flags={}): self.classdef = classdef - self.knowntype = classdef or object + self.knowntype = classdef.classdesc if classdef else None self.can_be_None = can_be_None self.flags = flags @@ -437,6 +467,52 @@ def noneify(self): return SomeInstance(self.classdef, can_be_None=True) +@intersection.register(SomeInstance, SomeInstance) +def intersection_Instance(s_inst1, s_inst2): + can_be_None = s_inst1.can_be_None and s_inst2.can_be_None + if s_inst1.classdef.issubclass(s_inst2.classdef): + return SomeInstance(s_inst1.classdef, can_be_None=can_be_None) + elif s_inst2.classdef.issubclass(s_inst1.classdef): + return SomeInstance(s_inst2.classdef, can_be_None=can_be_None) + else: + return s_ImpossibleValue + +@difference.register(SomeInstance, SomeInstance) +def difference_Instance_Instance(s_inst1, s_inst2): + if s_inst1.classdef.issubclass(s_inst2.classdef): + return s_ImpossibleValue + else: + return s_inst1 + + +class SomeException(SomeObject): + """The set of exceptions obeying type(exc) in self.classes""" + def __init__(self, classdefs): + self.classdefs = classdefs + + def 
as_SomeInstance(self): + return unionof(*[SomeInstance(cdef) for cdef in self.classdefs]) + +@intersection.register(SomeException, SomeInstance) +def intersection_Exception_Instance(s_exc, s_inst): + classdefs = {c for c in s_exc.classdefs if c.issubclass(s_inst.classdef)} + if classdefs: + return SomeException(classdefs) + else: + return s_ImpossibleValue + +@intersection.register(SomeInstance, SomeException) +def intersection_Exception_Instance(s_inst, s_exc): + return intersection(s_exc, s_inst) + +@difference.register(SomeException, SomeInstance) +def difference_Exception_Instance(s_exc, s_inst): + classdefs = {c for c in s_exc.classdefs + if not c.issubclass(s_inst.classdef)} + if classdefs: + return SomeException(classdefs) + else: + return s_ImpossibleValue class SomePBC(SomeObject): """Stands for a global user instance, built prior to the analysis, @@ -682,14 +758,15 @@ def add_knowntypedata(ktd, truth, vars, s_obj): for v in vars: - ktd[(truth, v)] = s_obj + ktd[truth][v] = s_obj def merge_knowntypedata(ktd1, ktd2): - r = {} - for truth_v in ktd1: - if truth_v in ktd2: - r[truth_v] = unionof(ktd1[truth_v], ktd2[truth_v]) + r = defaultdict(dict) + for truth, constraints in ktd1.items(): + for v in constraints: + if truth in ktd2 and v in ktd2[truth]: + r[truth][v] = unionof(ktd1[truth][v], ktd2[truth][v]) return r diff -Nru pypy-4.0.1+dfsg/rpython/annotator/policy.py pypy-5.0.1+dfsg/rpython/annotator/policy.py --- pypy-4.0.1+dfsg/rpython/annotator/policy.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/policy.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,6 +3,9 @@ from rpython.annotator.specialize import ( specialize_argvalue, specialize_argtype, specialize_arglistitemtype, specialize_arg_or_var, memo, specialize_call_location) +from rpython.flowspace.operation import op +from rpython.flowspace.model import Constant +from rpython.annotator.model import SomeTuple class AnnotatorPolicy(object): @@ -64,7 +67,34 @@ return 
LowLevelAnnotatorPolicy.specialize__ll_and_arg(*args) def no_more_blocks_to_annotate(pol, annotator): + bk = annotator.bookkeeper # hint to all pending specializers that we are done - for callback in annotator.bookkeeper.pending_specializations: + for callback in bk.pending_specializations: callback() - del annotator.bookkeeper.pending_specializations[:] + del bk.pending_specializations[:] + if annotator.added_blocks is not None: + all_blocks = annotator.added_blocks + else: + all_blocks = annotator.annotated + for block in list(all_blocks): + for i, instr in enumerate(block.operations): + if not isinstance(instr, (op.simple_call, op.call_args)): + continue + v_func = instr.args[0] + s_func = annotator.annotation(v_func) + if not hasattr(s_func, 'needs_sandboxing'): + continue + key = ('sandboxing', s_func.const) + if key not in bk.emulated_pbc_calls: + params_s = s_func.args_s + s_result = s_func.s_result + from rpython.translator.sandbox.rsandbox import make_sandbox_trampoline + sandbox_trampoline = make_sandbox_trampoline( + s_func.name, params_s, s_result) + sandbox_trampoline._signature_ = [SomeTuple(items=params_s)], s_result + bk.emulate_pbc_call(key, bk.immutablevalue(sandbox_trampoline), params_s) + else: + s_trampoline = bk.emulated_pbc_calls[key][0] + sandbox_trampoline = s_trampoline.const + new = instr.replace({instr.args[0]: Constant(sandbox_trampoline)}) + block.operations[i] = new diff -Nru pypy-4.0.1+dfsg/rpython/annotator/signature.py pypy-5.0.1+dfsg/rpython/annotator/signature.py --- pypy-4.0.1+dfsg/rpython/annotator/signature.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/signature.py 2016-03-19 16:40:12.000000000 +0000 @@ -100,6 +100,7 @@ self.argtypes = argtypes def __call__(self, funcdesc, inputcells): + from rpython.rlib.objectmodel import NOT_CONSTANT from rpython.rtyper.lltypesystem import lltype args_s = [] from rpython.annotator import model as annmodel @@ -115,6 +116,9 @@ args_s.append(s_input) elif argtype 
is None: args_s.append(inputcells[i]) # no change + elif argtype is NOT_CONSTANT: + from rpython.annotator.model import not_const + args_s.append(not_const(inputcells[i])) else: args_s.append(annotation(argtype, bookkeeper=funcdesc.bookkeeper)) if len(inputcells) != len(args_s): diff -Nru pypy-4.0.1+dfsg/rpython/annotator/specialize.py pypy-5.0.1+dfsg/rpython/annotator/specialize.py --- pypy-4.0.1+dfsg/rpython/annotator/specialize.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/specialize.py 2016-03-19 16:40:12.000000000 +0000 @@ -317,18 +317,6 @@ yield (value,) + tuple_tail -def make_constgraphbuilder(n, v=None, factory=None, srcmodule=None): - def constgraphbuilder(translator, ignore): - args = ','.join(["arg%d" % i for i in range(n)]) - if factory is not None: - computed_v = factory() - else: - computed_v = v - miniglobals = {'v': computed_v, '__name__': srcmodule} - exec py.code.Source("constf = lambda %s: v" % args).compile() in miniglobals - return translator.buildflowgraph(miniglobals['constf']) - return constgraphbuilder - def maybe_star_args(funcdesc, key, args_s): args_s, key1, builder = flatten_star_args(funcdesc, args_s) if key1 is not None: diff -Nru pypy-4.0.1+dfsg/rpython/annotator/test/test_annrpython.py pypy-5.0.1+dfsg/rpython/annotator/test/test_annrpython.py --- pypy-4.0.1+dfsg/rpython/annotator/test/test_annrpython.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/test/test_annrpython.py 2016-03-19 16:40:12.000000000 +0000 @@ -41,8 +41,8 @@ assert isinstance(s_dict, annmodel.SomeDict) return s_dict.dictdef.dictvalue.s_value -def somedict(s_key, s_value): - return annmodel.SomeDict(DictDef(None, s_key, s_value)) +def somedict(annotator, s_key, s_value): + return annmodel.SomeDict(DictDef(annotator.bookkeeper, s_key, s_value)) class TestAnnotateTestCase: @@ -586,7 +586,7 @@ def test_simple_iter_dict(self): a = self.RPythonAnnotator() - t = somedict(annmodel.SomeInteger(), 
annmodel.SomeInteger()) + t = somedict(a, annmodel.SomeInteger(), annmodel.SomeInteger()) s = a.build_types(snippet.simple_iter, [t]) assert isinstance(s, annmodel.SomeIterator) @@ -602,7 +602,7 @@ def test_dict_copy(self): a = self.RPythonAnnotator() - t = somedict(annmodel.SomeInteger(), annmodel.SomeInteger()) + t = somedict(a, annmodel.SomeInteger(), annmodel.SomeInteger()) s = a.build_types(snippet.dict_copy, [t]) assert isinstance(dictkey(s), annmodel.SomeInteger) assert isinstance(dictvalue(s), annmodel.SomeInteger) @@ -698,6 +698,56 @@ s = a.build_types(snippet.exc_deduction_our_excs_plus_others, []) assert isinstance(s, annmodel.SomeInteger) + def test_complex_exception_deduction(self): + class InternalError(Exception): + def __init__(self, msg): + self.msg = msg + + class AppError(Exception): + def __init__(self, msg): + self.msg = msg + def apperror(msg): + return AppError(msg) + + def f(string): + if not string: + raise InternalError('Empty string') + return string, None + def cleanup(): + pass + + def g(string): + try: + try: + string, _ = f(string) + except ZeroDivisionError: + raise apperror('ZeroDivisionError') + try: + result, _ = f(string) + finally: + cleanup() + except InternalError as e: + raise apperror(e.msg) + return result + + a = self.RPythonAnnotator() + s_result = a.build_types(g, [str]) + assert isinstance(s_result, annmodel.SomeString) + + def test_method_exception_specialization(self): + def f(l): + try: + return l.pop() + except Exception: + raise + a = self.RPythonAnnotator() + s = a.build_types(f, [[int]]) + graph = graphof(a, f) + etype, evalue = graph.exceptblock.inputargs + assert evalue.annotation.classdefs == { + a.bookkeeper.getuniqueclassdef(IndexError)} + assert etype.annotation.const == IndexError + def test_operation_always_raising(self): def operation_always_raising(n): lst = [] @@ -1123,6 +1173,13 @@ s = a.build_types(g, [int]) assert s.const == True + def test_isinstance_basic(self): + def f(): + return 
isinstance(IndexError(), type) + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert s.const == False + def test_alloc_like(self): class Base(object): pass @@ -1373,14 +1430,14 @@ except KeyError: raise a = self.RPythonAnnotator() - a.build_types(f, [somedict(annmodel.s_Int, annmodel.s_Int)]) + a.build_types(f, [somedict(a, annmodel.s_Int, annmodel.s_Int)]) fg = graphof(a, f) et, ev = fg.exceptblock.inputargs - t = annmodel.SomeType() + t = annmodel.SomeTypeOf([ev]) t.const = KeyError - t.is_type_of = [ev] - assert a.binding(et) == t - assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError) + assert et.annotation == t + s_ev = ev.annotation + assert s_ev == a.bookkeeper.new_exception([KeyError]) def test_reraiseAnything(self): def f(dic): @@ -1389,14 +1446,14 @@ except: raise a = self.RPythonAnnotator() - a.build_types(f, [somedict(annmodel.s_Int, annmodel.s_Int)]) + a.build_types(f, [somedict(a, annmodel.s_Int, annmodel.s_Int)]) fg = graphof(a, f) et, ev = fg.exceptblock.inputargs - t = annmodel.SomeType() - t.is_type_of = [ev] - t.const = KeyError # IndexError ignored because 'dic' is a dict - assert a.binding(et) == t - assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError) + t = annmodel.SomeTypeOf([ev]) + t.const = KeyError # IndexError ignored because 'dic' is a dict + assert et.annotation == t + s_ev = ev.annotation + assert s_ev == a.bookkeeper.new_exception([KeyError]) def test_exception_mixing(self): def h(): @@ -1427,10 +1484,11 @@ a.build_types(f, [int, somelist(annmodel.s_Int)]) fg = graphof(a, f) et, ev = fg.exceptblock.inputargs - t = annmodel.SomeType() - t.is_type_of = [ev] - assert a.binding(et) == t - assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception) + t = annmodel.SomeTypeOf([ev]) + assert et.annotation == t + s_ev = 
ev.annotation + assert (isinstance(s_ev, annmodel.SomeInstance) and + s_ev.classdef == a.bookkeeper.getuniqueclassdef(Exception)) def test_try_except_raise_finally1(self): def h(): pass @@ -1449,10 +1507,11 @@ a.build_types(f, []) fg = graphof(a, f) et, ev = fg.exceptblock.inputargs - t = annmodel.SomeType() - t.is_type_of = [ev] - assert a.binding(et) == t - assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception) + t = annmodel.SomeTypeOf([ev]) + assert et.annotation == t + s_ev = ev.annotation + assert (isinstance(s_ev, annmodel.SomeInstance) and + s_ev.classdef == a.bookkeeper.getuniqueclassdef(Exception)) def test_inplace_div(self): def f(n): @@ -3457,6 +3516,32 @@ s = a.build_types(f, [unicode]) assert isinstance(s, annmodel.SomeUnicodeString) + def test_extended_slice(self): + a = self.RPythonAnnotator() + def f(start, end, step): + return [1, 2, 3][start:end:step] + with py.test.raises(AnnotatorError): + a.build_types(f, [int, int, int]) + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True)]) + def f(x): + return x[::-1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[::2] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[1:2:1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) def test_negative_slice(self): def f(s, e): @@ -3503,7 +3588,7 @@ a = self.RPythonAnnotator() s = a.build_types(f, []) assert isinstance(s, annmodel.SomeList) - assert not s.listdef.listitem.resized + assert s.listdef.listitem.resized assert not s.listdef.listitem.immutable assert s.listdef.listitem.mutated diff -Nru pypy-4.0.1+dfsg/rpython/annotator/test/test_model.py 
pypy-5.0.1+dfsg/rpython/annotator/test/test_model.py --- pypy-4.0.1+dfsg/rpython/annotator/test/test_model.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/test/test_model.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,8 +1,17 @@ -import py +import pytest +from rpython.flowspace.model import Variable +from rpython.flowspace.operation import op +from rpython.translator.translator import TranslationContext from rpython.annotator.model import * +from rpython.annotator.annrpython import BlockedInference from rpython.annotator.listdef import ListDef -from rpython.translator.translator import TranslationContext +from rpython.annotator import unaryop, binaryop # for side-effects + +@pytest.fixture() +def annotator(): + t = TranslationContext() + return t.buildannotator() listdef1 = ListDef(None, SomeTuple([SomeInteger(nonneg=True), SomeString()])) @@ -100,19 +109,21 @@ class AAA(object): pass -def test_blocked_inference1(): +def test_blocked_inference1(annotator): def blocked_inference(): return AAA().m() - py.test.raises(AnnotatorError, compile_function, blocked_inference) + with pytest.raises(AnnotatorError): + annotator.build_types(blocked_inference, []) -def test_blocked_inference2(): +def test_blocked_inference2(annotator): def blocked_inference(): a = AAA() b = a.x return b - py.test.raises(AnnotatorError, compile_function, blocked_inference) + with pytest.raises(AnnotatorError): + annotator.build_types(blocked_inference, []) def test_not_const(): @@ -129,3 +140,86 @@ assert s.no_nul is True s = SomeChar().nonnulify() assert s.no_nul is True + +def test_SomeException_union(annotator): + bk = annotator.bookkeeper + someinst = lambda cls, **kw: SomeInstance(bk.getuniqueclassdef(cls), **kw) + s_inst = someinst(Exception) + s_exc = bk.new_exception([ValueError, IndexError]) + assert unionof(s_exc, s_inst) == s_inst + assert unionof(s_inst, s_exc) == s_inst + s_nullable = unionof(s_None, bk.new_exception([ValueError])) + assert 
isinstance(s_nullable, SomeInstance) + assert s_nullable.can_be_None + s_exc1 = bk.new_exception([ValueError]) + s_exc2 = bk.new_exception([IndexError]) + unionof(s_exc1, s_exc2) == unionof(s_exc2, s_exc1) + +def contains_s(s_a, s_b): + if s_b is None: + return True + elif s_a is None: + return False + else: + return s_a.contains(s_b) + +def annotate_op(ann, hlop, args_s): + for v_arg, s_arg in zip(hlop.args, args_s): + ann.setbinding(v_arg, s_arg) + with ann.bookkeeper.at_position(None): + try: + ann.consider_op(hlop) + except BlockedInference: + # BlockedInference only stops annotation along the normal path, + # but not along the exceptional one. + pass + return hlop.result.annotation, ann.get_exception(hlop) + +def test_generalize_getitem_dict(annotator): + bk = annotator.bookkeeper + hlop = op.getitem(Variable(), Variable()) + s_int = SomeInteger() + with bk.at_position(None): + s_empty_dict = bk.newdict() + s_value, s_exc = annotate_op(annotator, hlop, [s_None, s_int]) + s_value2, s_exc2 = annotate_op(annotator, hlop, [s_empty_dict, s_int]) + assert contains_s(s_value2, s_value) + assert contains_s(s_exc2, s_exc) + +def test_generalize_getitem_list(annotator): + bk = annotator.bookkeeper + hlop = op.getitem(Variable(), Variable()) + s_int = SomeInteger() + with bk.at_position(None): + s_empty_list = bk.newlist() + s_value, s_exc = annotate_op(annotator, hlop, [s_None, s_int]) + s_value2, s_exc2 = annotate_op(annotator, hlop, [s_empty_list, s_int]) + assert contains_s(s_value2, s_value) + assert contains_s(s_exc2, s_exc) + +def test_generalize_getitem_string(annotator): + hlop = op.getitem(Variable(), Variable()) + s_int = SomeInteger() + s_str = SomeString(can_be_None=True) + s_value, s_exc = annotate_op(annotator, hlop, [s_None, s_int]) + s_value2, s_exc2 = annotate_op(annotator, hlop, [s_str, s_int]) + assert contains_s(s_value2, s_value) + assert contains_s(s_exc2, s_exc) + +def test_generalize_string_concat(annotator): + hlop = op.add(Variable(), 
Variable()) + s_str = SomeString(can_be_None=True) + s_value, s_exc = annotate_op(annotator, hlop, [s_None, s_str]) + s_value2, s_exc2 = annotate_op(annotator, hlop, [s_str, s_str]) + assert contains_s(s_value2, s_value) + assert contains_s(s_exc2, s_exc) + +def test_getitem_dict(annotator): + bk = annotator.bookkeeper + hlop = op.getitem(Variable(), Variable()) + with bk.at_position(None): + s_dict = bk.newdict() + s_dict.dictdef.generalize_key(SomeString()) + s_dict.dictdef.generalize_value(SomeInteger()) + s_result, _ = annotate_op(annotator, hlop, [s_dict, SomeString()]) + assert s_result == SomeInteger() diff -Nru pypy-4.0.1+dfsg/rpython/annotator/unaryop.py pypy-5.0.1+dfsg/rpython/annotator/unaryop.py --- pypy-4.0.1+dfsg/rpython/annotator/unaryop.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/annotator/unaryop.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,9 +1,10 @@ """ Unary operations on SomeValues. """ - from __future__ import absolute_import +from collections import defaultdict + from rpython.tool.pairtype import pair from rpython.flowspace.operation import op from rpython.flowspace.model import const, Constant @@ -11,14 +12,14 @@ from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, - SomeFloat, SomeIterator, SomePBC, SomeNone, SomeType, s_ImpossibleValue, + SomeFloat, SomeIterator, SomePBC, SomeNone, SomeTypeOf, s_ImpossibleValue, s_Bool, s_None, s_Int, unionof, add_knowntypedata, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue -from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? 
from rpython.annotator.binaryop import _dict_can_only_throw_keyerror from rpython.annotator.binaryop import _dict_can_only_throw_nothing +from rpython.annotator.classdesc import ClassDesc, is_primitive_type, BuiltinTypeDesc from rpython.annotator.model import AnnotatorError from rpython.annotator.argument import simple_args, complex_args @@ -26,12 +27,55 @@ if oper.dispatch == 1]) UNARY_OPERATIONS.remove('contains') + @op.type.register(SomeObject) -def type_SomeObject(annotator, arg): - r = SomeType() - r.is_type_of = [arg] +def type_SomeObject(annotator, v_arg): + return SomeTypeOf([v_arg]) + + +def our_issubclass(bk, cls1, cls2): + def toclassdesc(cls): + if isinstance(cls, ClassDesc): + return cls + elif is_primitive_type(cls): + return BuiltinTypeDesc(cls) + else: + return bk.getdesc(cls) + return toclassdesc(cls1).issubclass(toclassdesc(cls2)) + + +def s_isinstance(annotator, s_obj, s_type, variables): + if not s_type.is_constant(): + return SomeBool() + r = SomeBool() + typ = s_type.const + bk = annotator.bookkeeper + if s_obj.is_constant(): + r.const = isinstance(s_obj.const, typ) + elif our_issubclass(bk, s_obj.knowntype, typ): + if not s_obj.can_be_none(): + r.const = True + elif not our_issubclass(bk, typ, s_obj.knowntype): + r.const = False + elif s_obj.knowntype == int and typ == bool: # xxx this will explode in case of generalisation + # from bool to int, notice that isinstance( , bool|int) + # is quite border case for RPython + r.const = False + for v in variables: + assert v.annotation == s_obj + knowntypedata = defaultdict(dict) + if not hasattr(typ, '_freeze_') and isinstance(s_type, SomePBC): + add_knowntypedata(knowntypedata, True, variables, bk.valueoftype(typ)) + r.set_knowntypedata(knowntypedata) return r +@op.isinstance.register(SomeObject) +def isinstance_SomeObject(annotator, v_obj, v_cls): + s_obj = annotator.annotation(v_obj) + s_cls = annotator.annotation(v_cls) + return s_isinstance(annotator, s_obj, s_cls, variables=[v_obj]) + + 
@op.bool.register(SomeObject) def bool_SomeObject(annotator, obj): r = SomeBool() @@ -39,7 +83,7 @@ s_nonnone_obj = annotator.annotation(obj) if s_nonnone_obj.can_be_none(): s_nonnone_obj = s_nonnone_obj.nonnoneify() - knowntypedata = {} + knowntypedata = defaultdict(dict) add_knowntypedata(knowntypedata, True, [obj], s_nonnone_obj) r.set_knowntypedata(knowntypedata) return r @@ -69,8 +113,9 @@ @op.simple_call.register(SomeObject) def simple_call_SomeObject(annotator, func, *args): - return annotator.annotation(func).call( - simple_args([annotator.annotation(arg) for arg in args])) + s_func = annotator.annotation(func) + argspec = simple_args([annotator.annotation(arg) for arg in args]) + return s_func.call(argspec) @op.call_args.register_transform(SomeObject) def transform_varargs(annotator, v_func, v_shape, *data_v): @@ -99,17 +144,16 @@ callspec = complex_args([annotator.annotation(v_arg) for v_arg in args_v]) return annotator.annotation(func).call(callspec) -class __extend__(SomeObject): +@op.issubtype.register(SomeObject) +def issubtype(annotator, v_type, v_cls): + s_type = v_type.annotation + s_cls = annotator.annotation(v_cls) + if s_type.is_constant() and s_cls.is_constant(): + return annotator.bookkeeper.immutablevalue( + issubclass(s_type.const, s_cls.const)) + return s_Bool - def issubtype(self, s_cls): - if hasattr(self, 'is_type_of'): - vars = self.is_type_of - annotator = getbookkeeper().annotator - return builtin.builtin_isinstance(annotator.binding(vars[0]), - s_cls, vars) - if self.is_constant() and s_cls.is_constant(): - return immutablevalue(issubclass(self.const, s_cls.const)) - return s_Bool +class __extend__(SomeObject): def len(self): return SomeInteger(nonneg=True) @@ -291,7 +335,7 @@ return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(self): + def getanyitem(self, position): return unionof(*self.items) def getslice(self, s_start, s_stop): @@ -316,7 +360,7 @@ def method_extend(self, s_iterable): self.listdef.resize() if 
isinstance(s_iterable, SomeList): # unify the two lists - self.listdef.agree(s_iterable.listdef) + self.listdef.agree(getbookkeeper(), s_iterable.listdef) else: s_iter = s_iterable.iter() self.method_append(s_iter.next()) @@ -332,8 +376,9 @@ self.listdef.generalize(s_value) def method_pop(self, s_index=None): + position = getbookkeeper().position_key self.listdef.resize() - return self.listdef.read_item() + return self.listdef.read_item(position) method_pop.can_only_throw = [IndexError] def method_index(self, s_value): @@ -341,7 +386,8 @@ return SomeInteger(nonneg=True) def len(self): - s_item = self.listdef.read_item() + position = getbookkeeper().position_key + s_item = self.listdef.read_item(position) if isinstance(s_item, SomeImpossibleValue): return immutablevalue(0) return SomeObject.len(self) @@ -350,8 +396,8 @@ return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(self): - return self.listdef.read_item() + def getanyitem(self, position): + return self.listdef.read_item(position) def hint(self, *args_s): hints = args_s[-1].const @@ -364,20 +410,21 @@ self.listdef.resize() self.listdef.listitem.hint_maxlength = True elif 'fence' in hints: - self = self.listdef.offspring() + self = self.listdef.offspring(getbookkeeper()) return self def getslice(self, s_start, s_stop): + bk = getbookkeeper() check_negative_slice(s_start, s_stop) - return self.listdef.offspring() + return self.listdef.offspring(bk) def setslice(self, s_start, s_stop, s_iterable): check_negative_slice(s_start, s_stop) if not isinstance(s_iterable, SomeList): raise Exception("list[start:stop] = x: x must be a list") self.listdef.mutate() - self.listdef.agree(s_iterable.listdef) - # note that setslice is not allowed to resize a list in RPython + self.listdef.agree(getbookkeeper(), s_iterable.listdef) + self.listdef.resize() def delslice(self, s_start, s_stop): check_negative_slice(s_start, s_stop) @@ -392,9 +439,9 @@ raise AnnotatorError("%s: not proven to have non-negative stop" % 
error) -def dict_contains(s_dct, s_element): +def dict_contains(s_dct, s_element, position): s_dct.dictdef.generalize_key(s_element) - if s_dct._is_empty(): + if s_dct._is_empty(position): s_bool = SomeBool() s_bool.const = False return s_bool @@ -402,20 +449,23 @@ @op.contains.register(SomeDict) def contains_SomeDict(annotator, dct, element): + position = annotator.bookkeeper.position_key return dict_contains(annotator.annotation(dct), - annotator.annotation(element)) + annotator.annotation(element), + position) contains_SomeDict.can_only_throw = _dict_can_only_throw_nothing class __extend__(SomeDict): - def _is_empty(self): - s_key = self.dictdef.read_key() - s_value = self.dictdef.read_value() + def _is_empty(self, position): + s_key = self.dictdef.read_key(position) + s_value = self.dictdef.read_value(position) return (isinstance(s_key, SomeImpossibleValue) or isinstance(s_value, SomeImpossibleValue)) def len(self): - if self._is_empty(): + position = getbookkeeper().position_key + if self._is_empty(position): return immutablevalue(0) return SomeObject.len(self) @@ -423,14 +473,14 @@ return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(self, variant='keys'): + def getanyitem(self, position, variant='keys'): if variant == 'keys': - return self.dictdef.read_key() + return self.dictdef.read_key(position) elif variant == 'values': - return self.dictdef.read_value() + return self.dictdef.read_value(position) elif variant == 'items' or variant == 'items_with_hash': - s_key = self.dictdef.read_key() - s_value = self.dictdef.read_value() + s_key = self.dictdef.read_key(position) + s_value = self.dictdef.read_value(position) if (isinstance(s_key, SomeImpossibleValue) or isinstance(s_value, SomeImpossibleValue)): return s_ImpossibleValue @@ -439,16 +489,17 @@ elif variant == 'items_with_hash': return SomeTuple((s_key, s_value, s_Int)) elif variant == 'keys_with_hash': - s_key = self.dictdef.read_key() + s_key = self.dictdef.read_key(position) if 
isinstance(s_key, SomeImpossibleValue): return s_ImpossibleValue return SomeTuple((s_key, s_Int)) raise ValueError(variant) def method_get(self, key, dfl): + position = getbookkeeper().position_key self.dictdef.generalize_key(key) self.dictdef.generalize_value(dfl) - return self.dictdef.read_value() + return self.dictdef.read_value(position) method_setdefault = method_get @@ -464,13 +515,16 @@ pass def method_keys(self): - return getbookkeeper().newlist(self.dictdef.read_key()) + bk = getbookkeeper() + return bk.newlist(self.dictdef.read_key(bk.position_key)) def method_values(self): - return getbookkeeper().newlist(self.dictdef.read_value()) + bk = getbookkeeper() + return bk.newlist(self.dictdef.read_value(bk.position_key)) def method_items(self): - return getbookkeeper().newlist(self.getanyitem('items')) + bk = getbookkeeper() + return bk.newlist(self.getanyitem(bk.position_key, variant='items')) def method_iterkeys(self): return SomeIterator(self, 'keys') @@ -491,16 +545,19 @@ pass def method_popitem(self): - return self.getanyitem('items') + position = getbookkeeper().position_key + return self.getanyitem(position, variant='items') def method_pop(self, s_key, s_dfl=None): self.dictdef.generalize_key(s_key) if s_dfl is not None: self.dictdef.generalize_value(s_dfl) - return self.dictdef.read_value() + position = getbookkeeper().position_key + return self.dictdef.read_value(position) def method_contains_with_hash(self, s_key, s_hash): - return dict_contains(self, s_key) + position = getbookkeeper().position_key + return dict_contains(self, s_key, position) method_contains_with_hash.can_only_throw = _dict_can_only_throw_nothing def method_setitem_with_hash(self, s_key, s_hash, s_value): @@ -508,7 +565,10 @@ method_setitem_with_hash.can_only_throw = _dict_can_only_throw_nothing def method_getitem_with_hash(self, s_key, s_hash): - return pair(self, s_key).getitem() + # XXX: copy of binaryop.getitem_SomeDict + self.dictdef.generalize_key(s_key) + position = 
getbookkeeper().position_key + return self.dictdef.read_value(position) method_getitem_with_hash.can_only_throw = _dict_can_only_throw_keyerror def method_delitem_with_hash(self, s_key, s_hash): @@ -520,7 +580,7 @@ def contains_String(annotator, string, char): if annotator.annotation(char).is_constant() and annotator.annotation(char).const == "\0": r = SomeBool() - knowntypedata = {} + knowntypedata = defaultdict(dict) add_knowntypedata(knowntypedata, False, [string], annotator.annotation(string).nonnulify()) r.set_knowntypedata(knowntypedata) @@ -573,7 +633,8 @@ def method_join(self, s_list): if s_None.contains(s_list): return SomeImpossibleValue() - s_item = s_list.listdef.read_item() + position = getbookkeeper().position_key + s_item = s_list.listdef.read_item(position) if s_None.contains(s_item): if isinstance(self, SomeUnicodeString): return immutablevalue(u"") @@ -585,7 +646,7 @@ return SomeIterator(self) iter.can_only_throw = [] - def getanyitem(self): + def getanyitem(self, position): return self.basecharclass() def method_split(self, patt, max=-1): @@ -626,7 +687,7 @@ enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): raise AnnotatorError("Encoding %s not supported for unicode" % (enc,)) - return SomeString() + return SomeString(no_nul=self.no_nul) method_encode.can_only_throw = [UnicodeEncodeError] @@ -659,7 +720,7 @@ enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): raise AnnotatorError("Encoding %s not supported for strings" % (enc,)) - return SomeUnicodeString() + return SomeUnicodeString(no_nul=self.no_nul) method_decode.can_only_throw = [UnicodeDecodeError] class __extend__(SomeChar, SomeUnicodeCodePoint): @@ -710,15 +771,16 @@ return can_throw def next(self): + position = getbookkeeper().position_key if s_None.contains(self.s_container): return s_ImpossibleValue # so far if self.variant == ("enumerate",): - s_item = self.s_container.getanyitem() + s_item = self.s_container.getanyitem(position) return 
SomeTuple((SomeInteger(nonneg=True), s_item)) variant = self.variant if variant == ("reversed",): variant = () - return self.s_container.getanyitem(*variant) + return self.s_container.getanyitem(position, *variant) next.can_only_throw = _can_only_throw method_next = next @@ -913,6 +975,12 @@ # really crash translated code). It can be generalized later. return SomeImpossibleValue() +@op.issubtype.register(SomeTypeOf) +def issubtype(annotator, v_type, v_cls): + args_v = v_type.annotation.is_type_of + return s_isinstance(annotator, args_v[0].annotation, + annotator.annotation(v_cls), args_v) + #_________________________________________ # weakrefs diff -Nru pypy-4.0.1+dfsg/rpython/config/translationoption.py pypy-5.0.1+dfsg/rpython/config/translationoption.py --- pypy-4.0.1+dfsg/rpython/config/translationoption.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/config/translationoption.py 2016-03-19 16:40:12.000000000 +0000 @@ -192,6 +192,8 @@ "If true, makes an lldebug0 build", default=False, cmdline="--lldebug0"), StrOption("icon", "Path to the (Windows) icon to use for the executable"), + StrOption("libname", + "Windows: name and possibly location of the lib file to create"), OptionDescription("backendopt", "Backend Optimization Options", [ # control inlining diff -Nru pypy-4.0.1+dfsg/rpython/doc/conf.py pypy-5.0.1+dfsg/rpython/doc/conf.py --- pypy-4.0.1+dfsg/rpython/doc/conf.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/doc/conf.py 2016-03-19 16:40:12.000000000 +0000 @@ -59,7 +59,7 @@ # General information about the project. project = u'RPython' -copyright = u'2015, The PyPy Project' +copyright = u'2016, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -121,7 +121,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". 
-#html_title = None +html_title = 'RPython Documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None diff -Nru pypy-4.0.1+dfsg/rpython/doc/rlib.rst pypy-5.0.1+dfsg/rpython/doc/rlib.rst --- pypy-4.0.1+dfsg/rpython/doc/rlib.rst 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/doc/rlib.rst 2016-03-19 16:40:12.000000000 +0000 @@ -52,7 +52,7 @@ backend emits code, the function is called to determine the value. ``CDefinedIntSymbolic``: - Instances of ``ComputedIntSymbolic`` are also treated like integers of + Instances of ``CDefinedIntSymbolic`` are also treated like integers of unknown value by the annotator. When C code is emitted they will be represented by the attribute ``expr`` of the symbolic (which is also the first argument of the constructor). diff -Nru pypy-4.0.1+dfsg/rpython/doc/translation.rst pypy-5.0.1+dfsg/rpython/doc/translation.rst --- pypy-4.0.1+dfsg/rpython/doc/translation.rst 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/doc/translation.rst 2016-03-19 16:40:12.000000000 +0000 @@ -33,7 +33,7 @@ The RPython translation toolchain never sees Python source code or syntax trees, but rather starts with the *code objects* that define the behaviour of the function objects one gives it as input. The -`bytecode evaluator`_ and the :ref:`flow graph builder` work through these +:ref:`flow graph builder` works through these code objects using `abstract interpretation`_ to produce a control flow graph (one per function): yet another representation of the source program, but one which is suitable for applying type inference @@ -85,7 +85,6 @@ .. _PDF color version: _static/translation.pdf -.. _bytecode evaluator: interpreter.html .. 
_abstract interpretation: http://en.wikipedia.org/wiki/Abstract_interpretation diff -Nru pypy-4.0.1+dfsg/rpython/flowspace/flowcontext.py pypy-5.0.1+dfsg/rpython/flowspace/flowcontext.py --- pypy-4.0.1+dfsg/rpython/flowspace/flowcontext.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/flowspace/flowcontext.py 2016-03-19 16:40:12.000000000 +0000 @@ -597,7 +597,7 @@ Returns an FSException object whose w_value is an instance of w_type. """ - w_is_type = op.simple_call(const(isinstance), w_arg1, const(type)).eval(self) + w_is_type = op.isinstance(w_arg1, const(type)).eval(self) if self.guessbool(w_is_type): # this is for all cases of the form (Class, something) if self.guessbool(op.is_(w_arg2, w_None).eval(self)): diff -Nru pypy-4.0.1+dfsg/rpython/flowspace/generator.py pypy-5.0.1+dfsg/rpython/flowspace/generator.py --- pypy-4.0.1+dfsg/rpython/flowspace/generator.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/flowspace/generator.py 2016-03-19 16:40:12.000000000 +0000 @@ -156,8 +156,7 @@ regular_entry_block = Block([Variable('entry')]) block = regular_entry_block for Resume in mappings: - op_check = op.simple_call( - const(isinstance), block.inputargs[0], const(Resume)) + op_check = op.isinstance(block.inputargs[0], const(Resume)) block.operations.append(op_check) block.exitswitch = op_check.result link1 = Link([block.inputargs[0]], Resume.block) diff -Nru pypy-4.0.1+dfsg/rpython/flowspace/objspace.py pypy-5.0.1+dfsg/rpython/flowspace/objspace.py --- pypy-4.0.1+dfsg/rpython/flowspace/objspace.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/flowspace/objspace.py 2016-03-19 16:40:12.000000000 +0000 @@ -23,7 +23,7 @@ if func.func_code.co_cellvars: raise ValueError( """RPython functions cannot create closures -Possible casues: +Possible causes: Function is inner function Function uses generator expressions Lambda expressions diff -Nru pypy-4.0.1+dfsg/rpython/flowspace/operation.py 
pypy-5.0.1+dfsg/rpython/flowspace/operation.py --- pypy-4.0.1+dfsg/rpython/flowspace/operation.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/flowspace/operation.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,5 @@ """ -This module defines all the SpaceOeprations used in rpython.flowspace. +This module defines all the SpaceOperations used in rpython.flowspace. """ import __builtin__ @@ -196,21 +196,6 @@ return cls._dispatch(type(s_arg)) @classmethod - def get_specialization(cls, s_arg, *_ignored): - try: - impl = getattr(s_arg, cls.opname) - - def specialized(annotator, arg, *other_args): - return impl(*[annotator.annotation(x) for x in other_args]) - try: - specialized.can_only_throw = impl.can_only_throw - except AttributeError: - pass - return specialized - except AttributeError: - return cls._dispatch(type(s_arg)) - - @classmethod def register_transform(cls, Some_cls): def decorator(func): cls._transform[Some_cls] = func @@ -413,6 +398,7 @@ add_operator('id', 1, dispatch=1, pyfunc=id) add_operator('type', 1, dispatch=1, pyfunc=new_style_type, pure=True) add_operator('issubtype', 2, dispatch=1, pyfunc=issubclass, pure=True) # not for old-style classes +add_operator('isinstance', 2, dispatch=1, pyfunc=isinstance, pure=True) add_operator('repr', 1, dispatch=1, pyfunc=repr, pure=True) add_operator('str', 1, dispatch=1, pyfunc=str, pure=True) add_operator('format', 2, pyfunc=unsupported) @@ -522,6 +508,14 @@ *[annotator.annotation(arg) for arg in self.args]) +class NewSlice(HLOperation): + opname = 'newslice' + canraise = [] + + def consider(self, annotator): + raise AnnotatorError("Cannot use extended slicing in rpython") + + class Pow(PureOperation): opname = 'pow' arity = 3 diff -Nru pypy-4.0.1+dfsg/rpython/flowspace/specialcase.py pypy-5.0.1+dfsg/rpython/flowspace/specialcase.py --- pypy-4.0.1+dfsg/rpython/flowspace/specialcase.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/flowspace/specialcase.py 2016-03-19 
16:40:12.000000000 +0000 @@ -1,5 +1,5 @@ import os -from rpython.flowspace.model import Constant, const +from rpython.flowspace.model import Constant SPECIAL_CASES = {} @@ -40,12 +40,6 @@ "pytest.ini from the root of the PyPy repository into your " "own project.") -@register_flow_sc(isinstance) -def sc_isinstance(ctx, w_instance, w_type): - if w_instance.foldable() and w_type.foldable(): - return const(isinstance(w_instance.value, w_type.value)) - return ctx.appcall(isinstance, w_instance, w_type) - @register_flow_sc(getattr) def sc_getattr(ctx, w_obj, w_index, w_default=None): if w_default is not None: diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/arm/assembler.py pypy-5.0.1+dfsg/rpython/jit/backend/arm/assembler.py --- pypy-4.0.1+dfsg/rpython/jit/backend/arm/assembler.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/arm/assembler.py 2016-03-19 16:40:12.000000000 +0000 @@ -956,6 +956,8 @@ regalloc.possibly_free_vars_for_op(op) regalloc.free_temp_vars() regalloc._check_invariants() + if not we_are_translated(): + self.mc.BKPT() self.mc.mark_op(None) # end of the loop regalloc.operations = None diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/arm/opassembler.py pypy-5.0.1+dfsg/rpython/jit/backend/arm/opassembler.py --- pypy-4.0.1+dfsg/rpython/jit/backend/arm/opassembler.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/arm/opassembler.py 2016-03-19 16:40:15.000000000 +0000 @@ -19,7 +19,6 @@ from rpython.jit.backend.arm.locations import imm, RawSPStackLocation from rpython.jit.backend.llsupport import symbolic from rpython.jit.backend.llsupport.gcmap import allocate_gcmap -from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.history import (AbstractFailDescr, ConstInt, @@ -655,31 +654,24 @@ pmc.B_offs(offset, c.EQ) return fcond - def 
emit_op_setfield_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs, size = arglocs - scale = get_scale(size.value) - self._write_to_mem(value_loc, base_loc, - ofs, imm(scale), fcond) - return fcond - - emit_op_setfield_raw = emit_op_setfield_gc - emit_op_zero_ptr_field = emit_op_setfield_gc - - def _genop_getfield(self, op, arglocs, regalloc, fcond): - base_loc, ofs, res, size = arglocs - signed = op.getdescr().is_field_signed() - scale = get_scale(size.value) - self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond) - return fcond - - emit_op_getfield_gc_i = _genop_getfield - emit_op_getfield_gc_r = _genop_getfield - emit_op_getfield_gc_f = _genop_getfield - emit_op_getfield_gc_pure_i = _genop_getfield - emit_op_getfield_gc_pure_r = _genop_getfield - emit_op_getfield_gc_pure_f = _genop_getfield - emit_op_getfield_raw_i = _genop_getfield - emit_op_getfield_raw_f = _genop_getfield + def emit_op_gc_store(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, ofs_loc, size_loc = arglocs + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, ofs_loc, imm(scale), fcond) + return fcond + + def _emit_op_gc_load(self, op, arglocs, regalloc, fcond): + base_loc, ofs_loc, res_loc, nsize_loc = arglocs + nsize = nsize_loc.value + signed = (nsize < 0) + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, ofs_loc, imm(scale), + signed, fcond) + return fcond + + emit_op_gc_load_i = _emit_op_gc_load + emit_op_gc_load_r = _emit_op_gc_load + emit_op_gc_load_f = _emit_op_gc_load def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): base_loc, value_loc = arglocs @@ -688,68 +680,21 @@ self.mc.STR_ri(value_loc.value, base_loc.value, 0, cond=fcond) return fcond - def _genop_getinteriorfield(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, res_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) 
- assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - descr = op.getdescr() - assert isinstance(descr, InteriorFieldDescr) - signed = descr.fielddescr.is_field_signed() - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - ofs_loc = tmploc - self._load_from_mem(res_loc, base_loc, ofs_loc, - imm(scale), signed, fcond) - return fcond - - emit_op_getinteriorfield_gc_i = _genop_getinteriorfield - emit_op_getinteriorfield_gc_r = _genop_getinteriorfield - emit_op_getinteriorfield_gc_f = _genop_getinteriorfield - - def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, value_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, index_loc, value_loc, ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - self._write_to_mem(value_loc, base_loc, tmploc, imm(scale), fcond) - return fcond - emit_op_setinteriorfield_raw = emit_op_setinteriorfield_gc - - def emit_op_arraylen_gc(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs = arglocs - self.load_reg(self.mc, res, base_loc, ofs.value) - return fcond - - def emit_op_setarrayitem_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - + def emit_op_gc_store_indexed(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() # 
add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, index_loc, imm(scale), fcond) return fcond def _write_to_mem(self, value_loc, base_loc, ofs_loc, scale, fcond=c.AL): + # Write a value of size '1 << scale' at the address + # 'base_ofs + ofs_loc'. Note that 'scale' is not used to scale + # the offset! if scale.value == 3: assert value_loc.is_vfp_reg() # vstr only supports imm offsets @@ -789,43 +734,31 @@ else: assert 0 - emit_op_setarrayitem_raw = emit_op_setarrayitem_gc - - def emit_op_raw_store(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) - return fcond - - def _genop_getarrayitem(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - signed = op.getdescr().is_item_signed() - - # scale the offset as required - # XXX we should try to encode the scale inside the "shift" part of LDR - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip + def _emit_op_gc_load_indexed(self, op, arglocs, regalloc, fcond): + res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() + nsize = nsize_loc.value + signed = (nsize < 0) # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip # - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, 
index_loc, imm(scale), + signed, fcond) return fcond - emit_op_getarrayitem_gc_i = _genop_getarrayitem - emit_op_getarrayitem_gc_r = _genop_getarrayitem - emit_op_getarrayitem_gc_f = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_i = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_r = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_f = _genop_getarrayitem - emit_op_getarrayitem_raw_i = _genop_getarrayitem - emit_op_getarrayitem_raw_f = _genop_getarrayitem + emit_op_gc_load_indexed_i = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_r = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_f = _emit_op_gc_load_indexed def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale, signed=False, fcond=c.AL): + # Load a value of '1 << scale' bytes, from the memory location + # 'base_loc + ofs_loc'. Note that 'scale' is not used to scale + # the offset! + # if scale.value == 3: assert res_loc.is_vfp_reg() # vldr only supports imm offsets @@ -881,51 +814,6 @@ else: assert 0 - def _genop_raw_load(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - # no base offset - assert ofs.value == 0 - signed = op.getdescr().is_item_signed() - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_raw_load_i = _genop_raw_load - emit_op_raw_load_f = _genop_raw_load - - def emit_op_strlen(self, op, arglocs, regalloc, fcond): - l0, l1, res = arglocs - if l1.is_imm(): - self.mc.LDR_ri(res.value, l0.value, l1.getint(), cond=fcond) - else: - self.mc.LDR_rr(res.value, l0.value, l1.value, cond=fcond) - return fcond - - def emit_op_strgetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.LDRB_ri(res.value, r.ip.value, basesize.value, cond=fcond) - 
return fcond - - def emit_op_strsetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.STRB_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - return fcond - #from ../x86/regalloc.py:928 ff. def emit_op_copystrcontent(self, op, arglocs, regalloc, fcond): assert len(arglocs) == 0 @@ -1016,35 +904,6 @@ else: raise AssertionError("bad unicode item size") - emit_op_unicodelen = emit_op_strlen - - def emit_op_unicodegetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.LDR_ri(res.value, r.ip.value, basesize.value, cond=fcond) - elif scale.value == 1: - self.mc.LDRH_ri(res.value, r.ip.value, basesize.value, cond=fcond) - else: - assert 0, itemsize.value - return fcond - - def emit_op_unicodesetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.STR_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - elif scale.value == 1: - self.mc.STRH_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - else: - assert 0, itemsize.value - - return fcond - def store_force_descr(self, op, fail_locs, frame_depth): pos = self.mc.currpos() guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL) diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/arm/regalloc.py pypy-5.0.1+dfsg/rpython/jit/backend/arm/regalloc.py --- pypy-4.0.1+dfsg/rpython/jit/backend/arm/regalloc.py 2015-11-19 19:21:39.000000000 +0000 +++ 
pypy-5.0.1+dfsg/rpython/jit/backend/arm/regalloc.py 2016-03-19 16:40:11.000000000 +0000 @@ -34,9 +34,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.rlib.rarithmetic import r_uint from rpython.jit.backend.llsupport.descr import CallDescr @@ -802,15 +799,12 @@ src_locations2, dst_locations2, vfptmploc) return [] - def prepare_op_setfield_gc(self, op, fcond): + def prepare_op_gc_store(self, op, fcond): boxes = op.getarglist() - ofs, size, sign = unpack_fielddescr(op.getdescr()) - return self._prepare_op_setfield(boxes, ofs, size) - - def _prepare_op_setfield(self, boxes, ofs, size): - a0, a1 = boxes - base_loc = self.make_sure_var_in_reg(a0, boxes) - value_loc = self.make_sure_var_in_reg(a1, boxes) + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + ofs = boxes[1].getint() + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + size = boxes[3].getint() ofs_size = default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -819,19 +813,13 @@ self.assembler.load(ofs_loc, imm(ofs)) return [value_loc, base_loc, ofs_loc, imm(size)] - prepare_op_setfield_raw = prepare_op_setfield_gc - - def prepare_op_zero_ptr_field(self, op, fcond): + def _prepare_op_gc_load(self, op, fcond): a0 = op.getarg(0) ofs = op.getarg(1).getint() - return self._prepare_op_setfield([a0, ConstInt(0)], ofs, WORD) - - def _prepare_op_getfield(self, op, fcond): - a0 = op.getarg(0) - ofs, size, sign = unpack_fielddescr(op.getdescr()) + nsize = op.getarg(2).getint() # negative for "signed" base_loc = self.make_sure_var_in_reg(a0) immofs = imm(ofs) - ofs_size = default_imm_size if size < 8 else 
VMEM_imm_size + ofs_size = default_imm_size if abs(nsize) < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: @@ -839,17 +827,12 @@ self.assembler.load(ofs_loc, immofs) self.possibly_free_vars_for_op(op) self.free_temp_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size)] + res_loc = self.force_allocate_reg(op) + return [base_loc, ofs_loc, res_loc, imm(nsize)] - prepare_op_getfield_gc_i = _prepare_op_getfield - prepare_op_getfield_gc_r = _prepare_op_getfield - prepare_op_getfield_gc_f = _prepare_op_getfield - prepare_op_getfield_raw_i = _prepare_op_getfield - prepare_op_getfield_raw_f = _prepare_op_getfield - prepare_op_getfield_gc_pure_i = _prepare_op_getfield - prepare_op_getfield_gc_pure_r = _prepare_op_getfield - prepare_op_getfield_gc_pure_f = _prepare_op_getfield + prepare_op_gc_load_i = _prepare_op_gc_load + prepare_op_gc_load_r = _prepare_op_gc_load + prepare_op_gc_load_f = _prepare_op_gc_load def prepare_op_increment_debug_counter(self, op, fcond): boxes = op.getarglist() @@ -859,188 +842,38 @@ self.free_temp_vars() return [base_loc, value_loc] - def _prepare_op_getinteriorfield(self, op, fcond): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.make_sure_var_in_reg(op.getarg(1), args) - immofs = imm(ofs) - ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size - if check_imm_arg(ofs, size=ofs_size): - ofs_loc = immofs - else: - ofs_loc = self.get_scratch_reg(INT, args) - self.assembler.load(ofs_loc, immofs) - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] - - prepare_op_getinteriorfield_gc_i = _prepare_op_getinteriorfield - prepare_op_getinteriorfield_gc_r = _prepare_op_getinteriorfield - 
prepare_op_getinteriorfield_gc_f = _prepare_op_getinteriorfield - - def prepare_op_setinteriorfield_gc(self, op, fcond): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.make_sure_var_in_reg(op.getarg(1), args) - value_loc = self.make_sure_var_in_reg(op.getarg(2), args) - immofs = imm(ofs) - ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size - if check_imm_arg(ofs, size=ofs_size): - ofs_loc = immofs - else: - ofs_loc = self.get_scratch_reg(INT, args) - self.assembler.load(ofs_loc, immofs) - return [base_loc, index_loc, value_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] - prepare_op_setinteriorfield_raw = prepare_op_setinteriorfield_gc - - def prepare_op_arraylen_gc(self, op, fcond): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - arg = op.getarg(0) - base_loc = self.make_sure_var_in_reg(arg) - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_op_setarrayitem_gc(self, op, fcond): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - scale = get_scale(size) - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(args[0], args) - value_loc = self.make_sure_var_in_reg(args[2], args) - ofs_loc = self.make_sure_var_in_reg(args[1], args) - assert check_imm_arg(ofs) - return [value_loc, base_loc, ofs_loc, imm(scale), imm(ofs)] - prepare_op_setarrayitem_raw = prepare_op_setarrayitem_gc - prepare_op_raw_store = prepare_op_setarrayitem_gc - - def _prepare_op_getarrayitem(self, op, fcond): + def prepare_op_gc_store_indexed(self, op, fcond): boxes = op.getarglist() - size, ofs, _ = unpack_arraydescr(op.getdescr()) - scale = get_scale(size) base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - 
self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + index_loc = self.make_sure_var_in_reg(boxes[1], boxes) + assert boxes[3].getint() == 1 # scale + ofs = boxes[4].getint() + size = boxes[5].getint() assert check_imm_arg(ofs) - return [res, base_loc, ofs_loc, imm(scale), imm(ofs)] - - prepare_op_getarrayitem_gc_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_r = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_f = _prepare_op_getarrayitem - prepare_op_getarrayitem_raw_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_raw_f = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_r = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_f = _prepare_op_getarrayitem - prepare_op_raw_load_i = _prepare_op_getarrayitem - prepare_op_raw_load_f = _prepare_op_getarrayitem + return [value_loc, base_loc, index_loc, imm(size), imm(ofs)] - def prepare_op_strlen(self, op, fcond): - args = op.getarglist() - l0 = self.make_sure_var_in_reg(op.getarg(0)) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - immofs = imm(ofs_length) - if check_imm_arg(ofs_length): - l1 = immofs - else: - l1 = self.get_scratch_reg(INT, args) - self.assembler.load(l1, immofs) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - - res = self.force_allocate_reg(op) - self.possibly_free_var(op) - return [l0, l1, res] - - def prepare_op_strgetitem(self, op, fcond): + def _prepare_op_gc_load_indexed(self, op, fcond): boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0]) - - a1 = boxes[1] - imm_a1 = check_imm_box(a1) - if imm_a1: - ofs_loc = self.convert_to_imm(a1) - else: - ofs_loc = self.make_sure_var_in_reg(a1, boxes) - + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + index_loc = self.make_sure_var_in_reg(boxes[1], boxes) 
+ assert boxes[2].getint() == 1 # scale + ofs = boxes[3].getint() + nsize = boxes[4].getint() + assert check_imm_arg(ofs) self.possibly_free_vars_for_op(op) self.free_temp_vars() - res = self.force_allocate_reg(op) + res_loc = self.force_allocate_reg(op) + return [res_loc, base_loc, index_loc, imm(nsize), imm(ofs)] - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - assert itemsize == 1 - return [res, base_loc, ofs_loc, imm(basesize)] - - def prepare_op_strsetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - assert itemsize == 1 - return [value_loc, base_loc, ofs_loc, imm(basesize)] + prepare_op_gc_load_indexed_i = _prepare_op_gc_load_indexed + prepare_op_gc_load_indexed_r = _prepare_op_gc_load_indexed + prepare_op_gc_load_indexed_f = _prepare_op_gc_load_indexed prepare_op_copystrcontent = void prepare_op_copyunicodecontent = void prepare_op_zero_array = void - def prepare_op_unicodelen(self, op, fcond): - l0 = self.make_sure_var_in_reg(op.getarg(0)) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - immofs = imm(ofs_length) - if check_imm_arg(ofs_length): - l1 = immofs - else: - l1 = self.get_scratch_reg(INT, [op.getarg(0)]) - self.assembler.load(l1, immofs) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - return [l0, l1, res] - - def prepare_op_unicodegetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - - basesize, 
itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - scale = itemsize / 2 - return [res, base_loc, ofs_loc, - imm(scale), imm(basesize), imm(itemsize)] - - def prepare_op_unicodesetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - scale = itemsize / 2 - return [value_loc, base_loc, ofs_loc, - imm(scale), imm(basesize), imm(itemsize)] - def _prepare_op_same_as(self, op, fcond): arg = op.getarg(0) imm_arg = check_imm_box(arg) @@ -1142,8 +975,7 @@ def prepare_op_cond_call_gc_wb(self, op, fcond): # we force all arguments in a reg because it will be needed anyway by - # the following setfield_gc or setarrayitem_gc. It avoids loading it - # twice from the memory. + # the following gc_store. It avoids loading it twice from the memory. N = op.numargs() args = op.getarglist() arglocs = [self.make_sure_var_in_reg(op.getarg(i), args) diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/arm/runner.py pypy-5.0.1+dfsg/rpython/jit/backend/arm/runner.py --- pypy-4.0.1+dfsg/rpython/jit/backend/arm/runner.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/arm/runner.py 2016-03-19 16:40:11.000000000 +0000 @@ -29,6 +29,10 @@ float_regs = VFPRegisterManager.all_regs frame_reg = fp + # can an ISA instruction handle a factor to the offset? 
+ # XXX should be: tuple(1 << i for i in range(31)) + load_supported_factors = (1,) + def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): AbstractLLCPU.__init__(self, rtyper, stats, opts, diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/arm/test/test_regalloc.py pypy-5.0.1+dfsg/rpython/jit/backend/arm/test/test_regalloc.py --- pypy-4.0.1+dfsg/rpython/jit/backend/arm/test/test_regalloc.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/arm/test/test_regalloc.py 2016-03-19 16:40:11.000000000 +0000 @@ -479,7 +479,7 @@ i13 = int_eq(i5, i6) i14 = int_gt(i6, i2) i15 = int_ne(i2, i6) - guard_true(i15) [i10, i11, i12, i13, i14, i15] + guard_true(i0) [i10, i11, i12, i13, i14, i15] ''' self.interpret(ops, [0, 1, 2, 3, 4, 5, 6]) @@ -616,7 +616,7 @@ i7 = float_ne(f7, 0.0) i8 = float_ne(f8, 0.0) i9 = float_ne(f9, 0.0) - guard_true(i9), [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] + guard_false(i9), [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] ''' self.interpret(ops, [0.0, .1, .2, .3, .4, .5, .6, .7, .8, .9]) assert self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1] @@ -631,7 +631,7 @@ ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] i10 = call_i(ConstClass(f1ptr), i0, descr=f1_calldescr) - guard_true(i10), [i10, i1, i2, i3, i4, i5, i6, i7, i8, i9] + guard_false(i10), [i10, i1, i2, i3, i4, i5, i6, i7, i8, i9] ''' self.interpret(ops, [4, 7, 9, 9, 9, 9, 9, 9, 9, 9]) assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9] @@ -641,7 +641,7 @@ [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] i10 = call_i(ConstClass(f1ptr), i0, descr=f1_calldescr) i11 = call_i(ConstClass(f2ptr), i10, i1, descr=f2_calldescr) - guard_true(i11) [i11, i1, i2, i3, i4, i5, i6, i7, i8, i9] + guard_false(i11) [i11, i1, i2, i3, i4, i5, i6, i7, i8, i9] ''' self.interpret(ops, [4, 7, 9, 9, 9, 9, 9, 9, 9, 9]) assert self.getints(10) == [5 * 7, 7, 9, 9, 9, 9, 9, 9, 9, 9] diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/arm/test/test_runner.py 
pypy-5.0.1+dfsg/rpython/jit/backend/arm/test/test_runner.py --- pypy-4.0.1+dfsg/rpython/jit/backend/arm/test/test_runner.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/arm/test/test_runner.py 2016-03-19 16:40:11.000000000 +0000 @@ -26,12 +26,12 @@ # for the individual tests see # ====> ../../test/runner_test.py - add_loop_instructions = 'ldr; adds; cmp; beq; b;$' + add_loop_instructions = 'ldr; adds; cmp; beq; b;' arch_version = detect_arch_version() if arch_version == 7: bridge_loop_instructions = ('ldr; movw; nop; cmp; bge; ' 'push; movw; movt; push; movw; movt; ' - 'blx; movw; movt; bx;$') + 'blx; movw; movt; bx;') else: bridge_loop_instructions = ('ldr; mov; nop; nop; nop; cmp; bge; ' 'push; ldr; mov; ' @@ -40,7 +40,7 @@ '[^;]+; ' # inline constant 'blx; ldr; mov; ' '[^;]+; ' # inline constant - 'bx;$') + 'bx;') def get_cpu(self): cpu = CPU(rtyper=None, stats=FakeStats()) diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llgraph/runner.py pypy-5.0.1+dfsg/rpython/jit/backend/llgraph/runner.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llgraph/runner.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llgraph/runner.py 2016-03-19 16:40:11.000000000 +0000 @@ -13,6 +13,7 @@ from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper import rclass from rpython.rlib.clibffi import FFI_DEFAULT_ABI @@ -151,7 +152,7 @@ self.fieldname = fieldname self.FIELD = getattr(S, fieldname) self.index = heaptracker.get_fielddescr_index_in(S, fieldname) - self._is_pure = S._immutable_field(fieldname) + self._is_pure = S._immutable_field(fieldname) != False def is_always_pure(self): return self._is_pure @@ -607,9 +608,6 @@ p = support.cast_arg(lltype.Ptr(descr.S), p) return support.cast_result(descr.FIELD, getattr(p, descr.fieldname)) - bh_getfield_gc_pure_i = bh_getfield_gc - 
bh_getfield_gc_pure_r = bh_getfield_gc - bh_getfield_gc_pure_f = bh_getfield_gc bh_getfield_gc_i = bh_getfield_gc bh_getfield_gc_r = bh_getfield_gc bh_getfield_gc_f = bh_getfield_gc @@ -705,6 +703,25 @@ else: return self.bh_raw_load_i(struct, offset, descr) + def bh_gc_load_indexed_i(self, struct, index, scale, base_ofs, bytes): + if bytes == 1: T = rffi.UCHAR + elif bytes == 2: T = rffi.USHORT + elif bytes == 4: T = rffi.UINT + elif bytes == 8: T = rffi.ULONGLONG + elif bytes == -1: T = rffi.SIGNEDCHAR + elif bytes == -2: T = rffi.SHORT + elif bytes == -4: T = rffi.INT + elif bytes == -8: T = rffi.LONGLONG + else: raise NotImplementedError(bytes) + x = llop.gc_load_indexed(T, struct, index, scale, base_ofs) + return lltype.cast_primitive(lltype.Signed, x) + + def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes): + if bytes != 8: + raise Exception("gc_load_indexed_f is only for 'double'!") + return llop.gc_load_indexed(longlong.FLOATSTORAGE, + struct, index, scale, base_ofs) + def bh_increment_debug_counter(self, addr): p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) p[0] += 1 @@ -1148,17 +1165,23 @@ self.do_renaming(argboxes, args) def _test_true(self, arg): - if isinstance(arg, list): - return all(arg) assert arg in (0, 1) return arg def _test_false(self, arg): - if isinstance(arg, list): - return any(arg) assert arg in (0, 1) return arg + def execute_vec_guard_true(self, descr, arg): + assert isinstance(arg, list) + if not all(arg): + self.fail_guard(descr) + + def execute_vec_guard_false(self, descr, arg): + assert isinstance(arg, list) + if any(arg): + self.fail_guard(descr) + def execute_guard_true(self, descr, arg): if not self._test_true(arg): self.fail_guard(descr) diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/assembler.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/assembler.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/assembler.py 2015-11-19 19:21:39.000000000 +0000 +++ 
pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/assembler.py 2016-03-19 16:40:11.000000000 +0000 @@ -380,6 +380,8 @@ # the call that it is no longer equal to css. See description # in translator/c/src/thread_pthread.c. + # XXX some duplicated logic here, but note that rgil.acquire() + # does more than just RPyGilAcquire() if old_rpy_fastgil == 0: # this case occurs if some other thread stole the GIL but # released it again. What occurred here is that we changed @@ -390,9 +392,8 @@ elif old_rpy_fastgil == 1: # 'rpy_fastgil' was (and still is) locked by someone else. # We need to wait for the regular mutex. - after = rffi.aroundstate.after - if after: - after() + from rpython.rlib import rgil + rgil.acquire() else: # stole the GIL from a different thread that is also # currently in an external call from the jit. Attach @@ -421,9 +422,8 @@ # 'rpy_fastgil' contains only zero or non-zero, and this is only # called when the old value stored in 'rpy_fastgil' was non-zero # (i.e. still locked, must wait with the regular mutex) - after = rffi.aroundstate.after - if after: - after() + from rpython.rlib import rgil + rgil.acquire() _REACQGIL0_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) _REACQGIL2_FUNC = lltype.Ptr(lltype.FuncType([rffi.CCHARP, lltype.Signed], diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/descr.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/descr.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/descr.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/descr.py 2016-03-19 16:40:11.000000000 +0000 @@ -180,7 +180,8 @@ return self.offset def repr_of_descr(self): - return '' % (self.flag, self.name, self.offset) + ispure = " pure" if self._is_pure else "" + return '' % (self.flag, self.name, self.offset, ispure) def get_parent_descr(self): return self.parent_descr @@ -200,7 +201,7 @@ flag = get_type_flag(FIELDTYPE) name = '%s.%s' % (STRUCT._name, fieldname) index_in_parent = 
heaptracker.get_fielddescr_index_in(STRUCT, fieldname) - is_pure = bool(STRUCT._immutable_field(fieldname)) + is_pure = STRUCT._immutable_field(fieldname) != False fielddescr = FieldDescr(name, offset, size, flag, index_in_parent, is_pure) cachedict = cache.setdefault(STRUCT, {}) diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/gc.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/gc.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/gc.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/gc.py 2016-03-19 16:40:15.000000000 +0000 @@ -13,10 +13,11 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.backend.llsupport import symbolic, jitframe from rpython.jit.backend.llsupport.symbolic import WORD -from rpython.jit.backend.llsupport.descr import SizeDescr, ArrayDescr +from rpython.jit.backend.llsupport.descr import SizeDescr, ArrayDescr, FieldDescr from rpython.jit.backend.llsupport.descr import GcCache, get_field_descr from rpython.jit.backend.llsupport.descr import get_array_descr from rpython.jit.backend.llsupport.descr import get_call_descr +from rpython.jit.backend.llsupport.descr import unpack_arraydescr from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler from rpython.memory.gctransform import asmgcroot from rpython.jit.codewriter.effectinfo import EffectInfo @@ -161,10 +162,15 @@ # assert to make sure we got what we expected assert isinstance(v, ConstPtr) array_index = moving_obj_tracker.get_array_index(v) - load_op = ResOperation(rop.GETARRAYITEM_GC_R, - [moving_obj_tracker.const_ptr_gcref_array, - ConstInt(array_index)], - descr=moving_obj_tracker.ptr_array_descr) + + size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) + scale = size + args = [moving_obj_tracker.const_ptr_gcref_array, + ConstInt(array_index), + ConstInt(scale), + ConstInt(offset), + ConstInt(size)] + load_op = ResOperation(rop.GC_LOAD_INDEXED_R, args) newops.append(load_op) 
op.setarg(arg_i, load_op) # @@ -460,7 +466,7 @@ def _initialize_for_tests(self): self.layoutbuilder = None - self.fielddescr_tid = AbstractDescr() + self.fielddescr_tid = FieldDescr("test_tid",0,8,0) self.max_size_of_young_obj = 1000 self.GCClass = None diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/llmodel.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/llmodel.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/llmodel.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/llmodel.py 2016-03-19 16:40:11.000000000 +0000 @@ -32,6 +32,9 @@ done_with_this_frame_descr_void = None exit_frame_with_exception_descr_ref = None + # can an ISA instruction handle a factor to the offset? + load_supported_factors = (1,) + vector_extension = False vector_register_size = 0 # in bytes vector_horizontal_operations = False @@ -722,6 +725,16 @@ def bh_raw_load_f(self, addr, offset, descr): return self.read_float_at_mem(addr, offset) + def bh_gc_load_indexed_i(self, addr, index, scale, base_ofs, bytes): + offset = base_ofs + scale * index + return self.read_int_at_mem(addr, offset, abs(bytes), bytes < 0) + + def bh_gc_load_indexed_f(self, addr, index, scale, base_ofs, bytes): + # only for 'double'! 
+ assert bytes == rffi.sizeof(lltype.Float) + offset = base_ofs + scale * index + return self.read_float_at_mem(addr, offset) + def bh_new(self, sizedescr): return self.gc_ll_descr.gc_malloc(sizedescr) diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/regalloc.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/regalloc.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/regalloc.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/regalloc.py 2016-03-19 16:40:11.000000000 +0000 @@ -666,6 +666,7 @@ self.rm._sync_var(op.getarg(1)) return [self.loc(op.getarg(0)), self.fm.loc(op.getarg(1))] else: + assert op.numargs() == 1 return [self.loc(op.getarg(0))] diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/rewrite.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/rewrite.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/rewrite.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/rewrite.py 2016-03-19 16:40:15.000000000 +0000 @@ -1,15 +1,18 @@ from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib.rarithmetic import ovfcheck -from rpython.rtyper.lltypesystem import llmemory, lltype +from rpython.rlib.rarithmetic import ovfcheck, highest_bit +from rpython.rtyper.lltypesystem import llmemory, lltype, rstr from rpython.jit.metainterp import history from rpython.jit.metainterp.history import ConstInt, ConstPtr from rpython.jit.metainterp.resoperation import ResOperation, rop, OpHelpers from rpython.jit.codewriter import heaptracker -from rpython.jit.backend.llsupport.symbolic import WORD +from rpython.jit.backend.llsupport.symbolic import (WORD, + get_array_token) from rpython.jit.backend.llsupport.descr import SizeDescr, ArrayDescr,\ FLAG_POINTER from rpython.jit.metainterp.history import JitCellToken +from rpython.jit.backend.llsupport.descr import (unpack_arraydescr, + unpack_fielddescr, unpack_interiorfielddescr) FLAG_ARRAY = 0 FLAG_STR = 1 
@@ -112,6 +115,186 @@ assert not op.get_forwarded() op.set_forwarded(newop) + def handle_setarrayitem(self, op): + itemsize, basesize, _ = unpack_arraydescr(op.getdescr()) + ptr_box = op.getarg(0) + index_box = op.getarg(1) + value_box = op.getarg(2) + self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, + itemsize, itemsize, basesize) + + def emit_gc_store_or_indexed(self, op, ptr_box, index_box, value_box, + itemsize, factor, offset): + factor, offset, index_box = \ + self._emit_mul_if_factor_offset_not_supported(index_box, + factor, offset) + # + if index_box is None: + args = [ptr_box, ConstInt(offset), value_box, ConstInt(itemsize)] + newload = ResOperation(rop.GC_STORE, args) + else: + args = [ptr_box, index_box, value_box, ConstInt(factor), + ConstInt(offset), ConstInt(itemsize)] + newload = ResOperation(rop.GC_STORE_INDEXED, args) + if op is not None: + self.replace_op_with(op, newload) + else: + self.emit_op(newload) + + def handle_getarrayitem(self, op): + itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) + ptr_box = op.getarg(0) + index_box = op.getarg(1) + self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, itemsize, ofs, sign) + + def handle_rawload(self, op): + itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) + ptr_box = op.getarg(0) + index_box = op.getarg(1) + self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) + + def _emit_mul_if_factor_offset_not_supported(self, index_box, + factor, offset): + # Returns (factor, offset, index_box) where index_box is either + # a non-constant BoxInt or None. 
+ if isinstance(index_box, ConstInt): + return 1, index_box.value * factor + offset, None + else: + if factor != 1 and factor not in self.cpu.load_supported_factors: + # the factor is supported by the cpu + # x & (x - 1) == 0 is a quick test for power of 2 + assert factor > 0 + if (factor & (factor - 1)) == 0: + index_box = ResOperation(rop.INT_LSHIFT, + [index_box, ConstInt(highest_bit(factor))]) + else: + index_box = ResOperation(rop.INT_MUL, + [index_box, ConstInt(factor)]) + self.emit_op(index_box) + factor = 1 + return factor, offset, index_box + + def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, + factor, offset, sign, type='i'): + factor, offset, index_box = \ + self._emit_mul_if_factor_offset_not_supported(index_box, + factor, offset) + # + if sign: + # encode signed into the itemsize value + itemsize = -itemsize + # + optype = type + if op is not None: + optype = op.type + if index_box is None: + args = [ptr_box, ConstInt(offset), ConstInt(itemsize)] + newload = ResOperation(OpHelpers.get_gc_load(optype), args) + else: + args = [ptr_box, index_box, ConstInt(factor), + ConstInt(offset), ConstInt(itemsize)] + newload = ResOperation(OpHelpers.get_gc_load_indexed(optype), args) + if op is None: + self.emit_op(newload) + else: + self.replace_op_with(op, newload) + return newload + + def transform_to_gc_load(self, op): + NOT_SIGNED = 0 + CINT_ZERO = ConstInt(0) + if op.is_getarrayitem() or \ + op.getopnum() in (rop.GETARRAYITEM_RAW_I, + rop.GETARRAYITEM_RAW_F): + self.handle_getarrayitem(op) + elif op.getopnum() in (rop.SETARRAYITEM_GC, rop.SETARRAYITEM_RAW): + self.handle_setarrayitem(op) + elif op.getopnum() == rop.RAW_STORE: + itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) + ptr_box = op.getarg(0) + index_box = op.getarg(1) + value_box = op.getarg(2) + self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, itemsize, 1, ofs) + elif op.getopnum() in (rop.RAW_LOAD_I, rop.RAW_LOAD_F): + itemsize, ofs, sign = 
unpack_arraydescr(op.getdescr()) + ptr_box = op.getarg(0) + index_box = op.getarg(1) + self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) + elif op.getopnum() in (rop.GETINTERIORFIELD_GC_I, rop.GETINTERIORFIELD_GC_R, + rop.GETINTERIORFIELD_GC_F): + ofs, itemsize, fieldsize, sign = unpack_interiorfielddescr(op.getdescr()) + ptr_box = op.getarg(0) + index_box = op.getarg(1) + self.emit_gc_load_or_indexed(op, ptr_box, index_box, fieldsize, itemsize, ofs, sign) + elif op.getopnum() in (rop.SETINTERIORFIELD_RAW, rop.SETINTERIORFIELD_GC): + ofs, itemsize, fieldsize, sign = unpack_interiorfielddescr(op.getdescr()) + ptr_box = op.getarg(0) + index_box = op.getarg(1) + value_box = op.getarg(2) + self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, + fieldsize, itemsize, ofs) + elif op.getopnum() in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, + rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): + ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) + ptr_box = op.getarg(0) + if op.getopnum() in (rop.GETFIELD_GC_F, rop.GETFIELD_GC_I, rop.GETFIELD_GC_R): + # See test_zero_ptr_field_before_getfield(). We hope there is + # no getfield_gc in the middle of initialization code, but there + # shouldn't be, given that a 'new' is already delayed by previous + # optimization steps. In practice it should immediately be + # followed by a bunch of 'setfields', and the 'pending_zeros' + # optimization we do here is meant for this case. 
+ self.emit_pending_zeros() + self.emit_gc_load_or_indexed(op, ptr_box, ConstInt(0), itemsize, 1, ofs, sign) + self.emit_op(op) + return True + self.emit_gc_load_or_indexed(op, ptr_box, ConstInt(0), itemsize, 1, ofs, sign) + elif op.getopnum() in (rop.SETFIELD_GC, rop.SETFIELD_RAW): + ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) + ptr_box = op.getarg(0) + value_box = op.getarg(1) + self.emit_gc_store_or_indexed(op, ptr_box, ConstInt(0), value_box, itemsize, 1, ofs) + elif op.getopnum() == rop.ARRAYLEN_GC: + descr = op.getdescr() + assert isinstance(descr, ArrayDescr) + ofs = descr.lendescr.offset + self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), + WORD, 1, ofs, NOT_SIGNED) + elif op.getopnum() == rop.STRLEN: + basesize, itemsize, ofs_length = get_array_token(rstr.STR, + self.cpu.translate_support_code) + self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), + WORD, 1, ofs_length, NOT_SIGNED) + elif op.getopnum() == rop.UNICODELEN: + basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, + self.cpu.translate_support_code) + self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), + WORD, 1, ofs_length, NOT_SIGNED) + elif op.getopnum() == rop.STRGETITEM: + basesize, itemsize, ofs_length = get_array_token(rstr.STR, + self.cpu.translate_support_code) + assert itemsize == 1 + self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1), + itemsize, itemsize, basesize, NOT_SIGNED) + elif op.getopnum() == rop.UNICODEGETITEM: + basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, + self.cpu.translate_support_code) + self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1), + itemsize, itemsize, basesize, NOT_SIGNED) + elif op.getopnum() == rop.STRSETITEM: + basesize, itemsize, ofs_length = get_array_token(rstr.STR, + self.cpu.translate_support_code) + assert itemsize == 1 + self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2), + itemsize, itemsize, basesize) + elif op.getopnum() == rop.UNICODESETITEM: 
+ basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, + self.cpu.translate_support_code) + self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2), + itemsize, itemsize, basesize) + return False + + def rewrite(self, operations): # we can only remember one malloc since the next malloc can possibly # collect; but we can try to collapse several known-size mallocs into @@ -128,10 +311,8 @@ continue if op is self._changed_op: op = self._changed_op_to - # ---------- GETFIELD_GC ---------- - if op.getopnum() in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, - rop.GETFIELD_GC_R): - self.handle_getfield_gc(op) + # ---------- GC_LOAD/STORE transformations -------------- + if self.transform_to_gc_load(op): continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- if op.is_malloc(): @@ -221,18 +402,6 @@ # ---------- - def handle_getfield_gc(self, op): - """See test_zero_ptr_field_before_getfield(). We hope there is - no getfield_gc in the middle of initialization code, but there - shouldn't be, given that a 'new' is already delayed by previous - optimization steps. 
In practice it should immediately be - followed by a bunch of 'setfields', and the 'pending_zeros' - optimization we do here is meant for this case.""" - self.emit_pending_zeros() - self.emit_op(op) - - # ---------- - def handle_malloc_operation(self, op): opnum = op.getopnum() if opnum == rop.NEW: @@ -241,10 +410,8 @@ descr = op.getdescr() self.handle_new_fixedsize(descr, op) if self.gc_ll_descr.fielddescr_vtable is not None: - op = ResOperation(rop.SETFIELD_GC, - [op, ConstInt(descr.get_vtable())], - descr=self.gc_ll_descr.fielddescr_vtable) - self.emit_op(op) + self.emit_setfield(op, ConstInt(descr.get_vtable()), + descr=self.gc_ll_descr.fielddescr_vtable) elif opnum == rop.NEW_ARRAY or opnum == rop.NEW_ARRAY_CLEAR: descr = op.getdescr() assert isinstance(descr, ArrayDescr) @@ -295,9 +462,7 @@ hash_descr = self.gc_ll_descr.unicode_hash_descr else: return - op = ResOperation(rop.SETFIELD_GC, [result, self.c_zero], - descr=hash_descr) - self.emit_op(op) + self.emit_setfield(result, self.c_zero, descr=hash_descr) def handle_new_fixedsize(self, descr, op): assert isinstance(descr, SizeDescr) @@ -356,7 +521,9 @@ return # the ZERO_ARRAY operation will be optimized according to what # SETARRAYITEM_GC we see before the next allocation operation. - # See emit_pending_zeros(). + # See emit_pending_zeros(). (This optimization is done by + # hacking the object 'o' in-place: e.g., o.getarg(1) may be + # replaced with another constant greater than 0.) 
o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], descr=arraydescr) self.emit_op(o) @@ -366,48 +533,57 @@ def gen_malloc_frame(self, frame_info): descrs = self.gc_ll_descr.getframedescrs(self.cpu) if self.gc_ll_descr.kind == 'boehm': - size = ResOperation(rop.GETFIELD_RAW_I, - [history.ConstInt(frame_info)], - descr=descrs.jfi_frame_depth) + ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_depth) + if sign: + size = -size + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = ResOperation(rop.NEW_ARRAY, [size], - descr=descrs.arraydescr) + descr=descrs.arraydescr) self.handle_new_array(descrs.arraydescr, frame) return self.get_box_replacement(frame) else: # we read size in bytes here, not the length - size = ResOperation(rop.GETFIELD_RAW_I, - [history.ConstInt(frame_info)], - descr=descrs.jfi_frame_size) + ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_size) + if sign: + size = -size + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = self.gen_malloc_nursery_varsize_frame(size) self.gen_initialize_tid(frame, descrs.arraydescr.tid) # we need to explicitely zero all the gc fields, because # of the unusal malloc pattern - length = ResOperation(rop.GETFIELD_RAW_I, - [history.ConstInt(frame_info)], - descr=descrs.jfi_frame_depth) - extra_ops = [ - length, - ResOperation(rop.SETFIELD_GC, [frame, self.c_zero], - descr=descrs.jf_extra_stack_depth), - ResOperation(rop.SETFIELD_GC, [frame, self.c_null], - descr=descrs.jf_savedata), - ResOperation(rop.SETFIELD_GC, [frame, self.c_null], - descr=descrs.jf_force_descr), - ResOperation(rop.SETFIELD_GC, [frame, self.c_null], - descr=descrs.jf_descr), - ResOperation(rop.SETFIELD_GC, [frame, self.c_null], - descr=descrs.jf_guard_exc), - ResOperation(rop.SETFIELD_GC, [frame, self.c_null], - descr=descrs.jf_forward), - ] - for op in extra_ops: - 
self.emit_op(op) + + length = self.emit_getfield(ConstInt(frame_info), + descr=descrs.jfi_frame_depth, raw=True) + self.emit_setfield(frame, self.c_zero, + descr=descrs.jf_extra_stack_depth) + self.emit_setfield(frame, self.c_null, + descr=descrs.jf_savedata) + self.emit_setfield(frame, self.c_null, + descr=descrs.jf_force_descr) + self.emit_setfield(frame, self.c_null, + descr=descrs.jf_descr) + self.emit_setfield(frame, self.c_null, + descr=descrs.jf_guard_exc) + self.emit_setfield(frame, self.c_null, + descr=descrs.jf_forward) self.gen_initialize_len(frame, length, descrs.arraydescr.lendescr) return self.get_box_replacement(frame) + def emit_getfield(self, ptr, descr, type='i', raw=False): + ofs, size, sign = unpack_fielddescr(descr) + op = self.emit_gc_load_or_indexed(None, ptr, ConstInt(0), size, 1, ofs, sign) + return op + + def emit_setfield(self, ptr, value, descr): + ofs, size, sign = unpack_fielddescr(descr) + self.emit_gc_store_or_indexed(None, ptr, ConstInt(0), value, + size, 1, ofs) + def handle_call_assembler(self, op): descrs = self.gc_ll_descr.getframedescrs(self.cpu) loop_token = op.getdescr() @@ -415,20 +591,21 @@ jfi = loop_token.compiled_loop_token.frame_info llfi = heaptracker.adr2int(llmemory.cast_ptr_to_adr(jfi)) frame = self.gen_malloc_frame(llfi) - op2 = ResOperation(rop.SETFIELD_GC, [frame, history.ConstInt(llfi)], + self.emit_setfield(frame, history.ConstInt(llfi), descr=descrs.jf_frame_info) - self.emit_op(op2) arglist = op.getarglist() index_list = loop_token.compiled_loop_token._ll_initial_locs for i, arg in enumerate(arglist): descr = self.cpu.getarraydescr_for_frame(arg.type) assert self.cpu.JITFRAME_FIXED_SIZE & 1 == 0 _, itemsize, _ = self.cpu.unpack_arraydescr_size(descr) - index = index_list[i] // itemsize # index is in bytes - self.emit_op(ResOperation(rop.SETARRAYITEM_GC, - [frame, ConstInt(index), - arg], - descr)) + array_offset = index_list[i] # index, already measured in bytes + # emit GC_STORE + _, basesize, _ = 
unpack_arraydescr(descr) + offset = basesize + array_offset + args = [frame, ConstInt(offset), arg, ConstInt(itemsize)] + self.emit_op(ResOperation(rop.GC_STORE, args)) + descr = op.getdescr() assert isinstance(descr, JitCellToken) jd = descr.outermost_jitdriver_sd @@ -438,7 +615,7 @@ else: args = [frame] call_asm = ResOperation(op.getopnum(), args, - op.getdescr()) + op.getdescr()) self.replace_op_with(self.get_box_replacement(op), call_asm) self.emit_op(call_asm) @@ -485,12 +662,12 @@ del self.last_zero_arrays[:] self._setarrayitems_occurred.clear() # - # Then write the ZERO_PTR_FIELDs that are still pending + # Then write the NULL-pointer-writing ops that are still pending for v, d in self._delayed_zero_setfields.iteritems(): v = self.get_box_replacement(v) for ofs in d.iterkeys(): - op = ResOperation(rop.ZERO_PTR_FIELD, [v, ConstInt(ofs)], None) - self.emit_op(op) + self.emit_gc_store_or_indexed(None, v, ConstInt(ofs), ConstInt(0), + WORD, 1, 0) self._delayed_zero_setfields.clear() def _gen_call_malloc_gc(self, args, v_result, descr): @@ -641,15 +818,12 @@ def gen_initialize_tid(self, v_newgcobj, tid): if self.gc_ll_descr.fielddescr_tid is not None: # produce a SETFIELD to initialize the GC header - op = ResOperation(rop.SETFIELD_GC, - [v_newgcobj, ConstInt(tid)], - descr=self.gc_ll_descr.fielddescr_tid) - self.emit_op(op) + self.emit_setfield(v_newgcobj, ConstInt(tid), + descr=self.gc_ll_descr.fielddescr_tid) def gen_initialize_len(self, v_newgcobj, v_length, arraylen_descr): # produce a SETFIELD to initialize the array length - self.emit_op(ResOperation(rop.SETFIELD_GC, [v_newgcobj, v_length], - descr=arraylen_descr)) + self.emit_setfield(v_newgcobj, v_length, descr=arraylen_descr) # ---------- diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/test_gc_integration.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/test_gc_integration.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/test_gc_integration.py 2015-11-19 19:21:39.000000000 
+0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/test_gc_integration.py 2016-03-19 16:40:15.000000000 +0000 @@ -17,7 +17,6 @@ from rpython.jit.backend.llsupport.test.test_regalloc_integration import BaseTestRegalloc from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.codewriter import longlong -from rpython.rlib.objectmodel import invoke_around_extcall CPU = getcpuclass() @@ -251,7 +250,7 @@ p0 = call_malloc_nursery_varsize_frame(i0) p1 = call_malloc_nursery_varsize_frame(i1) p2 = call_malloc_nursery_varsize_frame(i2) - guard_true(i0) [p0, p1, p2] + guard_false(i0) [p0, p1, p2] ''' self.interpret(ops, [16, 32, 16]) # check the returned pointers @@ -625,9 +624,6 @@ self.S = S self.cpu = cpu - def teardown_method(self, meth): - rffi.aroundstate._cleanup_() - def test_shadowstack_call(self): cpu = self.cpu cpu.gc_ll_descr.init_nursery(100) @@ -720,7 +716,7 @@ [i0, p0] p = force_token() cond_call(i0, ConstClass(funcptr), i0, p, descr=calldescr) - guard_true(i0, descr=faildescr) [p0] + guard_false(i0, descr=faildescr) [p0] """, namespace={ 'faildescr': BasicFailDescr(), 'funcptr': checkptr, diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py 2016-03-19 16:40:15.000000000 +0000 @@ -127,8 +127,8 @@ i0 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) """, """ [] - p1 = getarrayitem_gc_r(ConstPtr(ptr_array_gcref), 0, descr=ptr_array_descr) - i0 = getfield_gc_i(p1, descr=pinned_obj_my_int_descr) + p1 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), 0, %(ptr_array_descr.itemsize)s, 1, %(ptr_array_descr.itemsize)s) + i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) """) assert 
len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 1 @@ -140,10 +140,10 @@ i2 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) """, """ [] - p1 = getarrayitem_gc_r(ConstPtr(ptr_array_gcref), 0, descr=ptr_array_descr) - i0 = getfield_gc_i(p1, descr=pinned_obj_my_int_descr) - i1 = getfield_gc_i(ConstPtr(notpinned_obj_gcref), descr=notpinned_obj_my_int_descr) - p2 = getarrayitem_gc_r(ConstPtr(ptr_array_gcref), 1, descr=ptr_array_descr) - i2 = getfield_gc_i(p2, descr=pinned_obj_my_int_descr) + p1 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), 0, %(ptr_array_descr.itemsize)s, 1, %(ptr_array_descr.itemsize)s) + i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) + i1 = gc_load_i(ConstPtr(notpinned_obj_gcref), 0, -%(notpinned_obj_my_int_descr.field_size)s) + p2 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), 1, %(ptr_array_descr.itemsize)s, 1, %(ptr_array_descr.itemsize)s) + i2 = gc_load_i(p2, 0, -%(pinned_obj_my_int_descr.field_size)s) """) assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 2 diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/test_rewrite.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/test_rewrite.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/test_rewrite.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/test_rewrite.py 2016-03-19 16:40:15.000000000 +0000 @@ -1,3 +1,4 @@ +import py from rpython.jit.backend.llsupport.descr import get_size_descr,\ get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\ SizeDescr, get_interiorfield_descr @@ -12,6 +13,8 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper import rclass from rpython.jit.backend.x86.arch import WORD +from rpython.jit.backend.llsupport.symbolic import (WORD, + get_array_token) class Evaluator(object): def __init__(self, scope): @@ -27,6 +30,27 @@ class RewriteTests(object): def check_rewrite(self, frm_operations, to_operations, **namespace): 
+ def setfield(baseptr, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + return 'gc_store(%s, %d, %s, %d)' % (baseptr, descr.offset, + newvalue, descr.field_size) + def setarrayitem(baseptr, index, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(index, (str, int)) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + if isinstance(index, int): + offset = descr.basesize + index * descr.itemsize + return 'gc_store(%s, %d, %s, %d)' % (baseptr, offset, + newvalue, descr.itemsize) + else: + return 'gc_store_indexed(%s, %s, %s, %d, %d, %s)' % ( + baseptr, index, newvalue, + descr.itemsize, descr.basesize, descr.itemsize) + # + WORD = globals()['WORD'] S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) sdescr = get_size_descr(self.gc_ll_descr, S) @@ -54,6 +78,26 @@ cdescr.tid = 8111 clendescr = cdescr.lendescr # + S1 = lltype.GcStruct('S1') + S1I = lltype.GcArray(('x', lltype.Ptr(S1)), + ('y', lltype.Ptr(S1)), + ('z', lltype.Ptr(S1))) + itzdescr = get_interiorfield_descr(self.gc_ll_descr, S1I, 'z') + itydescr = get_interiorfield_descr(self.gc_ll_descr, S1I, 'y') + itxdescr = get_interiorfield_descr(self.gc_ll_descr, S1I, 'x') + S2I = lltype.GcArray(('x', lltype.Ptr(S1)), + ('y', lltype.Ptr(S1)), + ('z', lltype.Ptr(S1)), + ('t', lltype.Ptr(S1))) # size is a power of two + s2i_item_size_in_bits = (4 if WORD == 4 else 5) + ity2descr = get_interiorfield_descr(self.gc_ll_descr, S2I, 'y') + R1 = lltype.GcStruct('R', ('x', lltype.Signed), + ('y', lltype.Float), + ('z', lltype.Ptr(S1))) + xdescr = get_field_descr(self.gc_ll_descr, R1, 'x') + ydescr = get_field_descr(self.gc_ll_descr, R1, 'y') + zdescr = get_field_descr(self.gc_ll_descr, R1, 'z') + # E = lltype.GcStruct('Empty') edescr = get_size_descr(self.gc_ll_descr, E) edescr.tid = 9000 @@ -66,7 +110,13 @@ # tiddescr = self.gc_ll_descr.fielddescr_tid wbdescr = 
self.gc_ll_descr.write_barrier_descr - WORD = globals()['WORD'] + # + F = lltype.GcArray(lltype.Float) + fdescr = get_array_descr(self.gc_ll_descr, F) + SF = lltype.GcArray(lltype.SingleFloat) + sfdescr = get_array_descr(self.gc_ll_descr, SF) + RAW_SF = lltype.Array(lltype.SingleFloat) + raw_sfdescr = get_array_descr(self.gc_ll_descr, RAW_SF) # strdescr = self.gc_ll_descr.str_descr unicodedescr = self.gc_ll_descr.unicode_descr @@ -124,6 +174,11 @@ class BaseFakeCPU(object): JITFRAME_FIXED_SIZE = 0 + load_constant_offset = True + load_supported_factors = (1,2,4,8) + + translate_support_code = None + def __init__(self): self.tracker = FakeTracker() self._cache = {} @@ -241,7 +296,7 @@ [p1] p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \ descr=malloc_fixedsize_descr) - setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr) + gc_store(p0, 0, ConstClass(o_vtable), %(vtable_descr.field_size)s) jump() """) @@ -314,7 +369,7 @@ """, """ [p1] p0 = call_malloc_nursery(%(sdescr.size)d) - setfield_gc(p0, 1234, descr=tiddescr) + gc_store(p0, 0, 1234, 8) jump() """) @@ -329,12 +384,12 @@ [] p0 = call_malloc_nursery( \ %(sdescr.size + tdescr.size + sdescr.size)d) - setfield_gc(p0, 1234, descr=tiddescr) + gc_store(p0, 0, 1234, 8) p1 = nursery_ptr_increment(p0, %(sdescr.size)d) - setfield_gc(p1, 5678, descr=tiddescr) + gc_store(p1, 0, 5678, 8) p2 = nursery_ptr_increment(p1, %(tdescr.size)d) - setfield_gc(p2, 1234, descr=tiddescr) - zero_ptr_field(p1, %(tdescr.gc_fielddescrs[0].offset)s) + gc_store(p2, 0, 1234, 8) + %(setfield('p1', 0, tdescr.gc_fielddescrs[0]))s jump() """) @@ -347,8 +402,8 @@ [] p0 = call_malloc_nursery( \ %(adescr.basesize + 10 * adescr.itemsize)d) - setfield_gc(p0, 4321, descr=tiddescr) - setfield_gc(p0, 10, descr=alendescr) + gc_store(p0, 0, 4321, %(tiddescr.field_size)s) + gc_store(p0, 0, 10, %(alendescr.field_size)s) jump() """) @@ -363,10 +418,10 @@ p0 = call_malloc_nursery( \ %(sdescr.size + \ adescr.basesize + 10 * adescr.itemsize)d) - 
setfield_gc(p0, 1234, descr=tiddescr) + gc_store(p0, 0, 1234, %(tiddescr.field_size)s) p1 = nursery_ptr_increment(p0, %(sdescr.size)d) - setfield_gc(p1, 4321, descr=tiddescr) - setfield_gc(p1, 10, descr=alendescr) + gc_store(p1, 0, 4321, %(tiddescr.field_size)s) + gc_store(p1, 0, 10, %(alendescr.field_size)s) jump() """) @@ -378,8 +433,8 @@ """, """ [] p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) - setfield_gc(p0, 8765, descr=tiddescr) - setfield_gc(p0, 6, descr=blendescr) + gc_store(p0, 0, 8765, %(tiddescr.field_size)s) + gc_store(p0, 0, 6, %(blendescr.field_size)s) jump() """) @@ -394,17 +449,17 @@ """, """ [] p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) - setfield_gc(p0, 8765, descr=tiddescr) - setfield_gc(p0, 5, descr=blendescr) + gc_store(p0, 0, 8765, %(tiddescr.field_size)s) + gc_store(p0, 0, 5, %(blendescr.field_size)s) p1 = nursery_ptr_increment(p0, %(bdescr.basesize + 8)d) - setfield_gc(p1, 8765, descr=tiddescr) - setfield_gc(p1, 5, descr=blendescr) + gc_store(p1, 0, 8765, %(tiddescr.field_size)s) + gc_store(p1, 0, 5, %(blendescr.field_size)s) p2 = nursery_ptr_increment(p1, %(bdescr.basesize + 8)d) - setfield_gc(p2, 8765, descr=tiddescr) - setfield_gc(p2, 5, descr=blendescr) + gc_store(p2, 0, 8765, %(tiddescr.field_size)s) + gc_store(p2, 0, 5, %(blendescr.field_size)s) p3 = nursery_ptr_increment(p2, %(bdescr.basesize + 8)d) - setfield_gc(p3, 8765, descr=tiddescr) - setfield_gc(p3, 5, descr=blendescr) + gc_store(p3, 0, 8765, %(tiddescr.field_size)s) + gc_store(p3, 0, 5, %(blendescr.field_size)s) jump() """) @@ -417,9 +472,9 @@ """, """ [] p0 = call_malloc_nursery(%(4*WORD)d) - setfield_gc(p0, 9000, descr=tiddescr) + gc_store(p0, 0, 9000, %(tiddescr.field_size)s) p1 = nursery_ptr_increment(p0, %(2*WORD)d) - setfield_gc(p1, 9000, descr=tiddescr) + gc_store(p1, 0, 9000, %(tiddescr.field_size)s) jump() """) @@ -431,7 +486,7 @@ """, """ [i0] p0 = call_malloc_nursery_varsize(0, 1, i0, descr=bdescr) - setfield_gc(p0, i0, descr=blendescr) + 
gc_store(p0, 0, i0, %(bdescr.basesize)s) jump(i0) """) @@ -443,8 +498,8 @@ """, """ [i0] p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr) - setfield_gc(p0, i0, descr=strlendescr) - setfield_gc(p0, 0, descr=strhashdescr) + gc_store(p0, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) + gc_store(p0, 0, 0, %(strlendescr.field_size)s) jump(i0) """) @@ -457,10 +512,12 @@ nonstd_descr.basesize = 64 # <= hacked nonstd_descr.itemsize = 8 nonstd_descr_gcref = 123 + # REVIEW: added descr=nonstd_descr to setarrayitem + # is it even valid to have a setarrayitem WITHOUT a descr? self.check_rewrite(""" [i0, p1] p0 = new_array(i0, descr=nonstd_descr) - setarrayitem_gc(p0, i0, p1) + setarrayitem_gc(p0, i0, p1, descr=nonstd_descr) jump(i0) """, """ [i0, p1] @@ -470,7 +527,7 @@ 6464, i0, \ descr=malloc_array_nonstandard_descr) cond_call_gc_wb_array(p0, i0, descr=wbdescr) - setarrayitem_gc(p0, i0, p1) + gc_store_indexed(p0, i0, p1, 8, 64, 8) jump(i0) """, nonstd_descr=nonstd_descr) @@ -500,15 +557,15 @@ [] p0 = call_malloc_nursery( \ %(2 * (bdescr.basesize + 104))d) - setfield_gc(p0, 8765, descr=tiddescr) - setfield_gc(p0, 101, descr=blendescr) + gc_store(p0, 0, 8765, %(tiddescr.field_size)s) + gc_store(p0, 0, 101, %(blendescr.field_size)s) p1 = nursery_ptr_increment(p0, %(bdescr.basesize + 104)d) - setfield_gc(p1, 8765, descr=tiddescr) - setfield_gc(p1, 102, descr=blendescr) + gc_store(p1, 0, 8765, %(tiddescr.field_size)s) + gc_store(p1, 0, 102, %(blendescr.field_size)s) p2 = call_malloc_nursery( \ %(bdescr.basesize + 104)d) - setfield_gc(p2, 8765, descr=tiddescr) - setfield_gc(p2, 103, descr=blendescr) + gc_store(p2, 0, 8765, %(tiddescr.field_size)s) + gc_store(p2, 0, 103, %(blendescr.field_size)s) jump() """) @@ -534,8 +591,8 @@ """, """ [p1] p0 = call_malloc_nursery(104) # rounded up - setfield_gc(p0, 9315, descr=tiddescr) - setfield_gc(p0, 0, descr=vtable_descr) + gc_store(p0, 0, 9315, %(tiddescr.field_size)s) + gc_store(p0, 0, 0, %(vtable_descr.field_size)s) 
jump() """) @@ -549,7 +606,7 @@ [p1] p0 = call_malloc_gc(ConstClass(malloc_big_fixedsize), 104, 9315, \ descr=malloc_big_fixedsize_descr) - setfield_gc(p0, 0, descr=vtable_descr) + gc_store(p0, 0, 0, %(vtable_descr.field_size)s) jump() """) @@ -566,21 +623,21 @@ p0 = call_malloc_nursery( \ %(strdescr.basesize + 16 * strdescr.itemsize + \ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) - setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) - setfield_gc(p0, 14, descr=strlendescr) - setfield_gc(p0, 0, descr=strhashdescr) + gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s) + gc_store(p0, 0, 0, %(strhashdescr.field_size)s) p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) - setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) - setfield_gc(p1, 10, descr=unicodelendescr) - setfield_gc(p1, 0, descr=unicodehashdescr) + gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s) + gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s) + gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s) p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) - setfield_gc(p2, i2, descr=unicodelendescr) - setfield_gc(p2, 0, descr=unicodehashdescr) + gc_store(p2, %(unicodelendescr.offset)s, i2, %(unicodelendescr.field_size)s) + gc_store(p2, 0, 0, %(unicodehashdescr.field_size)s) p3 = call_malloc_nursery_varsize(1, 1, i2, \ descr=strdescr) - setfield_gc(p3, i2, descr=strlendescr) - setfield_gc(p3, 0, descr=strhashdescr) + gc_store(p3, %(strlendescr.offset)s, i2, %(strlendescr.field_size)s) + gc_store(p3, 0, 0, %(strhashdescr.field_size)s) jump() """) @@ -592,7 +649,7 @@ """, """ [p1, p2] cond_call_gc_wb(p1, descr=wbdescr) - setfield_gc(p1, p2, descr=tzdescr) + gc_store(p1, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump() """) @@ -606,7 +663,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb(p1, descr=wbdescr) - 
setarrayitem_gc(p1, i2, p3, descr=cdescr) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -622,12 +679,12 @@ [i2, p3] p1 = call_malloc_nursery( \ %(cdescr.basesize + 129 * cdescr.itemsize)d) - setfield_gc(p1, 8111, descr=tiddescr) - setfield_gc(p1, 129, descr=clendescr) + gc_store(p1, 0, 8111, %(tiddescr.field_size)s) + gc_store(p1, 0, 129, %(clendescr.field_size)s) zero_array(p1, 0, 129, descr=cdescr) call_n(123456) cond_call_gc_wb(p1, descr=wbdescr) - setarrayitem_gc(p1, i2, p3, descr=cdescr) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -644,12 +701,12 @@ [i2, p3] p1 = call_malloc_nursery( \ %(cdescr.basesize + 130 * cdescr.itemsize)d) - setfield_gc(p1, 8111, descr=tiddescr) - setfield_gc(p1, 130, descr=clendescr) + gc_store(p1, 0, 8111, %(tiddescr.field_size)s) + gc_store(p1, 0, 130, %(clendescr.field_size)s) zero_array(p1, 0, 130, descr=cdescr) call_n(123456) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - setarrayitem_gc(p1, i2, p3, descr=cdescr) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -661,7 +718,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb_array(p1, i2, descr=wbdescr) - setarrayitem_gc(p1, i2, p3, descr=cdescr) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -676,12 +733,12 @@ [i2, p3] p1 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) - setfield_gc(p1, 8111, descr=tiddescr) - setfield_gc(p1, 5, descr=clendescr) + gc_store(p1, 0, 8111, %(tiddescr.field_size)s) + gc_store(p1, 0, 5, %(clendescr.field_size)s) zero_array(p1, 0, 5, descr=cdescr) label(p1, i2, p3) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - setarrayitem_gc(p1, i2, p3, descr=cdescr) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -693,16 +750,21 @@ interiorlendescr = interiordescr.lendescr interiorzdescr = get_interiorfield_descr(self.gc_ll_descr, INTERIOR, 'z') + scale = interiorzdescr.arraydescr.itemsize + offset = interiorzdescr.arraydescr.basesize + offset += interiorzdescr.fielddescr.offset + size = 
interiorzdescr.arraydescr.itemsize self.check_rewrite(""" [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + setinteriorfield_gc(p1, 7, p2, descr=interiorzdescr) jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb_array(p1, 0, descr=wbdescr) - setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + cond_call_gc_wb_array(p1, 7, descr=wbdescr) + gc_store(p1, %(offset + 7 * scale)s, p2, %(size)s) jump(p1, p2) - """, interiorzdescr=interiorzdescr) + """, interiorzdescr=interiorzdescr, scale=scale, + offset=offset, size=size) def test_initialization_store(self): self.check_rewrite(""" @@ -713,8 +775,8 @@ """, """ [p1] p0 = call_malloc_nursery(%(tdescr.size)d) - setfield_gc(p0, 5678, descr=tiddescr) - setfield_gc(p0, p1, descr=tzdescr) + gc_store(p0, 0, 5678, %(tiddescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -728,11 +790,11 @@ """, """ [] p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d) - setfield_gc(p0, 5678, descr=tiddescr) + gc_store(p0, 0, 5678, %(tiddescr.field_size)s) p1 = nursery_ptr_increment(p0, %(tdescr.size)d) - setfield_gc(p1, 1234, descr=tiddescr) + gc_store(p1, 0, 1234, %(tiddescr.field_size)s) # <<>> - setfield_gc(p0, p1, descr=tzdescr) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -746,10 +808,10 @@ [p1, i2] p0 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) - setfield_gc(p0, 8111, descr=tiddescr) - setfield_gc(p0, 5, descr=clendescr) + gc_store(p0, 0, 8111, %(tiddescr.field_size)s) + gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - setarrayitem_gc(p0, i2, p1, descr=cdescr) + %(setarrayitem('p0', 'i2', 'p1', cdescr))s jump() """) @@ -764,11 +826,11 @@ [p1, p2] p0 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) - setfield_gc(p0, 8111, descr=tiddescr) - setfield_gc(p0, 5, descr=clendescr) + gc_store(p0, 0, 8111, %(tiddescr.field_size)s) + gc_store(p0, 0, 5, %(clendescr.field_size)s) 
zero_array(p0, 2, 3, descr=cdescr) - setarrayitem_gc(p0, 1, p1, descr=cdescr) - setarrayitem_gc(p0, 0, p2, descr=cdescr) + %(setarrayitem('p0', 1, 'p1', cdescr))s + %(setarrayitem('p0', 0, 'p2', cdescr))s jump() """) @@ -783,11 +845,11 @@ [p1, p2] p0 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) - setfield_gc(p0, 8111, descr=tiddescr) - setfield_gc(p0, 5, descr=clendescr) + gc_store(p0, 0, 8111, %(tiddescr.field_size)s) + gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 3, descr=cdescr) - setarrayitem_gc(p0, 3, p1, descr=cdescr) - setarrayitem_gc(p0, 4, p2, descr=cdescr) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s jump() """) @@ -803,12 +865,12 @@ [p1, p2] p0 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) - setfield_gc(p0, 8111, descr=tiddescr) - setfield_gc(p0, 5, descr=clendescr) + gc_store(p0, 0, 8111, %(tiddescr.field_size)s) + gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - setarrayitem_gc(p0, 3, p1, descr=cdescr) - setarrayitem_gc(p0, 2, p2, descr=cdescr) - setarrayitem_gc(p0, 1, p2, descr=cdescr) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -826,14 +888,14 @@ [p1, p2] p0 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) - setfield_gc(p0, 8111, descr=tiddescr) - setfield_gc(p0, 5, descr=clendescr) + gc_store(p0, 0, 8111, %(tiddescr.field_size)s) + gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 5, 0, descr=cdescr) - setarrayitem_gc(p0, 3, p1, descr=cdescr) - setarrayitem_gc(p0, 4, p2, descr=cdescr) - setarrayitem_gc(p0, 0, p1, descr=cdescr) - setarrayitem_gc(p0, 2, p2, descr=cdescr) - setarrayitem_gc(p0, 1, p2, descr=cdescr) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s + %(setarrayitem('p0', 0, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + 
%(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -849,13 +911,13 @@ [p1, p2] p0 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) - setfield_gc(p0, 8111, descr=tiddescr) - setfield_gc(p0, 5, descr=clendescr) + gc_store(p0, 0, 8111, %(tiddescr.field_size)s) + gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - setarrayitem_gc(p0, 0, p1, descr=cdescr) + %(setarrayitem('p0', 0, 'p1', cdescr))s call_n(321321) cond_call_gc_wb(p0, descr=wbdescr) - setarrayitem_gc(p0, 1, p2, descr=cdescr) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -871,13 +933,13 @@ [p1, p2] p0 = call_malloc_nursery( \ %(cdescr.basesize + 5 * cdescr.itemsize)d) - setfield_gc(p0, 8111, descr=tiddescr) - setfield_gc(p0, 5, descr=clendescr) + gc_store(p0, 0, 8111, %(tiddescr.field_size)s) + gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - setarrayitem_gc(p0, 0, p1, descr=cdescr) + %(setarrayitem('p0', 0, 'p1', cdescr))s label(p0, p2) cond_call_gc_wb_array(p0, 1, descr=wbdescr) - setarrayitem_gc(p0, 1, p2, descr=cdescr) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -889,7 +951,7 @@ """, """ [p1, p2, i3] p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr) - setfield_gc(p0, i3, descr=blendescr) + gc_store(p0, 0, i3, %(blendescr.field_size)s) zero_array(p0, 0, i3, descr=bdescr) jump() """) @@ -903,10 +965,10 @@ """, """ [p1, p2, i3] p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr) - setfield_gc(p0, i3, descr=blendescr) + gc_store(p0, 0, i3, %(blendescr.field_size)s) zero_array(p0, 0, i3, descr=bdescr) cond_call_gc_wb_array(p0, 0, descr=wbdescr) - setarrayitem_gc(p0, 0, p1, descr=bdescr) + %(setarrayitem('p0', 0, 'p1', bdescr))s jump() """) @@ -922,9 +984,9 @@ """, """ [i0, p1, i2] p0 = call_malloc_nursery_varsize(0, 1, i0, descr=bdescr) - setfield_gc(p0, i0, descr=blendescr) + gc_store(p0, 0, i0, %(blendescr.field_size)s) cond_call_gc_wb_array(p0, i2, descr=wbdescr) - setarrayitem_gc(p0, 
i2, p1, descr=bdescr) + gc_store_indexed(p0, i2, p1, 1, %(bdescr.basesize)s, 1) jump() """) @@ -938,14 +1000,14 @@ """, """ [i0] p0 = call_malloc_nursery(%(tdescr.size)d) - setfield_gc(p0, 5678, descr=tiddescr) - zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s) + gc_store(p0, 0, 5678, %(tiddescr.field_size)s) + gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) - setfield_gc(p1, i0, descr=strlendescr) - setfield_gc(p1, 0, descr=strhashdescr) + gc_store(p1, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) + gc_store(p1, 0, 0, %(strhashdescr.field_size)s) cond_call_gc_wb(p0, descr=wbdescr) - setfield_gc(p0, p1, descr=tzdescr) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -959,11 +1021,11 @@ """, """ [p1] p0 = call_malloc_nursery(%(tdescr.size)d) - setfield_gc(p0, 5678, descr=tiddescr) - zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s) + gc_store(p0, 0, 5678, %(tiddescr.field_size)s) + gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) label(p0, p1) cond_call_gc_wb(p0, descr=wbdescr) - setfield_gc(p0, p1, descr=tzdescr) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -976,8 +1038,8 @@ """, """ [p0, p1, p2] cond_call_gc_wb(p0, descr=wbdescr) - setfield_gc(p0, p1, descr=tzdescr) - setfield_gc(p0, p2, descr=tzdescr) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump(p1, p2, p0) """) @@ -987,20 +1049,20 @@ i2 = call_assembler_i(i0, f0, descr=casmdescr) """, """ [i0, f0] - i1 = getfield_raw_i(ConstClass(frame_info), descr=jfi_frame_size) + i1 = gc_load_i(ConstClass(frame_info), %(jfi_frame_size.offset)s, %(jfi_frame_size.field_size)s) p1 = call_malloc_nursery_varsize_frame(i1) - setfield_gc(p1, 0, descr=tiddescr) - i2 = getfield_raw_i(ConstClass(frame_info), 
descr=jfi_frame_depth) - setfield_gc(p1, 0, descr=jf_extra_stack_depth) - setfield_gc(p1, NULL, descr=jf_savedata) - setfield_gc(p1, NULL, descr=jf_force_descr) - setfield_gc(p1, NULL, descr=jf_descr) - setfield_gc(p1, NULL, descr=jf_guard_exc) - setfield_gc(p1, NULL, descr=jf_forward) - setfield_gc(p1, i2, descr=framelendescr) - setfield_gc(p1, ConstClass(frame_info), descr=jf_frame_info) - setarrayitem_gc(p1, 0, i0, descr=signedframedescr) - setarrayitem_gc(p1, 1, f0, descr=floatframedescr) + gc_store(p1, 0, 0, %(tiddescr.field_size)s) + i2 = gc_load_i(ConstClass(frame_info), %(jfi_frame_depth.offset)s, %(jfi_frame_depth.field_size)s) + %(setfield('p1', 0, jf_extra_stack_depth))s + %(setfield('p1', 'NULL', jf_savedata))s + %(setfield('p1', 'NULL', jf_force_descr))s + %(setfield('p1', 'NULL', jf_descr))s + %(setfield('p1', 'NULL', jf_guard_exc))s + %(setfield('p1', 'NULL', jf_forward))s + gc_store(p1, 0, i2, %(framelendescr.field_size)s) + %(setfield('p1', 'ConstClass(frame_info)', jf_frame_info))s + gc_store(p1, 3, i0, 8) + gc_store(p1, 13, f0, 8) i3 = call_assembler_i(p1, descr=casmdescr) """) @@ -1014,8 +1076,8 @@ """, """ [i0] p0 = call_malloc_nursery(%(tdescr.size)d) - setfield_gc(p0, 5678, descr=tiddescr) - zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s) + gc_store(p0, 0, 5678, %(tiddescr.field_size)s) + gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) i1 = int_add_ovf(i0, 123) guard_overflow(descr=guarddescr) [] jump() @@ -1031,8 +1093,8 @@ """, """ [i0] p0 = call_malloc_nursery(%(tdescr.size)d) - setfield_gc(p0, 5678, descr=tiddescr) - zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s) + gc_store(p0, 0, 5678, %(tiddescr.field_size)s) + gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) i1 = int_gt(i0, 123) guard_false(i1, descr=guarddescr) [] jump() @@ -1045,14 +1107,14 @@ self.check_rewrite(""" [] p0 = new(descr=tdescr) - p1 = getfield_gc_r(p0, descr=tdescr) + p1 
= getfield_gc_r(p0, descr=tzdescr) jump(p1) """, """ [] p0 = call_malloc_nursery(%(tdescr.size)d) - setfield_gc(p0, 5678, descr=tiddescr) - zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s) - p1 = getfield_gc_r(p0, descr=tdescr) + gc_store(p0, 0, 5678, %(tiddescr.field_size)s) + gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) + p1 = gc_load_r(p0, %(tzdescr.offset)s, %(tzdescr.field_size)s) jump(p1) """) @@ -1081,3 +1143,126 @@ guard_false(i2) [i5, i0] jump() """) + + @py.test.mark.parametrize('support_offset,factors,fromto',[ + # [False, (1,2,4,8), 'setarrayitem_gc(p0,i1,i2,descr=adescr)' '->' + # 'i3 = int_mul(i1,%(adescr.itemsize)s);' + # 'i4 = int_add(i3,%(adescr.basesize)s);' + # 'gc_store(p0,i4,i2,%(adescr.itemsize)s)'], + [True, (1,2,4,8), 'setarrayitem_gc(p0,i1,i2,descr=adescr)' '->' + 'gc_store_indexed(p0,i1,i2,%(adescr.itemsize)s,' + '%(adescr.basesize)s,%(adescr.itemsize)s)'], + #[False, (1,), 'setarrayitem_gc(p0,i1,i2,descr=adescr)' '->' + # 'i3 = int_mul(i1,%(adescr.itemsize)s);' + # 'i4 = int_add(i3,%(adescr.basesize)s);' + # 'gc_store(p0,i4,i2,%(adescr.itemsize)s)'], + [True, None, 'i3 = raw_load_i(p0,i1,descr=adescr)' '->' + 'gc_load_indexed_i(p0,i1,1,%(adescr.basesize)s,-%(adescr.itemsize)s)'], + [True, None, 'i3 = raw_load_f(p0,i1,descr=fdescr)' '->' + 'gc_load_indexed_f(p0,i1,1,%(fdescr.basesize)s,%(fdescr.itemsize)s)'], + [True, None, 'i3 = raw_load_i(p0,i1,descr=sfdescr)' '->' + 'gc_load_indexed_i(p0,i1,1,%(sfdescr.basesize)s,%(sfdescr.itemsize)s)'], + [True, (1,2,4,8), 'i3 = raw_store(p0,i1,i2,descr=raw_sfdescr)' '->' + 'gc_store_indexed(p0,i1,i2,1,%(raw_sfdescr.basesize)s,%(raw_sfdescr.itemsize)s)'], + # [False, (1,), 'i3 = raw_store(p0,i1,i2,descr=raw_sfdescr)' '->' + # 'i5 = int_add(i1,%(raw_sfdescr.basesize)s);' + # 'gc_store(p0,i5,i2,%(raw_sfdescr.itemsize)s)'], + [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' + 'i3 = gc_load_f(p0,%(ydescr.offset)s,%(ydescr.field_size)s)'], + 
[True, (1,2,4,8), 'setfield_raw(p0,i1,descr=ydescr)' '->' + 'gc_store(p0,%(ydescr.offset)s,i1,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_gc(p0,p0,descr=zdescr)' '->' + 'cond_call_gc_wb(p0, descr=wbdescr);' + 'gc_store(p0,%(zdescr.offset)s,p0,%(zdescr.field_size)s)'], + [False, (1,), 'i3 = arraylen_gc(p0, descr=adescr)' '->' + 'i3 = gc_load_i(p0,0,%(adescr.itemsize)s)'], + #[False, (1,), 'i3 = strlen(p0)' '->' + # 'i3 = gc_load_i(p0,' + # '%(strlendescr.offset)s,%(strlendescr.field_size)s)'], + [True, (1,), 'i3 = strlen(p0)' '->' + 'i3 = gc_load_i(p0,' + '%(strlendescr.offset)s,' + '%(strlendescr.field_size)s)'], + #[False, (1,), 'i3 = unicodelen(p0)' '->' + # 'i3 = gc_load_i(p0,' + # '%(unicodelendescr.offset)s,' + # '%(unicodelendescr.field_size)s)'], + [True, (1,), 'i3 = unicodelen(p0)' '->' + 'i3 = gc_load_i(p0,' + '%(unicodelendescr.offset)s,' + '%(unicodelendescr.field_size)s)'], + + ## getitem str/unicode + [True, (4,), 'i3 = unicodegetitem(p0,i1)' '->' + 'i3 = gc_load_indexed_i(p0,i1,' + '%(unicodedescr.itemsize)d,' + '%(unicodedescr.basesize)d,' + '%(unicodedescr.itemsize)d)'], + #[False, (4,), 'i3 = unicodegetitem(p0,i1)' '->' + # 'i4 = int_mul(i1, %(unicodedescr.itemsize)d);' + # 'i5 = int_add(i4, %(unicodedescr.basesize)d);' + # 'i3 = gc_load_i(p0,i5,%(unicodedescr.itemsize)d)'], + [True, (4,), 'i3 = strgetitem(p0,i1)' '->' + 'i3 = gc_load_indexed_i(p0,i1,1,' + '%(strdescr.basesize)d,1)'], + #[False, (4,), 'i3 = strgetitem(p0,i1)' '->' + # 'i5 = int_add(i1, %(strdescr.basesize)d);' + # 'i3 = gc_load_i(p0,i5,1)'], + ## setitem str/unicode + [True, (4,), 'i3 = strsetitem(p0,i1,0)' '->' + 'i3 = gc_store_indexed(p0,i1,0,1,' + '%(strdescr.basesize)d,1)'], + [True, (4,), 'i3 = unicodesetitem(p0,i1,0)' '->' + 'i3 = gc_store_indexed(p0,i1,0,' + '%(unicodedescr.itemsize)d,' + '%(unicodedescr.basesize)d,' + '%(unicodedescr.itemsize)d)'], + ## interior + [True, (1,2,4,8), 'i3 = getinteriorfield_gc_i(p0,i1,descr=itzdescr)' '->' + 'i4 = int_mul(i1,' + 
'%(itzdescr.arraydescr.itemsize)d);' + 'i3 = gc_load_indexed_i(p0,i4,1,' + '%(itzdescr.arraydescr.basesize' + ' + itzdescr.fielddescr.offset)d,' + '%(itzdescr.fielddescr.field_size)d)'], + [True, (1,2,4,8), 'i3 = getinteriorfield_gc_r(p0,i1,descr=itxdescr)' '->' + 'i4 = int_mul(i1,' + '%(itxdescr.arraydescr.itemsize)d);' + 'i3 = gc_load_indexed_r(p0,i4,1,' + '%(itxdescr.arraydescr.basesize' + ' + itxdescr.fielddescr.offset)d,' + '%(itxdescr.fielddescr.field_size)d)'], + [True, (1,2,4,8), 'i3 = setinteriorfield_gc(p0,i1,i2,descr=itydescr)' '->' + 'i4 = int_mul(i1,' + '%(itydescr.arraydescr.itemsize)d);' + 'i3 = gc_store_indexed(p0,i4,i2,1,' + '%(itydescr.arraydescr.basesize' + ' + itydescr.fielddescr.offset)d,' + '%(itydescr.fielddescr.field_size)d)'], + [True, (1,2,4,8), 'i3 = setinteriorfield_gc(p0,i1,i2,descr=ity2descr)' '->' + 'i4 = int_lshift(i1,' + '%(s2i_item_size_in_bits)d);' + 'i3 = gc_store_indexed(p0,i4,i2,1,' + '%(ity2descr.arraydescr.basesize' + ' + itydescr.fielddescr.offset)d,' + '%(ity2descr.fielddescr.field_size)d)'], + ]) + def test_gc_load_store_transform(self, support_offset, factors, fromto): + self.cpu.load_constant_offset = support_offset + all_supported_sizes = [factors] + + if not factors: + all_supported_sizes = [(1,), (1,2,), (4,), (1,2,4,8)] + for factors in all_supported_sizes: + self.cpu.load_supported_factors = factors + f, t = fromto.split('->') + t = ('\n' +(' '*16)).join([s for s in t.split(';')]) + self.check_rewrite(""" + [p0,i1,i2] + {f} + jump() + """.format(**locals()), """ + [p0,i1,i2] + {t} + jump() + """.format(**locals())) diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_gc_test.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_gc_test.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_gc_test.py 2016-03-19 16:40:15.000000000 +0000 @@ -167,7 +167,7 @@ funcs[num][2](n, x, x0, x1, 
x2, x3, x4, x5, x6, x7, l, s) myjitdriver = JitDriver(greens = ['num'], reds = ['n', 'x', 'x0', 'x1', 'x2', 'x3', 'x4', - 'x5', 'x6', 'x7', 'l', 's']) + 'x5', 'x6', 'x7', 'l', 's'], is_recursive=True) cls.main_allfuncs = staticmethod(main_allfuncs) cls.name_to_func = name_to_func OLD_DEBUG = GcLLDescr_framework.DEBUG @@ -768,7 +768,7 @@ def define_compile_framework_call_assembler(self): S = lltype.GcForwardReference() S.become(lltype.GcStruct('S', ('s', lltype.Ptr(S)))) - driver = JitDriver(greens = [], reds = 'auto') + driver = JitDriver(greens = [], reds = 'auto', is_recursive=True) def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s0): driver.jit_merge_point() diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py 2016-03-19 16:40:11.000000000 +0000 @@ -1,6 +1,5 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.jit import dont_look_inside -from rpython.rlib.objectmodel import invoke_around_extcall from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib import rposix @@ -16,20 +15,10 @@ compile_kwds = dict(enable_opts=ALL_OPTS_NAMES, thread=True) def define_simple(self): - class Glob: - def __init__(self): - self.event = 0 - glob = Glob() - # - c_strchr = rffi.llexternal('strchr', [rffi.CCHARP, lltype.Signed], rffi.CCHARP) - def func(): - glob.event += 1 - def before(n, x): - invoke_around_extcall(func, func) return (n, None, None, None, None, None, None, None, None, None, None, None) # @@ -73,7 +62,8 @@ def f42(n): length = len(glob.lst) raw = alloc1() - fn = llhelper(CALLBACK, rffi._make_wrapper_for(CALLBACK, callback)) + wrapper = 
rffi._make_wrapper_for(CALLBACK, callback, None, True) + fn = llhelper(CALLBACK, wrapper) if n & 1: # to create a loop and a bridge, and also pass # to run the qsort() call in the blackhole interp c_qsort(rffi.cast(rffi.VOIDP, raw), rffi.cast(rffi.SIZE_T, 2), diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py --- pypy-4.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/llsupport/test/zrpy_vmprof_test.py 2016-03-19 16:40:11.000000000 +0000 @@ -0,0 +1,88 @@ + +import os, py +from rpython.jit.backend.test.support import CCompiledMixin +from rpython.rlib.jit import JitDriver +from rpython.tool.udir import udir +from rpython.rlib import rthread +from rpython.translator.translator import TranslationContext +from rpython.jit.backend.detect_cpu import getcpuclass + +class CompiledVmprofTest(CCompiledMixin): + CPUClass = getcpuclass() + + def _get_TranslationContext(self): + t = TranslationContext() + t.config.translation.gc = 'incminimark' + t.config.translation.list_comprehension_operations = True + return t + + def test_vmprof(self): + from rpython.rlib import rvmprof + + class MyCode: + _vmprof_unique_id = 0 + def __init__(self, name): + self.name = name + + def get_name(code): + return code.name + + code2 = MyCode("py:y:foo:4") + rvmprof.register_code(code2, get_name) + + try: + rvmprof.register_code_object_class(MyCode, get_name) + except rvmprof.VMProfPlatformUnsupported, e: + py.test.skip(str(e)) + + def get_unique_id(code): + return rvmprof.get_unique_id(code) + + driver = JitDriver(greens = ['code'], reds = ['i', 's', 'num'], + is_recursive=True, get_unique_id=get_unique_id) + + @rvmprof.vmprof_execute_code("xcode13", lambda code, num: code) + def main(code, num): + return main_jitted(code, num) + + def main_jitted(code, num): + s = 0 + i = 0 + while i < num: + 
driver.jit_merge_point(code=code, i=i, s=s, num=num) + s += (i << 1) + if i % 3 == 0 and code is not code2: + main(code2, 100) + i += 1 + return s + + tmpfilename = str(udir.join('test_rvmprof')) + + def f(num): + rthread.get_ident() # register TLOFS_thread_ident + code = MyCode("py:x:foo:3") + rvmprof.register_code(code, get_name) + fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) + period = 0.0001 + rvmprof.enable(fd, period) + res = main(code, num) + #assert res == 499999500000 + rvmprof.disable() + os.close(fd) + return 0 + + def check_vmprof_output(): + from vmprof import read_profile + tmpfile = str(udir.join('test_rvmprof')) + stats = read_profile(tmpfile) + t = stats.get_tree() + assert t.name == 'py:x:foo:3' + assert len(t.children) == 1 # jit + + self.meta_interp(f, [1000000], inline=True) + try: + import vmprof + except ImportError: + pass + else: + check_vmprof_output() diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/ppc/opassembler.py pypy-5.0.1+dfsg/rpython/jit/backend/ppc/opassembler.py --- pypy-4.0.1+dfsg/rpython/jit/backend/ppc/opassembler.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/ppc/opassembler.py 2016-03-19 16:40:15.000000000 +0000 @@ -20,7 +20,7 @@ PPCBuilder, PPCGuardToken) from rpython.jit.backend.ppc.regalloc import TempPtr, TempInt from rpython.jit.backend.llsupport import symbolic, jitframe -from rpython.jit.backend.llsupport.descr import InteriorFieldDescr, CallDescr +from rpython.jit.backend.llsupport.descr import CallDescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref @@ -706,8 +706,10 @@ _mixin_ = True - def _write_to_mem(self, value_loc, base_loc, ofs, size): - if size.value == 8: + def _write_to_mem(self, value_loc, base_loc, ofs, size_loc): + assert size_loc.is_imm() + size = size_loc.value + if size == 8: if value_loc.is_fp_reg(): if ofs.is_imm(): 
self.mc.stfd(value_loc.value, base_loc.value, ofs.value) @@ -718,17 +720,17 @@ self.mc.std(value_loc.value, base_loc.value, ofs.value) else: self.mc.stdx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if ofs.is_imm(): self.mc.stw(value_loc.value, base_loc.value, ofs.value) else: self.mc.stwx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if ofs.is_imm(): self.mc.sth(value_loc.value, base_loc.value, ofs.value) else: self.mc.sthx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.stb(value_loc.value, base_loc.value, ofs.value) else: @@ -736,18 +738,35 @@ else: assert 0, "size not supported" - def emit_setfield_gc(self, op, arglocs, regalloc): - value_loc, base_loc, ofs, size = arglocs - self._write_to_mem(value_loc, base_loc, ofs, size) + def emit_gc_store(self, op, arglocs, regalloc): + value_loc, base_loc, ofs_loc, size_loc = arglocs + self._write_to_mem(value_loc, base_loc, ofs_loc, size_loc) + + def _apply_offset(self, index_loc, ofs_loc): + # If offset != 0 then we have to add it here. Note that + # mc.addi() would not be valid with operand r0. + assert ofs_loc.is_imm() # must be an immediate... 
+ assert _check_imm_arg(ofs_loc.getint()) # ...that fits 16 bits + assert index_loc is not r.SCRATCH2 + # (simplified version of _apply_scale()) + if ofs_loc.value > 0: + self.mc.addi(r.SCRATCH2.value, index_loc.value, ofs_loc.value) + index_loc = r.SCRATCH2 + return index_loc + + def emit_gc_store_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, value_loc, ofs_loc, size_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._write_to_mem(value_loc, base_loc, index_loc, size_loc) - emit_setfield_raw = emit_setfield_gc - emit_zero_ptr_field = emit_setfield_gc - - def _load_from_mem(self, res, base_loc, ofs, size, signed): + def _load_from_mem(self, res, base_loc, ofs, size_loc, sign_loc): # res, base_loc, ofs, size and signed are all locations assert base_loc is not r.SCRATCH - sign = signed.value - if size.value == 8: + assert size_loc.is_imm() + size = size_loc.value + assert sign_loc.is_imm() + sign = sign_loc.value + if size == 8: if res.is_fp_reg(): if ofs.is_imm(): self.mc.lfd(res.value, base_loc.value, ofs.value) @@ -758,7 +777,7 @@ self.mc.ld(res.value, base_loc.value, ofs.value) else: self.mc.ldx(res.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if IS_PPC_64 and sign: if ofs.is_imm(): self.mc.lwa(res.value, base_loc.value, ofs.value) @@ -769,7 +788,7 @@ self.mc.lwz(res.value, base_loc.value, ofs.value) else: self.mc.lwzx(res.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if sign: if ofs.is_imm(): self.mc.lha(res.value, base_loc.value, ofs.value) @@ -780,7 +799,7 @@ self.mc.lhz(res.value, base_loc.value, ofs.value) else: self.mc.lhzx(res.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.lbz(res.value, base_loc.value, ofs.value) else: @@ -790,22 +809,28 @@ else: assert 0, "size not supported" - def _genop_getfield(self, op, arglocs, regalloc): - base_loc, ofs, res, size, sign = arglocs - self._load_from_mem(res, 
base_loc, ofs, size, sign) - - emit_getfield_gc_i = _genop_getfield - emit_getfield_gc_r = _genop_getfield - emit_getfield_gc_f = _genop_getfield - emit_getfield_gc_pure_i = _genop_getfield - emit_getfield_gc_pure_r = _genop_getfield - emit_getfield_gc_pure_f = _genop_getfield - emit_getfield_raw_i = _genop_getfield - emit_getfield_raw_f = _genop_getfield + def _genop_gc_load(self, op, arglocs, regalloc): + base_loc, ofs_loc, res_loc, size_loc, sign_loc = arglocs + self._load_from_mem(res_loc, base_loc, ofs_loc, size_loc, sign_loc) + + emit_gc_load_i = _genop_gc_load + emit_gc_load_r = _genop_gc_load + emit_gc_load_f = _genop_gc_load + + def _genop_gc_load_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, res_loc, ofs_loc, size_loc, sign_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._load_from_mem(res_loc, base_loc, index_loc, size_loc, sign_loc) + + emit_gc_load_indexed_i = _genop_gc_load_indexed + emit_gc_load_indexed_r = _genop_gc_load_indexed + emit_gc_load_indexed_f = _genop_gc_load_indexed SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) def _multiply_by_constant(self, loc, multiply_by, scratch_loc): + # XXX should die together with _apply_scale() but can't because + # of emit_zero_array() and malloc_cond_varsize() at the moment assert loc.is_reg() if multiply_by == 1: return loc @@ -827,6 +852,9 @@ return scratch_loc def _apply_scale(self, ofs, index_loc, itemsize): + # XXX should die now that getarrayitem and getinteriorfield are gone + # but can't because of emit_zero_array() at the moment + # For arrayitem and interiorfield reads and writes: this returns an # offset suitable for use in ld/ldx or similar instructions. # The result will be either the register r2 or a 16-bit immediate. 
@@ -857,44 +885,6 @@ index_loc = r.SCRATCH2 return index_loc - def _genop_getarray_or_interiorfield(self, op, arglocs, regalloc): - (base_loc, index_loc, res_loc, ofs_loc, - itemsize, fieldsize, fieldsign) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._load_from_mem(res_loc, base_loc, ofs_loc, fieldsize, fieldsign) - - emit_getinteriorfield_gc_i = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_r = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_f = _genop_getarray_or_interiorfield - - def emit_setinteriorfield_gc(self, op, arglocs, regalloc): - (base_loc, index_loc, value_loc, ofs_loc, - itemsize, fieldsize) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._write_to_mem(value_loc, base_loc, ofs_loc, fieldsize) - - emit_setinteriorfield_raw = emit_setinteriorfield_gc - - def emit_arraylen_gc(self, op, arglocs, regalloc): - res, base_loc, ofs = arglocs - self.mc.load(res.value, base_loc.value, ofs.value) - - emit_setarrayitem_gc = emit_setinteriorfield_gc - emit_setarrayitem_raw = emit_setarrayitem_gc - - emit_getarrayitem_gc_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_f = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_f = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_i = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_f = _genop_getarray_or_interiorfield - - emit_raw_store = emit_setarrayitem_gc - emit_raw_load_i = _genop_getarray_or_interiorfield - emit_raw_load_f = _genop_getarray_or_interiorfield - def _copy_in_scratch2(self, loc): if loc.is_imm(): self.mc.li(r.SCRATCH2.value, loc.value) @@ -998,10 +988,6 @@ _mixin_ = True - emit_strlen = FieldOpAssembler._genop_getfield - emit_strgetitem = FieldOpAssembler._genop_getarray_or_interiorfield - 
emit_strsetitem = FieldOpAssembler.emit_setarrayitem_gc - def emit_copystrcontent(self, op, arglocs, regalloc): self._emit_copycontent(arglocs, is_unicode=False) @@ -1059,12 +1045,8 @@ class UnicodeOpAssembler(object): - _mixin_ = True - - emit_unicodelen = FieldOpAssembler._genop_getfield - emit_unicodegetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_unicodesetitem = FieldOpAssembler.emit_setarrayitem_gc + # empty! class AllocOpAssembler(object): diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/ppc/regalloc.py pypy-5.0.1+dfsg/rpython/jit/backend/ppc/regalloc.py --- pypy-4.0.1+dfsg/rpython/jit/backend/ppc/regalloc.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/ppc/regalloc.py 2016-03-19 16:40:15.000000000 +0000 @@ -17,12 +17,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import ArrayDescr +from rpython.jit.backend.llsupport.descr import unpack_arraydescr import rpython.jit.backend.ppc.register as r import rpython.jit.backend.ppc.condition as c -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_print @@ -318,6 +315,8 @@ i += 1 assert not self.rm.reg_bindings assert not self.fprm.reg_bindings + if not we_are_translated(): + self.assembler.mc.trap() self.flush_loop() self.assembler.mc.mark_op(None) # end of the loop self.operations = None @@ -689,158 +688,68 @@ src_locations2, dst_locations2, fptmploc) return [] - def prepare_setfield_gc(self, op): - ofs, size, _ = unpack_fielddescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - 
value_loc = self.ensure_reg(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [value_loc, base_loc, ofs_loc, imm(size)] - - prepare_setfield_raw = prepare_setfield_gc - - def _prepare_getfield(self, op): - ofs, size, sign = unpack_fielddescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size), imm(sign)] - - prepare_getfield_gc_i = _prepare_getfield - prepare_getfield_gc_r = _prepare_getfield - prepare_getfield_gc_f = _prepare_getfield - prepare_getfield_raw_i = _prepare_getfield - prepare_getfield_raw_f = _prepare_getfield - prepare_getfield_gc_pure_i = _prepare_getfield - prepare_getfield_gc_pure_r = _prepare_getfield - prepare_getfield_gc_pure_f = _prepare_getfield - - def prepare_increment_debug_counter(self, op): - base_loc = self.ensure_reg(op.getarg(0)) - temp_loc = r.SCRATCH2 - return [base_loc, temp_loc] - - def _prepare_getinteriorfield(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t + def prepare_gc_store(self, op): base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(itemsize), imm(fieldsize), imm(sign)] - - prepare_getinteriorfield_gc_i = _prepare_getinteriorfield - prepare_getinteriorfield_gc_r = _prepare_getinteriorfield - prepare_getinteriorfield_gc_f = _prepare_getinteriorfield - - def prepare_setinteriorfield_gc(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, _ = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) value_loc = self.ensure_reg(op.getarg(2)) 
- ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(itemsize), imm(fieldsize)] - - prepare_setinteriorfield_raw = prepare_setinteriorfield_gc - - def prepare_arraylen_gc(self, op): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - assert _check_imm_arg(ofs) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] + size_loc = self.ensure_reg_or_any_imm(op.getarg(3)) + return [value_loc, base_loc, ofs_loc, size_loc] - def prepare_setarrayitem_gc(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) + def _prepare_gc_load(self, op): base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - imm_size = imm(size) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - - prepare_setarrayitem_raw = prepare_setarrayitem_gc + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + self.free_op_vars() + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(2) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, ofs_loc, res_loc, size_loc, imm(sign)] + + prepare_gc_load_i = _prepare_gc_load + prepare_gc_load_r = _prepare_gc_load + prepare_gc_load_f = _prepare_gc_load - def prepare_raw_store(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) + def prepare_gc_store_indexed(self, op): base_loc = self.ensure_reg(op.getarg(0)) index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(1), imm(size)] + assert op.getarg(3).getint() 
== 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(4)) + assert ofs_loc.is_imm() # the arg(4) should always be a small constant + size_loc = self.ensure_reg_or_any_imm(op.getarg(5)) + return [base_loc, index_loc, value_loc, ofs_loc, size_loc] - def _prepare_getarrayitem(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) + def _prepare_gc_load_indexed(self, op): base_loc = self.ensure_reg(op.getarg(0)) index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) + assert op.getarg(2).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(3)) + assert ofs_loc.is_imm() # the arg(3) should always be a small constant self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(size) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(sign)] - - prepare_getarrayitem_gc_i = _prepare_getarrayitem - prepare_getarrayitem_gc_r = _prepare_getarrayitem - prepare_getarrayitem_gc_f = _prepare_getarrayitem - prepare_getarrayitem_raw_i = _prepare_getarrayitem - prepare_getarrayitem_raw_f = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_i = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_r = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_f = _prepare_getarrayitem - - def _prepare_raw_load(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(1), imm(size), imm(sign)] - - prepare_raw_load_i = _prepare_raw_load - prepare_raw_load_f = _prepare_raw_load - - def prepare_strlen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() 
- result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(4) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, index_loc, res_loc, ofs_loc, size_loc, imm(sign)] + + prepare_gc_load_indexed_i = _prepare_gc_load_indexed + prepare_gc_load_indexed_r = _prepare_gc_load_indexed + prepare_gc_load_indexed_f = _prepare_gc_load_indexed - def prepare_strgetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_strsetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) + def prepare_increment_debug_counter(self, op): base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] + temp_loc = r.SCRATCH2 + return [base_loc, temp_loc] def prepare_copystrcontent(self, op): src_ptr_loc = self.ensure_reg(op.getarg(0)) @@ -854,37 +763,6 @@ prepare_copyunicodecontent = prepare_copystrcontent - def prepare_unicodelen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return 
[base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_unicodegetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_unicodesetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - prepare_same_as_i = helper.prepare_unary_op prepare_same_as_r = helper.prepare_unary_op prepare_same_as_f = helper.prepare_unary_op @@ -1076,12 +954,6 @@ arglocs = self._prepare_guard(op) return arglocs - def prepare_zero_ptr_field(self, op): - base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) - value_loc = self.ensure_reg(ConstInt(0)) - return [value_loc, base_loc, ofs_loc, imm(WORD)] - def prepare_zero_array(self, op): itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/ppc/runner.py pypy-5.0.1+dfsg/rpython/jit/backend/ppc/runner.py --- pypy-4.0.1+dfsg/rpython/jit/backend/ppc/runner.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/ppc/runner.py 2016-03-19 16:40:15.000000000 +0000 @@ -21,6 +21,9 @@ IS_64_BIT = True backend_name = 'ppc64' + # can an ISA instruction handle a factor to the offset? 
+ load_supported_factors = (1,) + from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE frame_reg = r.SP all_reg_indexes = [-1] * 32 diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/ppc/test/test_runner.py pypy-5.0.1+dfsg/rpython/jit/backend/ppc/test/test_runner.py --- pypy-4.0.1+dfsg/rpython/jit/backend/ppc/test/test_runner.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/ppc/test/test_runner.py 2016-03-19 16:40:11.000000000 +0000 @@ -24,11 +24,11 @@ assert not IS_PPC_32 load_imm_instructions = ( "(li|lis(; ori)?)(; rldicr(; oris)?(; ori)?)?") - add_loop_instructions = "ld; add; cmpdi; beq-?; b;$" + add_loop_instructions = "ld; add; cmpdi; beq-?; b;" bridge_loop_instructions = ( "ld; cmpdi; bge.; " "li; %s; mtctr; %s; bctrl; " - "%s; mtctr; bctr;$" % ( + "%s; mtctr; bctr;" % ( load_imm_instructions, load_imm_instructions, load_imm_instructions)) @@ -134,7 +134,7 @@ def test_debugger_on(self): py.test.skip("XXX") - from pypy.rlib import debug + from rpython.rlib import debug targettoken, preambletoken = TargetToken(), TargetToken() loop = """ diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/test/runner_test.py pypy-5.0.1+dfsg/rpython/jit/backend/test/runner_test.py --- pypy-4.0.1+dfsg/rpython/jit/backend/test/runner_test.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/test/runner_test.py 2016-03-19 16:40:15.000000000 +0000 @@ -548,7 +548,9 @@ if cpu.supports_floats: def func(f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9): + seen.append((f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9)) return f0 + f1 + f2 + f3 + f4 + f5 + f6 + float(i0 + i1) + f7 + f8 + f9 + seen = [] F = lltype.Float I = lltype.Signed FUNC = self.FuncType([F] * 7 + [I] + [F] + [I] + [F]* 2, F) @@ -557,13 +559,15 @@ calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(cpu, func_ptr) - args = ([boxfloat(.1) for i in range(7)] + - [InputArgInt(1), boxfloat(.2), InputArgInt(2), 
boxfloat(.3), - boxfloat(.4)]) + args = ([boxfloat(.0), boxfloat(.1), boxfloat(.2), boxfloat(.3), + boxfloat(.4), boxfloat(.5), boxfloat(.6), + InputArgInt(1), boxfloat(.7), InputArgInt(2), boxfloat(.8), + boxfloat(.9)]) res = self.execute_operation(rop.CALL_F, [funcbox] + args, 'float', descr=calldescr) - assert abs(longlong.getrealfloat(res) - 4.6) < 0.0001 + assert seen == [(.0, .1, .2, .3, .4, .5, .6, 1, .7, 2, .8, .9)] + assert abs(longlong.getrealfloat(res) - 7.5) < 0.0001 def test_call_many_arguments(self): # Test calling a function with a large number of arguments (more than @@ -3649,6 +3653,8 @@ [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] i10 = int_add(i0, 42) i11 = call_assembler_i(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, descr=looptoken) + # NOTE: call_assembler_i() is turned into a single-argument version + # by rewrite.py guard_not_forced()[] finish(i11) ''' @@ -4964,52 +4970,6 @@ [boxfloat(12.5)], 'int') assert res == struct.unpack("I", struct.pack("f", 12.5))[0] - def test_zero_ptr_field(self): - if not isinstance(self.cpu, AbstractLLCPU): - py.test.skip("llgraph can't do zero_ptr_field") - T = lltype.GcStruct('T') - S = lltype.GcStruct('S', ('x', lltype.Ptr(T))) - tdescr = self.cpu.sizeof(T) - sdescr = self.cpu.sizeof(S) - fielddescr = self.cpu.fielddescrof(S, 'x') - loop = parse(""" - [] - p0 = new(descr=tdescr) - p1 = new(descr=sdescr) - setfield_gc(p1, p0, descr=fielddescr) - zero_ptr_field(p1, %d) - finish(p1) - """ % fielddescr.offset, namespace=locals()) - looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - deadframe = self.cpu.execute_token(looptoken) - ref = self.cpu.get_ref_value(deadframe, 0) - s = lltype.cast_opaque_ptr(lltype.Ptr(S), ref) - assert not s.x - - def test_zero_ptr_field_2(self): - if not isinstance(self.cpu, AbstractLLCPU): - py.test.skip("llgraph does not do zero_ptr_field") - - from rpython.jit.backend.llsupport import symbolic - S = lltype.GcStruct('S', ('x', lltype.Signed), - 
('p', llmemory.GCREF), - ('y', lltype.Signed)) - s = lltype.malloc(S) - s.x = -1296321 - s.y = -4398176 - s_ref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - s.p = s_ref - ofs_p, _ = symbolic.get_field_token(S, 'p', False) - # - self.execute_operation(rop.ZERO_PTR_FIELD, [ - InputArgRef(s_ref), ConstInt(ofs_p)], # OK for now to assume that the - 'void') # 2nd argument is a constant - # - assert s.x == -1296321 - assert s.p == lltype.nullptr(llmemory.GCREF.TO) - assert s.y == -4398176 - def test_zero_array(self): if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("llgraph does not do zero_array") diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/test/test_ll_random.py pypy-5.0.1+dfsg/rpython/jit/backend/test/test_ll_random.py --- pypy-4.0.1+dfsg/rpython/jit/backend/test/test_ll_random.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/test/test_ll_random.py 2016-03-19 16:40:11.000000000 +0000 @@ -745,7 +745,6 @@ OPERATIONS.append(GetInteriorFieldOperation(rop.GETINTERIORFIELD_GC_I)) OPERATIONS.append(GetInteriorFieldOperation(rop.GETINTERIORFIELD_GC_I)) OPERATIONS.append(SetFieldOperation(rop.SETFIELD_GC)) - OPERATIONS.append(ZeroPtrFieldOperation(rop.ZERO_PTR_FIELD)) OPERATIONS.append(SetInteriorFieldOperation(rop.SETINTERIORFIELD_GC)) OPERATIONS.append(NewOperation(rop.NEW)) OPERATIONS.append(NewOperation(rop.NEW_WITH_VTABLE)) diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/test/test_random.py pypy-5.0.1+dfsg/rpython/jit/backend/test/test_random.py --- pypy-4.0.1+dfsg/rpython/jit/backend/test/test_random.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/test/test_random.py 2016-03-19 16:40:15.000000000 +0000 @@ -57,19 +57,11 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None op = ResOperation(opnum, argboxes, descr) - if opnum != rop.ZERO_PTR_FIELD: - result = _execute_arglist(self.cpu, self.fakemetainterp, - opnum, argboxes, descr) - if result is not None: - c_result = 
wrap_constant(result) - op.copy_value_from(c_result) - else: - import ctypes - addr = self.cpu.cast_gcref_to_int(argboxes[0].getref_base()) - offset = argboxes[1].getint() - assert (offset % ctypes.sizeof(ctypes.c_long)) == 0 - ptr = ctypes.cast(addr, ctypes.POINTER(ctypes.c_long)) - ptr[offset / ctypes.sizeof(ctypes.c_long)] = 0 + result = _execute_arglist(self.cpu, self.fakemetainterp, + opnum, argboxes, descr) + if result is not None: + c_result = wrap_constant(result) + op.copy_value_from(c_result) self.loop.operations.append(op) return op diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/test/test_rvmprof.py pypy-5.0.1+dfsg/rpython/jit/backend/test/test_rvmprof.py --- pypy-4.0.1+dfsg/rpython/jit/backend/test/test_rvmprof.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/test/test_rvmprof.py 2016-03-19 16:40:11.000000000 +0000 @@ -0,0 +1,49 @@ +import py +from rpython.rlib import jit +from rpython.rtyper.annlowlevel import llhelper +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rvmprof import cintf +from rpython.jit.backend.x86.arch import WORD +from rpython.jit.codewriter.policy import JitPolicy + +class BaseRVMProfTest(object): + def test_one(self): + py.test.skip("needs thread-locals in the JIT, which is only available " + "after translation") + visited = [] + + def helper(): + stack = cintf.vmprof_tl_stack.getraw() + if stack: + # not during tracing + visited.append(stack.c_value) + else: + visited.append(0) + + llfn = llhelper(lltype.Ptr(lltype.FuncType([], lltype.Void)), helper) + + driver = jit.JitDriver(greens=[], reds='auto') + + def f(n): + i = 0 + while i < n: + driver.jit_merge_point() + i += 1 + llfn() + + class Hooks(jit.JitHookInterface): + def after_compile(self, debug_info): + self.raw_start = debug_info.asminfo.rawstart + + hooks = Hooks() + + null = lltype.nullptr(cintf.VMPROFSTACK) + cintf.vmprof_tl_stack.setraw(null) # make it empty + self.meta_interp(f, [10], policy=JitPolicy(hooks)) + v = 
set(visited) + assert 0 in v + v.remove(0) + assert len(v) == 1 + assert 0 <= list(v)[0] - hooks.raw_start <= 10*1024 + assert cintf.vmprof_tl_stack.getraw() == null + # ^^^ make sure we didn't leave anything dangling diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/x86/arch.py pypy-5.0.1+dfsg/rpython/jit/backend/x86/arch.py --- pypy-4.0.1+dfsg/rpython/jit/backend/x86/arch.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/x86/arch.py 2016-03-19 16:40:11.000000000 +0000 @@ -31,7 +31,7 @@ if WORD == 4: # ebp + ebx + esi + edi + 15 extra words = 19 words - FRAME_FIXED_SIZE = 19 + FRAME_FIXED_SIZE = 19 + 4 # 4 for vmprof, XXX make more compact! PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float # 'threadlocal_addr' is passed as 2nd argument on the stack, @@ -41,7 +41,7 @@ THREADLOCAL_OFS = (FRAME_FIXED_SIZE + 2) * WORD else: # rbp + rbx + r12 + r13 + r14 + r15 + threadlocal + 12 extra words = 19 - FRAME_FIXED_SIZE = 19 + FRAME_FIXED_SIZE = 19 + 4 # 4 for vmprof, XXX make more compact! 
PASS_ON_MY_FRAME = 12 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM # 'threadlocal_addr' is passed as 2nd argument in %esi, diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/x86/assembler.py pypy-5.0.1+dfsg/rpython/jit/backend/x86/assembler.py --- pypy-4.0.1+dfsg/rpython/jit/backend/x86/assembler.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/x86/assembler.py 2016-03-19 16:40:15.000000000 +0000 @@ -12,7 +12,7 @@ from rpython.jit.metainterp.compile import ResumeGuardDescr from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.annlowlevel import llhelper, cast_instance_to_gcref +from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rtyper import rclass from rpython.rlib.jit import AsmInfo from rpython.jit.backend.model import CompiledLoopToken @@ -837,11 +837,56 @@ frame_depth = max(frame_depth, target_frame_depth) return frame_depth + def _call_header_vmprof(self): + from rpython.rlib.rvmprof.rvmprof import cintf, VMPROF_JITTED_TAG + + # tloc = address of pypy_threadlocal_s + if IS_X86_32: + # Can't use esi here, its old value is not saved yet. + # But we can use eax and ecx. + self.mc.MOV_rs(edx.value, THREADLOCAL_OFS) + tloc = edx + old = ecx + else: + # The thread-local value is already in esi. + # We should avoid if possible to use ecx or edx because they + # would be used to pass arguments #3 and #4 (even though, so + # far, the assembler only receives two arguments). 
+ tloc = esi + old = r11 + # eax = address in the stack of a 3-words struct vmprof_stack_s + self.mc.LEA_rs(eax.value, (FRAME_FIXED_SIZE - 4) * WORD) + # old = current value of vmprof_tl_stack + offset = cintf.vmprof_tl_stack.getoffset() + self.mc.MOV_rm(old.value, (tloc.value, offset)) + # eax->next = old + self.mc.MOV_mr((eax.value, 0), old.value) + # eax->value = my esp + self.mc.MOV_mr((eax.value, WORD), esp.value) + # eax->kind = VMPROF_JITTED_TAG + self.mc.MOV_mi((eax.value, WORD * 2), VMPROF_JITTED_TAG) + # save in vmprof_tl_stack the new eax + self.mc.MOV_mr((tloc.value, offset), eax.value) + + def _call_footer_vmprof(self): + from rpython.rlib.rvmprof.rvmprof import cintf + # edx = address of pypy_threadlocal_s + self.mc.MOV_rs(edx.value, THREADLOCAL_OFS) + self.mc.AND_ri(edx.value, ~1) + # eax = (our local vmprof_tl_stack).next + self.mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE - 4 + 0) * WORD) + # save in vmprof_tl_stack the value eax + offset = cintf.vmprof_tl_stack.getoffset() + self.mc.MOV_mr((edx.value, offset), eax.value) + def _call_header(self): self.mc.SUB_ri(esp.value, FRAME_FIXED_SIZE * WORD) self.mc.MOV_sr(PASS_ON_MY_FRAME * WORD, ebp.value) if IS_X86_64: self.mc.MOV_sr(THREADLOCAL_OFS, esi.value) + if self.cpu.translate_support_code: + self._call_header_vmprof() # on X86_64, this uses esi + if IS_X86_64: self.mc.MOV_rr(ebp.value, edi.value) else: self.mc.MOV_rs(ebp.value, (FRAME_FIXED_SIZE + 1) * WORD) @@ -873,6 +918,8 @@ def _call_footer(self): # the return value is the jitframe + if self.cpu.translate_support_code: + self._call_footer_vmprof() self.mc.MOV_rr(eax.value, ebp.value) gcrootmap = self.cpu.gc_ll_descr.gcrootmap @@ -1477,34 +1524,27 @@ genop_getfield_gc_f = _genop_getfield genop_getfield_raw_i = _genop_getfield genop_getfield_raw_f = _genop_getfield - genop_getfield_gc_pure_i = _genop_getfield - genop_getfield_gc_pure_r = _genop_getfield - genop_getfield_gc_pure_f = _genop_getfield - - def _genop_getarrayitem(self, op, arglocs, 
resloc): - base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs - assert isinstance(ofs, ImmedLoc) + + def _genop_gc_load(self, op, arglocs, resloc): + base_loc, ofs_loc, size_loc, sign_loc = arglocs assert isinstance(size_loc, ImmedLoc) - scale = get_scale(size_loc.value) - src_addr = addr_add(base_loc, ofs_loc, ofs.value, scale) + src_addr = addr_add(base_loc, ofs_loc, 0, 0) self.load_from_mem(resloc, src_addr, size_loc, sign_loc) - genop_getarrayitem_gc_i = _genop_getarrayitem - genop_getarrayitem_gc_r = _genop_getarrayitem - genop_getarrayitem_gc_f = _genop_getarrayitem - genop_getarrayitem_gc_pure_i = _genop_getarrayitem - genop_getarrayitem_gc_pure_r = _genop_getarrayitem - genop_getarrayitem_gc_pure_f = _genop_getarrayitem - genop_getarrayitem_raw_i = _genop_getarrayitem - genop_getarrayitem_raw_f = _genop_getarrayitem - - def _genop_raw_load(self, op, arglocs, resloc): - base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs - assert isinstance(ofs, ImmedLoc) - src_addr = addr_add(base_loc, ofs_loc, ofs.value, 0) + genop_gc_load_i = _genop_gc_load + genop_gc_load_r = _genop_gc_load + genop_gc_load_f = _genop_gc_load + + def _genop_gc_load_indexed(self, op, arglocs, resloc): + base_loc, ofs_loc, scale_loc, offset_loc, size_loc, sign_loc = arglocs + assert isinstance(scale_loc, ImmedLoc) + scale = get_scale(scale_loc.value) + src_addr = addr_add(base_loc, ofs_loc, offset_loc.value, scale) self.load_from_mem(resloc, src_addr, size_loc, sign_loc) - genop_raw_load_i = _genop_raw_load - genop_raw_load_f = _genop_raw_load + + genop_gc_load_indexed_i = _genop_gc_load_indexed + genop_gc_load_indexed_r = _genop_gc_load_indexed + genop_gc_load_indexed_f = _genop_gc_load_indexed def _imul_const_scaled(self, mc, targetreg, sourcereg, itemsize): """Produce one operation to do roughly @@ -1551,17 +1591,6 @@ assert isinstance(ofs_loc, ImmedLoc) return AddressLoc(base_loc, temp_loc, shift, ofs_loc.value) - def _genop_getinteriorfield(self, op, arglocs, resloc): - (base_loc, 
ofs_loc, itemsize_loc, fieldsize_loc, - index_loc, temp_loc, sign_loc) = arglocs - src_addr = self._get_interiorfield_addr(temp_loc, index_loc, - itemsize_loc, base_loc, - ofs_loc) - self.load_from_mem(resloc, src_addr, fieldsize_loc, sign_loc) - genop_getinteriorfield_gc_i = _genop_getinteriorfield - genop_getinteriorfield_gc_r = _genop_getinteriorfield - genop_getinteriorfield_gc_f = _genop_getinteriorfield - def genop_discard_increment_debug_counter(self, op, arglocs): # The argument should be an immediate address. This should # generate code equivalent to a GETFIELD_RAW, an ADD(1), and a @@ -1570,36 +1599,18 @@ base_loc, = arglocs self.mc.INC(mem(base_loc, 0)) - def genop_discard_setfield_gc(self, op, arglocs): - base_loc, ofs_loc, size_loc, value_loc = arglocs - assert isinstance(size_loc, ImmedLoc) - dest_addr = AddressLoc(base_loc, ofs_loc) - self.save_into_mem(dest_addr, value_loc, size_loc) - - genop_discard_zero_ptr_field = genop_discard_setfield_gc - - def genop_discard_setinteriorfield_gc(self, op, arglocs): - (base_loc, ofs_loc, itemsize_loc, fieldsize_loc, - index_loc, temp_loc, value_loc) = arglocs - dest_addr = self._get_interiorfield_addr(temp_loc, index_loc, - itemsize_loc, base_loc, - ofs_loc) - self.save_into_mem(dest_addr, value_loc, fieldsize_loc) - - genop_discard_setinteriorfield_raw = genop_discard_setinteriorfield_gc - - def genop_discard_setarrayitem_gc(self, op, arglocs): - base_loc, ofs_loc, value_loc, size_loc, baseofs = arglocs - assert isinstance(baseofs, ImmedLoc) + def genop_discard_gc_store(self, op, arglocs): + base_loc, ofs_loc, value_loc, size_loc = arglocs assert isinstance(size_loc, ImmedLoc) scale = get_scale(size_loc.value) - dest_addr = AddressLoc(base_loc, ofs_loc, scale, baseofs.value) + dest_addr = AddressLoc(base_loc, ofs_loc, 0, 0) self.save_into_mem(dest_addr, value_loc, size_loc) - def genop_discard_raw_store(self, op, arglocs): - base_loc, ofs_loc, value_loc, size_loc, baseofs = arglocs - assert isinstance(baseofs, 
ImmedLoc) - dest_addr = AddressLoc(base_loc, ofs_loc, 0, baseofs.value) + def genop_discard_gc_store_indexed(self, op, arglocs): + base_loc, ofs_loc, value_loc, factor_loc, offset_loc, size_loc = arglocs + assert isinstance(size_loc, ImmedLoc) + scale = get_scale(factor_loc.value) + dest_addr = AddressLoc(base_loc, ofs_loc, scale, offset_loc.value) self.save_into_mem(dest_addr, value_loc, size_loc) def genop_discard_strsetitem(self, op, arglocs): @@ -1621,43 +1632,7 @@ else: assert 0, itemsize - genop_discard_setfield_raw = genop_discard_setfield_gc - genop_discard_setarrayitem_raw = genop_discard_setarrayitem_gc - - def genop_strlen(self, op, arglocs, resloc): - base_loc = arglocs[0] - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - self.mc.MOV(resloc, addr_add_const(base_loc, ofs_length)) - - def genop_unicodelen(self, op, arglocs, resloc): - base_loc = arglocs[0] - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - self.mc.MOV(resloc, addr_add_const(base_loc, ofs_length)) - - def genop_arraylen_gc(self, op, arglocs, resloc): - base_loc, ofs_loc = arglocs - assert isinstance(ofs_loc, ImmedLoc) - self.mc.MOV(resloc, addr_add_const(base_loc, ofs_loc.value)) - - def genop_strgetitem(self, op, arglocs, resloc): - base_loc, ofs_loc = arglocs - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - assert itemsize == 1 - self.mc.MOVZX8(resloc, AddressLoc(base_loc, ofs_loc, 0, basesize)) - - def genop_unicodegetitem(self, op, arglocs, resloc): - base_loc, ofs_loc = arglocs - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - if itemsize == 4: - self.mc.MOV32(resloc, AddressLoc(base_loc, ofs_loc, 2, basesize)) - elif itemsize == 2: - self.mc.MOVZX16(resloc, AddressLoc(base_loc, ofs_loc, 1, basesize)) - else: - assert 0, itemsize + # 
genop_discard_setfield_raw = genop_discard_setfield_gc def genop_math_read_timestamp(self, op, arglocs, resloc): self.mc.RDTSC() @@ -2136,7 +2111,9 @@ if IS_X86_64: tmploc = esi # already the correct place if argloc is tmploc: - self.mc.MOV_rr(esi.value, edi.value) + # this case is theoretical only so far: in practice, + # argloc is always eax, never esi + self.mc.MOV_rr(edi.value, esi.value) argloc = edi else: tmploc = eax diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/x86/regalloc.py pypy-5.0.1+dfsg/rpython/jit/backend/x86/regalloc.py --- pypy-4.0.1+dfsg/rpython/jit/backend/x86/regalloc.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/x86/regalloc.py 2016-03-19 16:40:15.000000000 +0000 @@ -4,8 +4,7 @@ import os, sys from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, - unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) +from rpython.jit.backend.llsupport.descr import CallDescr, unpack_arraydescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op, @@ -372,6 +371,8 @@ i += 1 assert not self.rm.reg_bindings assert not self.xrm.reg_bindings + if not we_are_translated(): + self.assembler.mc.UD2() self.flush_loop() self.assembler.mc.mark_op(None) # end of the loop self.operations = None @@ -408,21 +409,15 @@ self.assembler.guard_success_cc = rx86.Conditions['NZ'] - def _consider_guard_cc(true): - def consider_guard_cc(self, op): - arg = op.getarg(0) - if arg.is_vector(): - loc = self.loc(arg) - self.assembler.guard_vector(op, self.loc(arg), true) - else: - self.load_condition_into_cc(arg) - self.perform_guard(op, [], None) - return consider_guard_cc - - consider_guard_true = _consider_guard_cc(True) - consider_guard_false = _consider_guard_cc(False) - consider_guard_nonnull = 
_consider_guard_cc(True) - consider_guard_isnull = _consider_guard_cc(False) + def _consider_guard_cc(self, op): + arg = op.getarg(0) + self.load_condition_into_cc(arg) + self.perform_guard(op, [], None) + + consider_guard_true = _consider_guard_cc + consider_guard_false = _consider_guard_cc + consider_guard_nonnull = _consider_guard_cc + consider_guard_isnull = _consider_guard_cc def consider_finish(self, op): # the frame is in ebp, but we have to point where in the frame is @@ -921,8 +916,8 @@ args = op.getarglist() N = len(args) # we force all arguments in a reg (unless they are Consts), - # because it will be needed anyway by the following setfield_gc - # or setarrayitem_gc. It avoids loading it twice from the memory. + # because it will be needed anyway by the following gc_load + # It avoids loading it twice from the memory. arglocs = [self.rm.make_sure_var_in_reg(op.getarg(i), args) for i in range(N)] self.perform_discard(op, arglocs) @@ -1038,78 +1033,37 @@ gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) return gcmap - def consider_setfield_gc(self, op): - ofs, size, _ = unpack_fielddescr(op.getdescr()) - ofs_loc = imm(ofs) - size_loc = imm(size) - assert isinstance(size_loc, ImmedLoc) - if size_loc.value == 1: - need_lower_byte = True - else: - need_lower_byte = False + def consider_gc_store(self, op): args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) - value_loc = self.make_sure_var_in_reg(op.getarg(1), args, - need_lower_byte=need_lower_byte) - self.perform_discard(op, [base_loc, ofs_loc, size_loc, value_loc]) - - def consider_zero_ptr_field(self, op): - ofs_loc = imm(op.getarg(1).getint()) - size_loc = imm(WORD) - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), []) - value_loc = imm(0) - self.perform_discard(op, [base_loc, ofs_loc, size_loc, value_loc]) - - consider_setfield_raw = consider_setfield_gc - - def consider_setinteriorfield_gc(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, 
itemsize, fieldsize = imm(t[0]), imm(t[1]), imm(t[2]) - args = op.getarglist() - if fieldsize.value == 1: + size_box = op.getarg(3) + assert isinstance(size_box, ConstInt) + size = size_box.value + assert size >= 1 + if size == 1: need_lower_byte = True else: need_lower_byte = False - box_base, box_index, box_value = args - base_loc = self.rm.make_sure_var_in_reg(box_base, args) - index_loc = self.rm.make_sure_var_in_reg(box_index, args) - value_loc = self.make_sure_var_in_reg(box_value, args, - need_lower_byte=need_lower_byte) - # If 'index_loc' is not an immediate, then we need a 'temp_loc' that - # is a register whose value will be destroyed. It's fine to destroy - # the same register as 'index_loc', but not the other ones. - if not isinstance(index_loc, ImmedLoc): - # ...that is, except in a corner case where 'index_loc' would be - # in the same register as 'value_loc'... - tempvar = TempVar() - temp_loc = self.rm.force_allocate_reg(tempvar, [box_base, - box_value]) - self.rm.possibly_free_var(tempvar) - else: - temp_loc = None - self.rm.possibly_free_var(box_index) - self.rm.possibly_free_var(box_base) - self.possibly_free_var(box_value) - self.perform_discard(op, [base_loc, ofs, itemsize, fieldsize, - index_loc, temp_loc, value_loc]) - - consider_setinteriorfield_raw = consider_setinteriorfield_gc - - def consider_strsetitem(self, op): - args = op.getarglist() - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + value_loc = self.make_sure_var_in_reg(op.getarg(2), args, + need_lower_byte=need_lower_byte) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) - value_loc = self.rm.make_sure_var_in_reg(op.getarg(2), args, - need_lower_byte=True) - self.perform_discard(op, [base_loc, ofs_loc, value_loc]) - - consider_unicodesetitem = consider_strsetitem + self.perform_discard(op, [base_loc, ofs_loc, value_loc, + imm(size)]) - def consider_setarrayitem_gc(self, op): - itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) + def 
consider_gc_store_indexed(self, op): args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) - if itemsize == 1: + scale_box = op.getarg(3) + offset_box = op.getarg(4) + size_box = op.getarg(5) + assert isinstance(scale_box, ConstInt) + assert isinstance(offset_box, ConstInt) + assert isinstance(size_box, ConstInt) + factor = scale_box.value + offset = offset_box.value + size = size_box.value + assert size >= 1 + if size == 1: need_lower_byte = True else: need_lower_byte = False @@ -1117,92 +1071,56 @@ need_lower_byte=need_lower_byte) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) self.perform_discard(op, [base_loc, ofs_loc, value_loc, - imm(itemsize), imm(ofs)]) + imm(factor), imm(offset), imm(size)]) - consider_setarrayitem_raw = consider_setarrayitem_gc - consider_raw_store = consider_setarrayitem_gc + def consider_increment_debug_counter(self, op): + base_loc = self.loc(op.getarg(0)) + self.perform_discard(op, [base_loc]) - def _consider_getfield(self, op): - ofs, size, sign = unpack_fielddescr(op.getdescr()) - ofs_loc = imm(ofs) - size_loc = imm(size) + def _consider_gc_load(self, op): args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) + ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) result_loc = self.force_allocate_reg(op) - if sign: + size_box = op.getarg(2) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 self.perform(op, [base_loc, ofs_loc, size_loc, sign_loc], result_loc) - consider_getfield_gc_i = _consider_getfield - consider_getfield_gc_r = _consider_getfield - consider_getfield_gc_f = _consider_getfield - consider_getfield_raw_i = _consider_getfield - consider_getfield_raw_f = _consider_getfield - consider_getfield_gc_pure_i = _consider_getfield - consider_getfield_gc_pure_r = _consider_getfield - consider_getfield_gc_pure_f = _consider_getfield + 
consider_gc_load_i = _consider_gc_load + consider_gc_load_r = _consider_gc_load + consider_gc_load_f = _consider_gc_load - def consider_increment_debug_counter(self, op): - base_loc = self.loc(op.getarg(0)) - self.perform_discard(op, [base_loc]) - - def _consider_getarrayitem(self, op): - itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) + def _consider_gc_load_indexed(self, op): args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) result_loc = self.force_allocate_reg(op) - if sign: + scale_box = op.getarg(2) + offset_box = op.getarg(3) + size_box = op.getarg(4) + assert isinstance(scale_box, ConstInt) + assert isinstance(offset_box, ConstInt) + assert isinstance(size_box, ConstInt) + scale = scale_box.value + offset = offset_box.value + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 - self.perform(op, [base_loc, ofs_loc, imm(itemsize), imm(ofs), - sign_loc], result_loc) + locs = [base_loc, ofs_loc, imm(scale), imm(offset), size_loc, sign_loc] + self.perform(op, locs, result_loc) - consider_getarrayitem_gc_i = _consider_getarrayitem - consider_getarrayitem_gc_r = _consider_getarrayitem - consider_getarrayitem_gc_f = _consider_getarrayitem - consider_getarrayitem_raw_i = _consider_getarrayitem - consider_getarrayitem_raw_f = _consider_getarrayitem - consider_getarrayitem_gc_pure_i = _consider_getarrayitem - consider_getarrayitem_gc_pure_r = _consider_getarrayitem - consider_getarrayitem_gc_pure_f = _consider_getarrayitem - consider_raw_load_i = _consider_getarrayitem - consider_raw_load_f = _consider_getarrayitem - - def _consider_getinteriorfield(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = imm(t[0]), imm(t[1]), imm(t[2]), t[3] - if sign: - sign_loc = imm1 - else: - sign_loc = imm0 - args = op.getarglist() - base_loc = 
self.rm.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) - # 'base' and 'index' are put in two registers (or one if 'index' - # is an immediate). 'result' can be in the same register as - # 'index' but must be in a different register than 'base'. - result_loc = self.force_allocate_reg(op, [op.getarg(0)]) - assert isinstance(result_loc, RegLoc) - # two cases: 1) if result_loc is a normal register, use it as temp_loc - if not result_loc.is_xmm: - temp_loc = result_loc - else: - # 2) if result_loc is an xmm register, we (likely) need another - # temp_loc that is a normal register. It can be in the same - # register as 'index' but not 'base'. - tempvar = TempVar() - temp_loc = self.rm.force_allocate_reg(tempvar, [op.getarg(0)]) - self.rm.possibly_free_var(tempvar) - self.perform(op, [base_loc, ofs, itemsize, fieldsize, - index_loc, temp_loc, sign_loc], result_loc) - - consider_getinteriorfield_gc_i = _consider_getinteriorfield - consider_getinteriorfield_gc_r = _consider_getinteriorfield - consider_getinteriorfield_gc_f = _consider_getinteriorfield + consider_gc_load_indexed_i = _consider_gc_load_indexed + consider_gc_load_indexed_r = _consider_gc_load_indexed + consider_gc_load_indexed_f = _consider_gc_load_indexed def consider_int_is_true(self, op): # doesn't need arg to be in a register @@ -1227,32 +1145,6 @@ resloc = self.force_allocate_reg(op, [op.getarg(0)]) self.perform(op, [argloc], resloc) - def consider_strlen(self, op): - args = op.getarglist() - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) - result_loc = self.rm.force_allocate_reg(op) - self.perform(op, [base_loc], result_loc) - - consider_unicodelen = consider_strlen - - def consider_arraylen_gc(self, op): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - args = op.getarglist() - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) - result_loc = 
self.rm.force_allocate_reg(op) - self.perform(op, [base_loc, imm(ofs)], result_loc) - - def consider_strgetitem(self, op): - args = op.getarglist() - base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) - ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) - result_loc = self.rm.force_allocate_reg(op) - self.perform(op, [base_loc, ofs_loc], result_loc) - - consider_unicodegetitem = consider_strgetitem - def consider_copystrcontent(self, op): self._consider_copystrcontent(op, is_unicode=False) diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/x86/runner.py pypy-5.0.1+dfsg/rpython/jit/backend/x86/runner.py --- pypy-4.0.1+dfsg/rpython/jit/backend/x86/runner.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/x86/runner.py 2016-03-19 16:40:15.000000000 +0000 @@ -24,6 +24,9 @@ with_threads = False frame_reg = regloc.ebp + # can an ISA instruction handle a factor to the offset? + load_supported_factors = (1,2,4,8) + from rpython.jit.backend.x86.arch import JITFRAME_FIXED_SIZE all_reg_indexes = gpr_reg_mgr_cls.all_reg_indexes gen_regs = gpr_reg_mgr_cls.all_regs diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/x86/test/test_runner.py pypy-5.0.1+dfsg/rpython/jit/backend/x86/test/test_runner.py --- pypy-4.0.1+dfsg/rpython/jit/backend/x86/test/test_runner.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/x86/test/test_runner.py 2016-03-19 16:40:11.000000000 +0000 @@ -265,6 +265,22 @@ 'void', ofsi) assert p.i == 3**33 + def test_getfield_64bit_offset(self): + if WORD == 4: + py.test.skip("only for 64 bits") + TP = lltype.Struct('S', ('i', lltype.Signed)) + p = lltype.malloc(TP, flavor='raw') + p.i = 0x123456789ABC + offset = 3**33 + val = rffi.cast(lltype.Signed, rffi.cast(lltype.Signed, p) - offset) + res = self.execute_operation(rop.GC_LOAD_I, + [InputArgInt(val), + ConstInt(offset), + ConstInt(WORD)], + 'int') + assert res == 0x123456789ABC + lltype.free(p, flavor='raw') + def 
test_and_mask_common_patterns(self): cases = [8, 16, 24] if WORD == 8: diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/x86/test/test_rvmprof.py pypy-5.0.1+dfsg/rpython/jit/backend/x86/test/test_rvmprof.py --- pypy-4.0.1+dfsg/rpython/jit/backend/x86/test/test_rvmprof.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/x86/test/test_rvmprof.py 2016-03-19 16:40:11.000000000 +0000 @@ -0,0 +1,7 @@ + +import py +from rpython.jit.backend.test.test_rvmprof import BaseRVMProfTest +from rpython.jit.backend.x86.test.test_basic import Jit386Mixin + +class TestFfiCall(Jit386Mixin, BaseRVMProfTest): + pass \ No newline at end of file diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/x86/test/test_strstorage.py pypy-5.0.1+dfsg/rpython/jit/backend/x86/test/test_strstorage.py --- pypy-4.0.1+dfsg/rpython/jit/backend/x86/test/test_strstorage.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/x86/test/test_strstorage.py 2016-03-19 16:40:11.000000000 +0000 @@ -0,0 +1,8 @@ +from rpython.jit.backend.x86.test.test_basic import Jit386Mixin +from rpython.jit.metainterp.test.test_strstorage import TestStrStorage as _TestStrStorage + + +class TestStrStorage(Jit386Mixin, _TestStrStorage): + # for the individual tests see + # ====> ../../../metainterp/test/test_strstorage.py + pass diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/x86/test/test_zrpy_vmprof.py pypy-5.0.1+dfsg/rpython/jit/backend/x86/test/test_zrpy_vmprof.py --- pypy-4.0.1+dfsg/rpython/jit/backend/x86/test/test_zrpy_vmprof.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/x86/test/test_zrpy_vmprof.py 2016-03-19 16:40:11.000000000 +0000 @@ -0,0 +1,7 @@ + +from rpython.jit.backend.llsupport.test.zrpy_vmprof_test import CompiledVmprofTest + +class TestZVMprof(CompiledVmprofTest): + + gcrootfinder = "shadowstack" + gc = "incminimark" \ No newline at end of file diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/x86/test/test_zvmprof.py 
pypy-5.0.1+dfsg/rpython/jit/backend/x86/test/test_zvmprof.py --- pypy-4.0.1+dfsg/rpython/jit/backend/x86/test/test_zvmprof.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/x86/test/test_zvmprof.py 2016-03-19 16:40:11.000000000 +0000 @@ -0,0 +1,7 @@ + +from rpython.jit.backend.llsupport.test.zrpy_vmprof_test import CompiledVmprofTest + +class TestZVMprof(CompiledVmprofTest): + + gcrootfinder = "shadowstack" + gc = "incminimark" \ No newline at end of file diff -Nru pypy-4.0.1+dfsg/rpython/jit/backend/x86/vector_ext.py pypy-5.0.1+dfsg/rpython/jit/backend/x86/vector_ext.py --- pypy-4.0.1+dfsg/rpython/jit/backend/x86/vector_ext.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/backend/x86/vector_ext.py 2016-03-19 16:40:15.000000000 +0000 @@ -10,7 +10,8 @@ xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, AddressLoc) from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) -from rpython.jit.metainterp.resoperation import rop, ResOperation +from rpython.jit.metainterp.resoperation import (rop, ResOperation, + VectorOp, VectorGuardOp) from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem import lltype @@ -34,8 +35,17 @@ class VectorAssemblerMixin(object): _mixin_ = True + def genop_guard_vec_guard_true(self, guard_op, guard_token, locs, resloc): + self.implement_guard(guard_token) + + def genop_guard_vec_guard_false(self, guard_op, guard_token, locs, resloc): + self.guard_success_cc = rx86.invert_condition(self.guard_success_cc) + self.implement_guard(guard_token) + def guard_vector(self, guard_op, loc, true): + assert isinstance(guard_op, VectorGuardOp) arg = guard_op.getarg(0) + assert isinstance(arg, VectorOp) size = arg.bytesize temp = X86_64_XMM_SCRATCH_REG load = arg.bytesize * arg.count - self.cpu.vector_register_size @@ -562,7 +572,8 @@ 
def consider_vec_arith(self, op): lhs = op.getarg(0) - size = lhs.bytesize + assert isinstance(op, VectorOp) + size = op.bytesize args = op.getarglist() loc1 = self.make_sure_var_in_reg(op.getarg(1), args) loc0 = self.xrm.force_result_in_reg(op, op.getarg(0), args) @@ -579,6 +590,7 @@ def consider_vec_arith_unary(self, op): lhs = op.getarg(0) + assert isinstance(lhs, VectorOp) args = op.getarglist() res = self.xrm.force_result_in_reg(op, op.getarg(0), args) self.perform(op, [res, imm(lhs.bytesize)], res) @@ -589,13 +601,16 @@ def consider_vec_logic(self, op): lhs = op.getarg(0) + assert isinstance(lhs, VectorOp) args = op.getarglist() source = self.make_sure_var_in_reg(op.getarg(1), args) result = self.xrm.force_result_in_reg(op, op.getarg(0), args) self.perform(op, [source, imm(lhs.bytesize)], result) def consider_vec_float_eq(self, op): + assert isinstance(op, VectorOp) lhs = op.getarg(0) + assert isinstance(lhs, VectorOp) args = op.getarglist() rhsloc = self.make_sure_var_in_reg(op.getarg(1), args) lhsloc = self.xrm.force_result_in_reg(op, op.getarg(0), args) @@ -612,6 +627,7 @@ def consider_vec_pack_i(self, op): # new_res = vec_pack_i(res, src, index, count) + assert isinstance(op, VectorOp) arg = op.getarg(1) index = op.getarg(2) count = op.getarg(3) @@ -629,6 +645,7 @@ consider_vec_pack_f = consider_vec_pack_i def consider_vec_unpack_i(self, op): + assert isinstance(op, VectorOp) index = op.getarg(1) count = op.getarg(2) assert isinstance(index, ConstInt) @@ -642,6 +659,7 @@ # unpack into iX box resloc = self.force_allocate_reg(op, args) arg = op.getarg(0) + assert isinstance(arg, VectorOp) size = arg.bytesize residx = 0 args = op.getarglist() @@ -651,6 +669,7 @@ consider_vec_unpack_f = consider_vec_unpack_i def consider_vec_expand_f(self, op): + assert isinstance(op, VectorOp) arg = op.getarg(0) args = op.getarglist() if arg.is_constant(): @@ -662,6 +681,7 @@ self.perform(op, [srcloc, imm(op.bytesize)], resloc) def consider_vec_expand_i(self, op): + assert 
isinstance(op, VectorOp) arg = op.getarg(0) args = op.getarglist() if arg.is_constant(): @@ -672,15 +692,19 @@ self.perform(op, [srcloc, imm(op.bytesize)], resloc) def consider_vec_int_signext(self, op): + assert isinstance(op, VectorOp) args = op.getarglist() resloc = self.xrm.force_result_in_reg(op, op.getarg(0), args) - size = op.cast_from_bytesize() + arg = op.getarg(0) + assert isinstance(arg, VectorOp) + size = arg.bytesize assert size > 0 self.perform(op, [resloc, imm(size), imm(op.bytesize)], resloc) def consider_vec_int_is_true(self, op): args = op.getarglist() arg = op.getarg(0) + assert isinstance(arg, VectorOp) argloc = self.loc(arg) resloc = self.xrm.force_result_in_reg(op, arg, args) self.perform(op, [resloc,imm(arg.bytesize)], None) @@ -701,3 +725,16 @@ consider_vec_cast_int_to_float = consider_vec_cast_float_to_int consider_vec_cast_float_to_singlefloat = consider_vec_cast_float_to_int consider_vec_cast_singlefloat_to_float = consider_vec_cast_float_to_int + + def consider_vec_guard_true(self, op): + arg = op.getarg(0) + loc = self.loc(arg) + self.assembler.guard_vector(op, self.loc(arg), True) + self.perform_guard(op, [], None) + + def consider_vec_guard_false(self, op): + arg = op.getarg(0) + loc = self.loc(arg) + self.assembler.guard_vector(op, self.loc(arg), False) + self.perform_guard(op, [], None) + diff -Nru pypy-4.0.1+dfsg/rpython/jit/codewriter/codewriter.py pypy-5.0.1+dfsg/rpython/jit/codewriter/codewriter.py --- pypy-4.0.1+dfsg/rpython/jit/codewriter/codewriter.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/codewriter/codewriter.py 2016-03-19 16:40:11.000000000 +0000 @@ -25,10 +25,10 @@ rtyper = support.annotate(func, values) graph = rtyper.annotator.translator.graphs[0] jitcode = JitCode("test") - self.transform_graph_to_jitcode(graph, jitcode, True) + self.transform_graph_to_jitcode(graph, jitcode, True, 0) return jitcode - def transform_graph_to_jitcode(self, graph, jitcode, verbose): + def 
transform_graph_to_jitcode(self, graph, jitcode, verbose, index): """Transform a graph into a JitCode containing the same bytecode in a different format. """ @@ -58,6 +58,7 @@ # constants are cast to their normalized type (Signed, GCREF or # Float). self.assembler.assemble(ssarepr, jitcode) + jitcode.index = index # # print the resulting assembler if self.debug: @@ -67,13 +68,16 @@ log.info("making JitCodes...") self.callcontrol.grab_initial_jitcodes() count = 0 + all_jitcodes = [] for graph, jitcode in self.callcontrol.enum_pending_graphs(): - self.transform_graph_to_jitcode(graph, jitcode, verbose) + self.transform_graph_to_jitcode(graph, jitcode, verbose, len(all_jitcodes)) + all_jitcodes.append(jitcode) count += 1 if not count % 500: log.info("Produced %d jitcodes" % count) self.assembler.finished(self.callcontrol.callinfocollection) log.info("there are %d JitCode instances." % count) + return all_jitcodes def setup_vrefinfo(self, vrefinfo): # must be called at most once diff -Nru pypy-4.0.1+dfsg/rpython/jit/codewriter/effectinfo.py pypy-5.0.1+dfsg/rpython/jit/codewriter/effectinfo.py --- pypy-4.0.1+dfsg/rpython/jit/codewriter/effectinfo.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/codewriter/effectinfo.py 2016-03-19 16:40:11.000000000 +0000 @@ -330,15 +330,11 @@ return op.opname == 'jit_force_quasi_immutable' class RandomEffectsAnalyzer(BoolGraphAnalyzer): - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - if funcobj.random_effects_on_gcobjs: - return True - except (AttributeError, lltype.DelayedPointer): - return True # better safe than sorry + def analyze_external_call(self, funcobj, seen=None): + if funcobj.random_effects_on_gcobjs: + return True return super(RandomEffectsAnalyzer, self).analyze_external_call( - op, seen) + funcobj, seen) def analyze_simple_operation(self, op, graphinfo): return False diff -Nru pypy-4.0.1+dfsg/rpython/jit/codewriter/jitcode.py 
pypy-5.0.1+dfsg/rpython/jit/codewriter/jitcode.py --- pypy-4.0.1+dfsg/rpython/jit/codewriter/jitcode.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/codewriter/jitcode.py 2016-03-19 16:40:11.000000000 +0000 @@ -7,7 +7,7 @@ _empty_i = [] _empty_r = [] _empty_f = [] - + def __init__(self, name, fnaddr=None, calldescr=None, called_from=None): self.name = name self.fnaddr = fnaddr @@ -141,17 +141,14 @@ def get_register_index_f(self, index): return ord(self.live_f[index]) - def enumerate_vars(self, callback_i, callback_r, callback_f, spec): - index = 0 + def enumerate_vars(self, callback_i, callback_r, callback_f, spec, index): for i in range(self.get_register_count_i()): - callback_i(index, self.get_register_index_i(i)) - index += 1 + index = callback_i(index, self.get_register_index_i(i)) for i in range(self.get_register_count_r()): - callback_r(index, self.get_register_index_r(i)) - index += 1 + index = callback_r(index, self.get_register_index_r(i)) for i in range(self.get_register_count_f()): - callback_f(index, self.get_register_index_f(i)) - index += 1 + index = callback_f(index, self.get_register_index_f(i)) + return index enumerate_vars._annspecialcase_ = 'specialize:arg(4)' _liveness_cache = {} diff -Nru pypy-4.0.1+dfsg/rpython/jit/codewriter/jtransform.py pypy-5.0.1+dfsg/rpython/jit/codewriter/jtransform.py --- pypy-4.0.1+dfsg/rpython/jit/codewriter/jtransform.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/codewriter/jtransform.py 2016-03-19 16:40:11.000000000 +0000 @@ -784,11 +784,13 @@ return [] # check for _immutable_fields_ hints immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + need_live = False if immut: if (self.callcontrol is not None and self.callcontrol.could_be_green_field(v_inst.concretetype.TO, c_fieldname.value)): pure = '_greenfield' + need_live = True else: pure = '_pure' else: @@ -815,10 +817,12 @@ descr1 = self.cpu.fielddescrof( v_inst.concretetype.TO, 
quasiimmut.get_mutate_field_name(c_fieldname.value)) - op1 = [SpaceOperation('-live-', [], None), + return [SpaceOperation('-live-', [], None), SpaceOperation('record_quasiimmut_field', [v_inst, descr, descr1], None), op1] + if need_live: + return [SpaceOperation('-live-', [], None), op1] return op1 def rewrite_op_setfield(self, op, override_type=None): @@ -1008,12 +1012,11 @@ return SpaceOperation('getarrayitem_gc_i', [op.args[0], v_index, bytearraydescr], op.result) - else: + elif op.result.concretetype is lltype.Void: + return + elif isinstance(op.args[0].concretetype.TO, lltype.GcArray): + # special-case 1: GcArray of Struct v_inst, v_index, c_field = op.args - if op.result.concretetype is lltype.Void: - return - # only GcArray of Struct supported - assert isinstance(v_inst.concretetype.TO, lltype.GcArray) STRUCT = v_inst.concretetype.TO.OF assert isinstance(STRUCT, lltype.Struct) descr = self.cpu.interiorfielddescrof(v_inst.concretetype.TO, @@ -1022,6 +1025,22 @@ kind = getkind(op.result.concretetype)[0] return SpaceOperation('getinteriorfield_gc_%s' % kind, args, op.result) + #elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): + # # special-case 2: GcStruct with Array field + # ---was added in the faster-rstruct branch,--- + # ---no longer directly supported--- + # v_inst, c_field, v_index = op.args + # STRUCT = v_inst.concretetype.TO + # ARRAY = getattr(STRUCT, c_field.value) + # assert isinstance(ARRAY, lltype.Array) + # arraydescr = self.cpu.arraydescrof(STRUCT) + # kind = getkind(op.result.concretetype)[0] + # assert kind in ('i', 'f') + # return SpaceOperation('getarrayitem_gc_%s' % kind, + # [op.args[0], v_index, arraydescr], + # op.result) + else: + assert False, 'not supported' def rewrite_op_setinteriorfield(self, op): assert len(op.args) == 4 @@ -1071,6 +1090,25 @@ return SpaceOperation('raw_load_%s' % kind, [op.args[0], op.args[1], descr], op.result) + def rewrite_op_gc_load_indexed(self, op): + T = op.result.concretetype + kind = 
getkind(T)[0] + assert kind != 'r' + descr = self.cpu.arraydescrof(rffi.CArray(T)) + if (not isinstance(op.args[2], Constant) or + not isinstance(op.args[3], Constant)): + raise NotImplementedError("gc_load_indexed: 'scale' and 'base_ofs'" + " should be constants") + # xxx hard-code the size in bytes at translation time, which is + # probably fine and avoids lots of issues later + bytes = descr.get_item_size_in_bytes() + if descr.is_item_signed(): + bytes = -bytes + c_bytes = Constant(bytes, lltype.Signed) + return SpaceOperation('gc_load_indexed_%s' % kind, + [op.args[0], op.args[1], + op.args[2], op.args[3], c_bytes], op.result) + def _rewrite_equality(self, op, opname): arg0, arg1 = op.args if isinstance(arg0, Constant) and not arg0.value: @@ -1130,10 +1168,13 @@ def rewrite_op_force_cast(self, op): v_arg = op.args[0] v_result = op.result - assert not self._is_gc(v_arg) - if v_arg.concretetype == v_result.concretetype: return + elif self._is_gc(v_arg) and self._is_gc(v_result): + # cast from GC to GC is always fine + return + else: + assert not self._is_gc(v_arg) float_arg = v_arg.concretetype in [lltype.Float, lltype.SingleFloat] float_res = v_result.concretetype in [lltype.Float, lltype.SingleFloat] @@ -2005,6 +2046,11 @@ self.vable_flags[op.args[0]] = op.args[2].value return [] + def rewrite_op_jit_enter_portal_frame(self, op): + return [op] + def rewrite_op_jit_leave_portal_frame(self, op): + return [op] + # --------- # ll_math.sqrt_nonneg() diff -Nru pypy-4.0.1+dfsg/rpython/jit/codewriter/support.py pypy-5.0.1+dfsg/rpython/jit/codewriter/support.py --- pypy-4.0.1+dfsg/rpython/jit/codewriter/support.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/codewriter/support.py 2016-03-19 16:40:11.000000000 +0000 @@ -246,12 +246,12 @@ def _ll_2_int_floordiv_ovf_zer(x, y): if y == 0: raise ZeroDivisionError - if x == -sys.maxint - 1 and y == -1: - raise OverflowError - return llop.int_floordiv(lltype.Signed, x, y) + return 
_ll_2_int_floordiv_ovf(x, y) def _ll_2_int_floordiv_ovf(x, y): - if x == -sys.maxint - 1 and y == -1: + # intentionally not short-circuited to produce only one guard + # and to remove the check fully if one of the arguments is known + if (x == -sys.maxint - 1) & (y == -1): raise OverflowError return llop.int_floordiv(lltype.Signed, x, y) @@ -263,12 +263,11 @@ def _ll_2_int_mod_ovf_zer(x, y): if y == 0: raise ZeroDivisionError - if x == -sys.maxint - 1 and y == -1: - raise OverflowError - return llop.int_mod(lltype.Signed, x, y) + return _ll_2_int_mod_ovf(x, y) def _ll_2_int_mod_ovf(x, y): - if x == -sys.maxint - 1 and y == -1: + #see comment in _ll_2_int_floordiv_ovf + if (x == -sys.maxint - 1) & (y == -1): raise OverflowError return llop.int_mod(lltype.Signed, x, y) diff -Nru pypy-4.0.1+dfsg/rpython/jit/codewriter/test/test_jtransform.py pypy-5.0.1+dfsg/rpython/jit/codewriter/test/test_jtransform.py --- pypy-4.0.1+dfsg/rpython/jit/codewriter/test/test_jtransform.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/codewriter/test/test_jtransform.py 2016-03-19 16:40:15.000000000 +0000 @@ -1024,7 +1024,8 @@ v1 = varoftype(lltype.Ptr(S)) v2 = varoftype(lltype.Char) op = SpaceOperation('getfield', [v1, Constant('x', lltype.Void)], v2) - op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op) + op0, op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op) + assert op0.opname == '-live-' assert op1.opname == 'getfield_gc_i_greenfield' assert op1.args == [v1, ('fielddescr', S, 'x')] assert op1.result == v2 @@ -1332,7 +1333,7 @@ tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_', loop_invariant=loop_inv) OS_THREADLOCALREF_GET = effectinfo.EffectInfo.OS_THREADLOCALREF_GET - c = const(tlfield.offset) + c = const(tlfield.getoffset()) v = varoftype(lltype.Signed) op = SpaceOperation('threadlocalref_get', [c], v) cc = FakeBuiltinCallControl() diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/blackhole.py 
pypy-5.0.1+dfsg/rpython/jit/metainterp/blackhole.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/blackhole.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/blackhole.py 2016-03-19 16:40:11.000000000 +0000 @@ -944,6 +944,14 @@ pass @arguments("i") + def bhimpl_jit_enter_portal_frame(x): + pass + + @arguments() + def bhimpl_jit_leave_portal_frame(): + pass + + @arguments("i") def bhimpl_int_assert_green(x): pass @arguments("r") @@ -1140,35 +1148,45 @@ @arguments("cpu", "i", "R", "d", returns="i") def bhimpl_residual_call_r_i(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_i(func, None, args_r, None, calldescr) @arguments("cpu", "i", "R", "d", returns="r") def bhimpl_residual_call_r_r(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_r(func, None, args_r, None, calldescr) @arguments("cpu", "i", "R", "d") def bhimpl_residual_call_r_v(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_v(func, None, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d", returns="i") def bhimpl_residual_call_ir_i(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_i(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d", returns="r") def bhimpl_residual_call_ir_r(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_r(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d") def bhimpl_residual_call_ir_v(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_v(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="i") def bhimpl_residual_call_irf_i(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_i(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="r") def bhimpl_residual_call_irf_r(cpu, func, 
args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_r(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="f") def bhimpl_residual_call_irf_f(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_f(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d") def bhimpl_residual_call_irf_v(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_v(func, args_i, args_r, args_f, calldescr) # conditional calls - note that they cannot return stuff @@ -1196,44 +1214,54 @@ @arguments("cpu", "j", "R", returns="i") def bhimpl_inline_call_r_i(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "R", returns="r") def bhimpl_inline_call_r_r(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "R") def bhimpl_inline_call_r_v(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", returns="i") def bhimpl_inline_call_ir_i(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", returns="r") def bhimpl_inline_call_ir_r(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R") def bhimpl_inline_call_ir_v(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="i") def 
bhimpl_inline_call_irf_i(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="r") def bhimpl_inline_call_irf_r(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="f") def bhimpl_inline_call_irf_f(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_f(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F") def bhimpl_inline_call_irf_v(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @@ -1434,6 +1462,13 @@ def bhimpl_raw_load_f(cpu, addr, offset, arraydescr): return cpu.bh_raw_load_f(addr, offset, arraydescr) + @arguments("cpu", "r", "i", "i", "i", "i", returns="i") + def bhimpl_gc_load_indexed_i(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_i(addr, index,scale,base_ofs, bytes) + @arguments("cpu", "r", "i", "i", "i", "i", returns="f") + def bhimpl_gc_load_indexed_f(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_f(addr, index,scale,base_ofs, bytes) + @arguments("r", "d", "d") def bhimpl_record_quasiimmut_field(struct, fielddescr, mutatefielddescr): pass @@ -1513,6 +1548,8 @@ if not self.nextblackholeinterp: self._exit_frame_with_exception(current_exc) return current_exc + finally: + workaround2200.active = False # # pass the frame's return value to the caller caller = self.nextblackholeinterp @@ -1652,6 +1689,7 @@ #debug_start('jit-blackhole') blackholeinterp = blackhole_from_resumedata( metainterp_sd.blackholeinterpbuilder, + metainterp_sd.jitcodes, jitdriver_sd, resumedescr, deadframe, @@ -1685,3 
+1723,10 @@ # _run_forever(firstbh, current_exc) convert_and_run_from_pyjitpl._dont_inline_ = True + +# ____________________________________________________________ + +class WorkaroundIssue2200(object): + pass +workaround2200 = WorkaroundIssue2200() +workaround2200.active = False diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/compile.py pypy-5.0.1+dfsg/rpython/jit/metainterp/compile.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/compile.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/compile.py 2016-03-19 16:40:15.000000000 +0000 @@ -15,8 +15,9 @@ TargetToken, AbstractFailDescr, ConstInt) from rpython.jit.metainterp import history, jitexc from rpython.jit.metainterp.optimize import InvalidLoop -from rpython.jit.metainterp.resume import (NUMBERING, PENDINGFIELDSP, +from rpython.jit.metainterp.resume import (PENDINGFIELDSP, ResumeDataDirectReader, AccumInfo) +from rpython.jit.metainterp.resumecode import NUMBERING from rpython.jit.codewriter import heaptracker, longlong @@ -842,13 +843,11 @@ class ResumeGuardDescr(AbstractResumeGuardDescr): _attrs_ = ('rd_numb', 'rd_count', 'rd_consts', 'rd_virtuals', - 'rd_frame_info_list', 'rd_pendingfields', 'status') - + 'rd_pendingfields', 'status') rd_numb = lltype.nullptr(NUMBERING) rd_count = 0 rd_consts = None rd_virtuals = None - rd_frame_info_list = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) def copy_all_attributes_from(self, other): @@ -857,7 +856,6 @@ assert isinstance(other, ResumeGuardDescr) self.rd_count = other.rd_count self.rd_consts = other.rd_consts - self.rd_frame_info_list = other.rd_frame_info_list self.rd_pendingfields = other.rd_pendingfields self.rd_virtuals = other.rd_virtuals self.rd_numb = other.rd_numb diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/executor.py pypy-5.0.1+dfsg/rpython/jit/metainterp/executor.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/executor.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/executor.py 
2016-03-19 16:40:11.000000000 +0000 @@ -367,7 +367,6 @@ rop.INCREMENT_DEBUG_COUNTER, rop.COND_CALL_GC_WB, rop.COND_CALL_GC_WB_ARRAY, - rop.ZERO_PTR_FIELD, rop.ZERO_ARRAY, rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, @@ -397,6 +396,12 @@ rop.VEC_GETARRAYITEM_GC_I, rop.VEC_GETARRAYITEM_GC_F, rop.VEC_SETARRAYITEM_GC, + rop.GC_LOAD_I, + rop.GC_LOAD_R, + rop.GC_LOAD_F, + rop.GC_LOAD_INDEXED_R, + rop.GC_STORE, + rop.GC_STORE_INDEXED, ): # list of opcodes never executed by pyjitpl continue if rop._VEC_PURE_FIRST <= value <= rop._VEC_PURE_LAST: diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/heapcache.py pypy-5.0.1+dfsg/rpython/jit/metainterp/heapcache.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/heapcache.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/heapcache.py 2016-03-19 16:40:11.000000000 +0000 @@ -168,9 +168,6 @@ elif (opnum != rop.GETFIELD_GC_R and opnum != rop.GETFIELD_GC_I and opnum != rop.GETFIELD_GC_F and - opnum != rop.GETFIELD_GC_PURE_R and - opnum != rop.GETFIELD_GC_PURE_I and - opnum != rop.GETFIELD_GC_PURE_F and opnum != rop.PTR_EQ and opnum != rop.PTR_NE and opnum != rop.INSTANCE_PTR_EQ and diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/history.py pypy-5.0.1+dfsg/rpython/jit/metainterp/history.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/history.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/history.py 2016-03-19 16:40:11.000000000 +0000 @@ -68,8 +68,8 @@ return box.value def repr_rpython(box, typechars): - return '%s/%s%d' % (box._get_hash_(), typechars, - compute_unique_id(box)) + return '%s/%s' % (box._get_hash_(), typechars, + ) #compute_unique_id(box)) class XxxAbstractValue(object): @@ -816,9 +816,6 @@ if 'getfield_gc' in check: assert check.pop('getfield_gc') == 0 check['getfield_gc_i'] = check['getfield_gc_r'] = check['getfield_gc_f'] = 0 - if 'getfield_gc_pure' in check: - assert check.pop('getfield_gc_pure') == 0 - check['getfield_gc_pure_i'] = check['getfield_gc_pure_r'] = 
check['getfield_gc_pure_f'] = 0 if 'getarrayitem_gc_pure' in check: assert check.pop('getarrayitem_gc_pure') == 0 check['getarrayitem_gc_pure_i'] = check['getarrayitem_gc_pure_r'] = check['getarrayitem_gc_pure_f'] = 0 diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/jitprof.py pypy-5.0.1+dfsg/rpython/jit/metainterp/jitprof.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/jitprof.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/jitprof.py 2016-03-19 16:40:11.000000000 +0000 @@ -51,7 +51,7 @@ class Profiler(BaseProfiler): initialized = False - timer = time.time + timer = staticmethod(time.time) starttime = 0 t1 = 0 times = None diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/logger.py pypy-5.0.1+dfsg/rpython/jit/metainterp/logger.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/logger.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/logger.py 2016-03-19 16:40:11.000000000 +0000 @@ -142,8 +142,12 @@ elif arg is None: return 'None' elif arg.is_vector(): - suffix = '[%dx%s%d]' % (arg.count, arg.datatype, arg.bytesize * 8) - return 'v' + str(mv) + suffix + # cannot infer this information, VectorizationInfo + # might be lost here already + #vecinfo = arg.get_forwarded() + #assert isinstance(vecinfo, VectorizationInfo) + #suffix = '[%dx%s%d]' % (vecinfo.count, vecinfo.datatype, vecinfo.bytesize * 8) + return 'v' + str(mv) elif arg.type == 'i': return 'i' + str(mv) elif arg.type == 'r': diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/dependency.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/dependency.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/dependency.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/dependency.py 2016-03-19 16:40:11.000000000 +0000 @@ -18,7 +18,6 @@ , (rop.SETINTERIORFIELD_RAW, 0, -1) , (rop.SETFIELD_GC, 0, -1) , (rop.SETFIELD_RAW, 0, -1) - , (rop.ZERO_PTR_FIELD, 0, -1) , (rop.ZERO_ARRAY, 0, -1) , (rop.STRSETITEM, 
0, -1) , (rop.UNICODESETITEM, 0, -1) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/heap.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/heap.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/heap.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/heap.py 2016-03-19 16:40:11.000000000 +0000 @@ -21,7 +21,10 @@ pass -class CachedField(object): +class AbstractCachedEntry(object): + """ abstract base class abstracting over the difference between caching + struct fields and array items. """ + def __init__(self): # Cache information for a field descr, or for an (array descr, index) # pair. It can be in one of two states: @@ -29,8 +32,8 @@ # 1. 'cached_infos' is a list listing all the infos that are # caching this descr # - # 2. we just did one setfield, which is delayed (and thus - # not synchronized). 'lazy_setfield' is the delayed + # 2. we just did one set(field/arrayitem), which is delayed (and thus + # not synchronized). '_lazy_set' is the delayed # ResOperation. In this state, 'cached_infos' contains # out-of-date information. 
More precisely, the field # value pending in the ResOperation is *not* visible in @@ -38,43 +41,39 @@ # self.cached_infos = [] self.cached_structs = [] - self._lazy_setfield = None - self._lazy_setfield_registered = False + self._lazy_set = None - def register_dirty_field(self, structop, info): + def register_info(self, structop, info): + # invariant: every struct or array ptr info, that is not virtual and + # that has a non-None entry at + # info._fields[descr.get_index()] + # must be in cache_infos self.cached_structs.append(structop) self.cached_infos.append(info) - def invalidate(self, descr): - for opinfo in self.cached_infos: - assert isinstance(opinfo, info.AbstractStructPtrInfo) - opinfo._fields[descr.get_index()] = None - self.cached_infos = [] - self.cached_structs = [] - def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr, index=-1): - assert self._lazy_setfield is None + assert self._lazy_set is None for i, info in enumerate(self.cached_infos): structbox = optimizer.get_box_replacement(self.cached_structs[i]) info.produce_short_preamble_ops(structbox, descr, index, optimizer, shortboxes) def possible_aliasing(self, optheap, opinfo): - # If lazy_setfield is set and contains a setfield on a different + # If lazy_set is set and contains a setfield on a different # structvalue, then we are annoyed, because it may point to either # the same or a different structure at runtime. # XXX constants? - return (self._lazy_setfield is not None + return (self._lazy_set is not None and (not optheap.getptrinfo( - self._lazy_setfield.getarg(0)).same_info(opinfo))) + self._lazy_set.getarg(0)).same_info(opinfo))) def do_setfield(self, optheap, op): # Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'. 
structinfo = optheap.ensure_ptr_info_arg0(op) - arg1 = optheap.get_box_replacement(self._getvalue(op)) + arg1 = optheap.get_box_replacement(self._get_rhs_from_set_op(op)) if self.possible_aliasing(optheap, structinfo): - self.force_lazy_setfield(optheap, op.getdescr()) + self.force_lazy_set(optheap, op.getdescr()) assert not self.possible_aliasing(optheap, structinfo) cached_field = self._getfield(structinfo, op.getdescr(), optheap, False) if cached_field is not None: @@ -87,58 +86,43 @@ # cached_fieldvalue = self._cached_fields.get(structvalue, None) if not cached_field or not cached_field.same_box(arg1): - # common case: store the 'op' as lazy_setfield, and register - # myself in the optheap's _lazy_setfields_and_arrayitems list - self._lazy_setfield = op - #if not self._lazy_setfield_registered: - # self._lazy_setfield_registered = True + # common case: store the 'op' as lazy_set + self._lazy_set = op else: # this is the case where the pending setfield ends up # storing precisely the value that is already there, # as proved by 'cached_fields'. In this case, we don't - # need any _lazy_setfield: the heap value is already right. - # Note that this may reset to None a non-None lazy_setfield, + # need any _lazy_set: the heap value is already right. + # Note that this may reset to None a non-None lazy_set, # cancelling its previous effects with no side effect. # Now, we have to force the item in the short preamble self._getfield(structinfo, op.getdescr(), optheap) - self._lazy_setfield = None + self._lazy_set = None def getfield_from_cache(self, optheap, opinfo, descr): # Returns the up-to-date field's value, or None if not cached. 
if self.possible_aliasing(optheap, opinfo): - self.force_lazy_setfield(optheap, descr) - if self._lazy_setfield is not None: - op = self._lazy_setfield - return optheap.get_box_replacement(self._getvalue(op)) + self.force_lazy_set(optheap, descr) + if self._lazy_set is not None: + op = self._lazy_set + return optheap.get_box_replacement(self._get_rhs_from_set_op(op)) else: res = self._getfield(opinfo, descr, optheap) if res is not None: return res.get_box_replacement() return None - def _getvalue(self, op): - return op.getarg(1) - - def _getfield(self, opinfo, descr, optheap, true_force=True): - res = opinfo.getfield(descr, optheap) - if isinstance(res, PreambleOp): - if not true_force: - return res.op - res = optheap.optimizer.force_op_from_preamble(res) - opinfo.setfield(descr, None, res, optheap) - return res - - def force_lazy_setfield(self, optheap, descr, can_cache=True): - op = self._lazy_setfield + def force_lazy_set(self, optheap, descr, can_cache=True): + op = self._lazy_set if op is not None: - # This is the way _lazy_setfield is usually reset to None. + # This is the way _lazy_set is usually reset to None. # Now we clear _cached_fields, because actually doing the # setfield might impact any of the stored result (because of # possible aliasing). self.invalidate(descr) - self._lazy_setfield = None + self._lazy_set = None if optheap.postponed_op: for a in op.getarglist(): if a is optheap.postponed_op: @@ -151,25 +135,76 @@ # back in the cache: the value of this particular structure's # field. 
opinfo = optheap.ensure_ptr_info_arg0(op) - self._setfield(op, opinfo, optheap) + self.put_field_back_to_info(op, opinfo, optheap) elif not can_cache: self.invalidate(descr) - def _setfield(self, op, opinfo, optheap): + + # abstract methods + + def _get_rhs_from_set_op(self, op): + """ given a set(field or arrayitem) op, return the rhs argument """ + raise NotImplementedError("abstract method") + + def put_field_back_to_info(self, op, opinfo, optheap): + """ this method is called just after a lazy setfield was ommitted. it + puts the information of the lazy setfield back into the proper cache in + the info. """ + raise NotImplementedError("abstract method") + + def _getfield(self, opinfo, descr, optheap, true_force=True): + raise NotImplementedError("abstract method") + + def invalidate(self, descr): + """ clear all the cached knowledge in the infos in self.cached_infos. + """ + raise NotImplementedError("abstract method") + + +class CachedField(AbstractCachedEntry): + def _get_rhs_from_set_op(self, op): + return op.getarg(1) + + def put_field_back_to_info(self, op, opinfo, optheap): arg = optheap.get_box_replacement(op.getarg(1)) struct = optheap.get_box_replacement(op.getarg(0)) - opinfo.setfield(op.getdescr(), struct, arg, optheap, self) + opinfo.setfield(op.getdescr(), struct, arg, optheap=optheap, cf=self) + + def _getfield(self, opinfo, descr, optheap, true_force=True): + res = opinfo.getfield(descr, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.AbstractStructPtrInfo): + assert opinfo in self.cached_infos + if isinstance(res, PreambleOp): + if not true_force: + return res.op + res = optheap.optimizer.force_op_from_preamble(res) + opinfo.setfield(descr, None, res, optheap=optheap) + return res + + def invalidate(self, descr): + if descr.is_always_pure(): + return + for opinfo in self.cached_infos: + assert isinstance(opinfo, info.AbstractStructPtrInfo) + opinfo._fields[descr.get_index()] = None + self.cached_infos = [] + 
self.cached_structs = [] -class ArrayCachedField(CachedField): + +class ArrayCachedItem(AbstractCachedEntry): def __init__(self, index): self.index = index - CachedField.__init__(self) + AbstractCachedEntry.__init__(self) - def _getvalue(self, op): + def _get_rhs_from_set_op(self, op): return op.getarg(2) def _getfield(self, opinfo, descr, optheap, true_force=True): res = opinfo.getitem(descr, self.index, optheap) + if not we_are_translated() and res: + if isinstance(opinfo, info.ArrayPtrInfo): + assert opinfo in self.cached_infos if (isinstance(res, PreambleOp) and optheap.optimizer.cpu.supports_guard_gc_type): if not true_force: @@ -179,10 +214,10 @@ opinfo.setitem(descr, index, None, res, optheap=optheap) return res - def _setfield(self, op, opinfo, optheap): + def put_field_back_to_info(self, op, opinfo, optheap): arg = optheap.get_box_replacement(op.getarg(2)) struct = optheap.get_box_replacement(op.getarg(0)) - opinfo.setitem(op.getdescr(), self.index, struct, arg, self, optheap) + opinfo.setitem(op.getdescr(), self.index, struct, arg, optheap=optheap, cf=self) def invalidate(self, descr): for opinfo in self.cached_infos: @@ -201,15 +236,11 @@ self.postponed_op = None - # XXXX the rest is old - # cached array items: {array descr: {index: CachedField}} - #self.cached_arrayitems = {} # cached dict items: {dict descr: {(optval, index): box-or-const}} self.cached_dict_reads = {} # cache of corresponding {array descrs: dict 'entries' field descr} self.corresponding_array_descrs = {} # - self._lazy_setfields_and_arrayitems = [] self._remove_guard_not_invalidated = False self._seen_guard_not_invalidated = False @@ -221,7 +252,7 @@ def flush(self): self.cached_dict_reads.clear() self.corresponding_array_descrs.clear() - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.emit_postponed_op() def emit_postponed_op(self): @@ -234,7 +265,7 @@ descrkeys = self.cached_fields.keys() if not we_are_translated(): # XXX Pure operation of boxes that 
are cached in several places will - # only be removed from the peeled loop when red from the first + # only be removed from the peeled loop when read from the first # place discovered here. This is far from ideal, as it makes # the effectiveness of our optimization a bit random. It should # howevere always generate correct results. For tests we dont @@ -249,14 +280,7 @@ d.produce_potential_short_preamble_ops(self.optimizer, sb, descr, index) - def register_dirty_field(self, descr, op, info): - self.field_cache(descr).register_dirty_field(op, info) - - def register_dirty_array_field(self, arraydescr, op, index, info): - self.arrayitem_cache(arraydescr, index).register_dirty_field(op, info) - def clean_caches(self): - del self._lazy_setfields_and_arrayitems[:] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) @@ -285,7 +309,7 @@ try: cf = submap[index] except KeyError: - cf = submap[index] = ArrayCachedField(index) + cf = submap[index] = ArrayCachedItem(index) return cf def emit_operation(self, op): @@ -304,7 +328,7 @@ return if op.is_guard(): self.optimizer.pendingfields = ( - self.force_lazy_setfields_and_arrayitems_for_guard()) + self.force_lazy_sets_for_guard()) return opnum = op.getopnum() if (opnum == rop.SETFIELD_GC or # handled specially @@ -332,7 +356,7 @@ if not effectinfo.has_random_effects(): self.force_from_effectinfo(effectinfo) return - self.force_all_lazy_setfields_and_arrayitems() + self.force_all_lazy_sets() self.clean_caches() def optimize_CALL_I(self, op): @@ -410,7 +434,7 @@ # XXX we can get the wrong complexity here, if the lists # XXX stored on effectinfo are large for fielddescr in effectinfo.readonly_descrs_fields: - self.force_lazy_setfield(fielddescr) + self.force_lazy_set(fielddescr) for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: @@ -420,7 +444,7 @@ del self.cached_dict_reads[fielddescr] except 
KeyError: pass - self.force_lazy_setfield(fielddescr, can_cache=False) + self.force_lazy_set(fielddescr, can_cache=False) for arraydescr in effectinfo.write_descrs_arrays: self.force_lazy_setarrayitem(arraydescr, can_cache=False) if arraydescr in self.corresponding_array_descrs: @@ -431,16 +455,16 @@ pass # someone did it already if effectinfo.check_forces_virtual_or_virtualizable(): vrefinfo = self.optimizer.metainterp_sd.virtualref_info - self.force_lazy_setfield(vrefinfo.descr_forced) + self.force_lazy_set(vrefinfo.descr_forced) # ^^^ we only need to force this field; the other fields # of virtualref_info and virtualizable_info are not gcptrs. - def force_lazy_setfield(self, descr, can_cache=True): + def force_lazy_set(self, descr, can_cache=True): try: cf = self.cached_fields[descr] except KeyError: return - cf.force_lazy_setfield(self, descr, can_cache) + cf.force_lazy_set(self, descr, can_cache) def force_lazy_setarrayitem(self, arraydescr, indexb=None, can_cache=True): try: @@ -449,35 +473,35 @@ return for idx, cf in submap.iteritems(): if indexb is None or indexb.contains(idx): - cf.force_lazy_setfield(self, None, can_cache) + cf.force_lazy_set(self, None, can_cache) - def force_all_lazy_setfields_and_arrayitems(self): + def force_all_lazy_sets(self): items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for submap in self.cached_arrayitems.itervalues(): for index, cf in submap.iteritems(): - cf.force_lazy_setfield(self, None) + cf.force_lazy_set(self, None) - def force_lazy_setfields_and_arrayitems_for_guard(self): + def force_lazy_sets_for_guard(self): pendingfields = [] items = self.cached_fields.items() if not we_are_translated(): items.sort(key=str, reverse=True) for descr, cf in items: - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue val = op.getarg(1) if self.optimizer.is_virtual(val): 
pendingfields.append(op) continue - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) for descr, submap in self.cached_arrayitems.iteritems(): for index, cf in submap.iteritems(): - op = cf._lazy_setfield + op = cf._lazy_set if op is None: continue # the only really interesting case that we need to handle in the @@ -489,13 +513,18 @@ if self.optimizer.is_virtual(op.getarg(2)): pendingfields.append(op) else: - cf.force_lazy_setfield(self, descr) + cf.force_lazy_set(self, descr) return pendingfields def optimize_GETFIELD_GC_I(self, op): + descr = op.getdescr() + if descr.is_always_pure() and self.get_constant_box(op.getarg(0)) is not None: + resbox = self.optimizer.constant_fold(op) + self.optimizer.make_constant(op, resbox) + return structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) + cf = self.field_cache(descr) + field = cf.getfield_from_cache(self, structinfo, descr) if field is not None: self.make_equal_to(op, field) return @@ -503,23 +532,10 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # then remember the result of reading the field - structinfo.setfield(op.getdescr(), op.getarg(0), op, self, cf) + structinfo.setfield(descr, op.getarg(0), op, optheap=self, cf=cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - def optimize_GETFIELD_GC_PURE_I(self, op): - structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) - if field is not None: - self.make_equal_to(op, field) - return - # default case: produce the operation - self.make_nonnull(op.getarg(0)) - self.emit_operation(op) - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I - def optimize_SETFIELD_GC(self, op): self.setfield(op) #opnum = 
OpHelpers.getfield_pure_for_descr(op.getdescr()) @@ -554,12 +570,12 @@ # default case: produce the operation self.make_nonnull(op.getarg(0)) self.emit_operation(op) - # the remember the result of reading the array item + # then remember the result of reading the array item if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), - self.get_box_replacement(op), cf, - self) + self.get_box_replacement(op), optheap=self, + cf=cf) optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I @@ -609,12 +625,12 @@ def optimize_QUASIIMMUT_FIELD(self, op): # Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) - # x = GETFIELD_GC_PURE(s, descr='inst_x') + # x = GETFIELD_GC(s, descr='inst_x') # pure # If 's' is a constant (after optimizations) we rely on the rest of the - # optimizations to constant-fold the following getfield_gc_pure. + # optimizations to constant-fold the following pure getfield_gc. # in addition, we record the dependency here to make invalidation work # correctly. - # NB: emitting the GETFIELD_GC_PURE is only safe because the + # NB: emitting the pure GETFIELD_GC is only safe because the # QUASIIMMUT_FIELD is also emitted to make sure the dependency is # registered. 
structvalue = self.ensure_ptr_info_arg0(op) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/info.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/info.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/info.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/info.py 2016-03-19 16:40:11.000000000 +0000 @@ -196,28 +196,28 @@ def all_items(self): return self._fields - def setfield(self, descr, struct, op, optheap=None, cf=None): - self.init_fields(descr.get_parent_descr(), descr.get_index()) + def setfield(self, fielddescr, struct, op, optheap=None, cf=None): + self.init_fields(fielddescr.get_parent_descr(), fielddescr.get_index()) assert isinstance(op, AbstractValue) - self._fields[descr.get_index()] = op + self._fields[fielddescr.get_index()] = op if cf is not None: assert not self.is_virtual() assert struct is not None - cf.register_dirty_field(struct, self) + cf.register_info(struct, self) - def getfield(self, descr, optheap=None): - self.init_fields(descr.get_parent_descr(), descr.get_index()) - return self._fields[descr.get_index()] + def getfield(self, fielddescr, optheap=None): + self.init_fields(fielddescr.get_parent_descr(), fielddescr.get_index()) + return self._fields[fielddescr.get_index()] def _force_elements(self, op, optforce, descr): if self._fields is None: return - for i, flddescr in enumerate(descr.get_all_fielddescrs()): + for i, fielddescr in enumerate(descr.get_all_fielddescrs()): fld = self._fields[i] if fld is not None: subbox = optforce.force_box(fld) setfieldop = ResOperation(rop.SETFIELD_GC, [op, subbox], - descr=flddescr) + descr=fielddescr) self._fields[i] = None optforce.emit_operation(setfieldop) @@ -249,16 +249,16 @@ if fieldinfo and fieldinfo.is_virtual(): fieldinfo.visitor_walk_recursive(op, visitor, optimizer) - def produce_short_preamble_ops(self, structbox, descr, index, optimizer, + def produce_short_preamble_ops(self, structbox, fielddescr, index, optimizer, 
shortboxes): if self._fields is None: return - if descr.get_index() >= len(self._fields): + if fielddescr.get_index() >= len(self._fields): # we don't know about this item return - op = optimizer.get_box_replacement(self._fields[descr.get_index()]) - opnum = OpHelpers.getfield_for_descr(descr) - getfield_op = ResOperation(opnum, [structbox], descr=descr) + op = optimizer.get_box_replacement(self._fields[fielddescr.get_index()]) + opnum = OpHelpers.getfield_for_descr(fielddescr) + getfield_op = ResOperation(opnum, [structbox], descr=fielddescr) shortboxes.add_heap_op(op, getfield_op) def _is_immutable_and_filled_with_constants(self, optimizer, memo=None): @@ -294,12 +294,12 @@ return True def _force_elements_immutable(self, descr, constptr, optforce): - for i, flddescr in enumerate(descr.get_all_fielddescrs()): + for i, fielddescr in enumerate(descr.get_all_fielddescrs()): fld = self._fields[i] subbox = optforce.force_box(fld) assert isinstance(subbox, Const) execute(optforce.optimizer.cpu, None, rop.SETFIELD_GC, - flddescr, constptr, subbox) + fielddescr, constptr, subbox) class InstancePtrInfo(AbstractStructPtrInfo): _attrs_ = ('_known_class',) @@ -505,6 +505,7 @@ info._items = self._items[:] def _force_elements(self, op, optforce, descr): + # XXX descr = op.getdescr() const = optforce.new_const_item(self.descr) for i in range(self.length): @@ -523,15 +524,16 @@ optforce.emit_operation(setop) optforce.pure_from_args(rop.ARRAYLEN_GC, [op], ConstInt(len(self._items))) - def setitem(self, descr, index, struct, op, cf=None, optheap=None): + def setitem(self, descr, index, struct, op, optheap=None, cf=None): if self._items is None: self._items = [None] * (index + 1) if index >= len(self._items): + assert not self.is_virtual() self._items = self._items + [None] * (index - len(self._items) + 1) self._items[index] = op if cf is not None: assert not self.is_virtual() - cf.register_dirty_field(struct, self) + cf.register_info(struct, self) def getitem(self, descr, index, 
optheap=None): if self._items is None or index >= len(self._items): @@ -626,13 +628,13 @@ i = 0 fielddescrs = op.getdescr().get_all_fielddescrs() for index in range(self.length): - for flddescr in fielddescrs: + for fielddescr in fielddescrs: fld = self._items[i] if fld is not None: subbox = optforce.force_box(fld) setfieldop = ResOperation(rop.SETINTERIORFIELD_GC, [op, ConstInt(index), subbox], - descr=flddescr) + descr=fielddescr) optforce.emit_operation(setfieldop) # heapcache does not work for interiorfields # if it does, we would need a fix here @@ -645,7 +647,7 @@ fielddescrs = self.descr.get_all_fielddescrs() i = 0 for index in range(self.getlength()): - for flddescr in fielddescrs: + for fielddescr in fielddescrs: itemop = self._items[i] if (itemop is not None and not isinstance(itemop, Const)): @@ -691,21 +693,21 @@ optheap.const_infos[ref] = info return info - def getfield(self, descr, optheap=None): - info = self._get_info(descr.get_parent_descr(), optheap) - return info.getfield(descr) + def getfield(self, fielddescr, optheap=None): + info = self._get_info(fielddescr.get_parent_descr(), optheap) + return info.getfield(fielddescr) def getitem(self, descr, index, optheap=None): info = self._get_array_info(descr, optheap) return info.getitem(descr, index) - def setitem(self, descr, index, struct, op, cf=None, optheap=None): + def setitem(self, descr, index, struct, op, optheap=None, cf=None): info = self._get_array_info(descr, optheap) - info.setitem(descr, index, struct, op, cf) + info.setitem(descr, index, struct, op, optheap=optheap, cf=cf) - def setfield(self, descr, struct, op, optheap=None, cf=None): - info = self._get_info(descr.get_parent_descr(), optheap) - info.setfield(descr, struct, op, optheap, cf) + def setfield(self, fielddescr, struct, op, optheap=None, cf=None): + info = self._get_info(fielddescr.get_parent_descr(), optheap) + info.setfield(fielddescr, struct, op, optheap=optheap, cf=cf) def is_null(self): return not 
bool(self._const.getref_base()) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/optimizer.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/optimizer.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/optimizer.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/optimizer.py 2016-03-19 16:40:11.000000000 +0000 @@ -10,6 +10,7 @@ from rpython.jit.metainterp.typesystem import llhelper from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib.debug import debug_print +from rpython.jit.metainterp.optimize import SpeculativeError @@ -374,6 +375,7 @@ if (box.type == 'i' and box.get_forwarded() and box.get_forwarded().is_constant()): return ConstInt(box.get_forwarded().getint()) + return None #self.ensure_imported(value) def get_newoperations(self): @@ -736,12 +738,64 @@ self.emit_operation(op) def constant_fold(self, op): + self.protect_speculative_operation(op) argboxes = [self.get_constant_box(op.getarg(i)) for i in range(op.numargs())] return execute_nonspec_const(self.cpu, None, op.getopnum(), argboxes, op.getdescr(), op.type) + def protect_speculative_operation(self, op): + """When constant-folding a pure operation that reads memory from + a gcref, make sure that the gcref is non-null and of a valid type. + Otherwise, raise SpeculativeError. This should only occur when + unrolling and optimizing the unrolled loop. Note that if + cpu.supports_guard_gc_type is false, we can't really do this + check at all, but then we don't unroll in that case. 
+ """ + opnum = op.getopnum() + cpu = self.cpu + + if OpHelpers.is_pure_getfield(opnum, op.getdescr()): + fielddescr = op.getdescr() + ref = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_field(ref, fielddescr) + return + + elif (opnum == rop.GETARRAYITEM_GC_PURE_I or + opnum == rop.GETARRAYITEM_GC_PURE_R or + opnum == rop.GETARRAYITEM_GC_PURE_F or + opnum == rop.ARRAYLEN_GC): + arraydescr = op.getdescr() + array = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_array(array, arraydescr) + if opnum == rop.ARRAYLEN_GC: + return + arraylength = cpu.bh_arraylen_gc(array, arraydescr) + + elif (opnum == rop.STRGETITEM or + opnum == rop.STRLEN): + string = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_string(string) + if opnum == rop.STRLEN: + return + arraylength = cpu.bh_strlen(string) + + elif (opnum == rop.UNICODEGETITEM or + opnum == rop.UNICODELEN): + unicode = self.get_constant_box(op.getarg(0)).getref_base() + cpu.protect_speculative_unicode(unicode) + if opnum == rop.UNICODELEN: + return + arraylength = cpu.bh_unicodelen(unicode) + + else: + return + + index = self.get_constant_box(op.getarg(1)).getint() + if not (0 <= index < arraylength): + raise SpeculativeError + def is_virtual(self, op): if op.type == 'r': opinfo = self.getptrinfo(op) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/pure.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/pure.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/pure.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/pure.py 2016-03-19 16:40:11.000000000 +0000 @@ -94,7 +94,6 @@ break else: # all constant arguments: constant-fold away - self.protect_speculative_operation(op) resbox = self.optimizer.constant_fold(op) # note that INT_xxx_OVF is not done from here, and the # overflows in the INT_xxx operations are ignored @@ -119,59 +118,6 @@ if nextop: self.emit_operation(nextop) - 
def protect_speculative_operation(self, op): - """When constant-folding a pure operation that reads memory from - a gcref, make sure that the gcref is non-null and of a valid type. - Otherwise, raise SpeculativeError. This should only occur when - unrolling and optimizing the unrolled loop. Note that if - cpu.supports_guard_gc_type is false, we can't really do this - check at all, but then we don't unroll in that case. - """ - opnum = op.getopnum() - cpu = self.optimizer.cpu - - if (opnum == rop.GETFIELD_GC_PURE_I or - opnum == rop.GETFIELD_GC_PURE_R or - opnum == rop.GETFIELD_GC_PURE_F): - fielddescr = op.getdescr() - ref = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_field(ref, fielddescr) - return - - elif (opnum == rop.GETARRAYITEM_GC_PURE_I or - opnum == rop.GETARRAYITEM_GC_PURE_R or - opnum == rop.GETARRAYITEM_GC_PURE_F or - opnum == rop.ARRAYLEN_GC): - arraydescr = op.getdescr() - array = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_array(array, arraydescr) - if opnum == rop.ARRAYLEN_GC: - return - arraylength = cpu.bh_arraylen_gc(array, arraydescr) - - elif (opnum == rop.STRGETITEM or - opnum == rop.STRLEN): - string = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_string(string) - if opnum == rop.STRLEN: - return - arraylength = cpu.bh_strlen(string) - - elif (opnum == rop.UNICODEGETITEM or - opnum == rop.UNICODELEN): - unicode = self.get_constant_box(op.getarg(0)).getref_base() - cpu.protect_speculative_unicode(unicode) - if opnum == rop.UNICODELEN: - return - arraylength = cpu.bh_unicodelen(unicode) - - else: - return - - index = self.get_constant_box(op.getarg(1)).getint() - if not (0 <= index < arraylength): - raise SpeculativeError - def getrecentops(self, opnum): if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: opnum = opnum - rop._OVF_FIRST diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/rewrite.py 
pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/rewrite.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/rewrite.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/rewrite.py 2016-03-19 16:40:11.000000000 +0000 @@ -380,7 +380,7 @@ raise InvalidLoop("promote of a virtual") old_guard_op = info.get_last_guard(self.optimizer) if old_guard_op is not None: - op = self.replace_guard_class_with_guard_value(op, info, + op = self.replace_old_guard_with_guard_value(op, info, old_guard_op) elif arg0.type == 'f': arg0 = self.get_box_replacement(arg0) @@ -390,11 +390,26 @@ assert isinstance(constbox, Const) self.optimize_guard(op, constbox) - def replace_guard_class_with_guard_value(self, op, info, old_guard_op): - if old_guard_op.opnum != rop.GUARD_NONNULL: - previous_classbox = info.get_known_class(self.optimizer.cpu) - expected_classbox = self.optimizer.cpu.ts.cls_of_box(op.getarg(1)) - assert previous_classbox is not None + def replace_old_guard_with_guard_value(self, op, info, old_guard_op): + # there already has been a guard_nonnull or guard_class or + # guard_nonnull_class on this value, which is rather silly. + # This function replaces the original guard with a + # guard_value. Must be careful: doing so is unsafe if the + # original guard checks for something inconsistent, + # i.e. different than what it would give if the guard_value + # passed (this is a rare case, but possible). If we get + # inconsistent results in this way, then we must not do the + # replacement, otherwise we'd put guard_value up there but all + # intermediate ops might be executed by assuming something + # different, from the old guard that is now removed... 
+ + c_value = op.getarg(1) + if not c_value.nonnull(): + raise InvalidLoop('A GUARD_VALUE(..., NULL) follows some other ' + 'guard that it is not NULL') + previous_classbox = info.get_known_class(self.optimizer.cpu) + if previous_classbox is not None: + expected_classbox = self.optimizer.cpu.ts.cls_of_box(c_value) assert expected_classbox is not None if not previous_classbox.same_constant( expected_classbox): diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/schedule.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/schedule.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/schedule.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/schedule.py 2016-03-19 16:40:11.000000000 +0000 @@ -1,7 +1,8 @@ from rpython.jit.metainterp.history import (VECTOR, FLOAT, INT, ConstInt, ConstFloat, TargetToken) from rpython.jit.metainterp.resoperation import (rop, ResOperation, - GuardResOp, VecOperation, OpHelpers, VecOperationNew) + GuardResOp, VecOperation, OpHelpers, VecOperationNew, + VectorizationInfo) from rpython.jit.metainterp.optimizeopt.dependency import (DependencyGraph, MemoryRef, Node, IndexVar) from rpython.jit.metainterp.optimizeopt.renamer import Renamer @@ -14,6 +15,16 @@ from rpython.rtyper.lltypesystem import lltype +def forwarded_vecinfo(op): + fwd = op.get_forwarded() + if fwd is None or not isinstance(fwd, VectorizationInfo): + # the optimizer clears getforwarded AFTER + # vectorization, it happens that this is not clean + fwd = VectorizationInfo(op) + if not op.is_constant(): + op.set_forwarded(fwd) + return fwd + class SchedulerState(object): def __init__(self, graph): self.renamer = Renamer() @@ -227,28 +238,29 @@ return self.count == TypeRestrict.ANY_COUNT def check(self, value): - assert value.datatype != '\x00' + vecinfo = forwarded_vecinfo(value) + assert vecinfo.datatype != '\x00' if self.type != TypeRestrict.ANY_TYPE: - if self.type != value.datatype: + if self.type != vecinfo.datatype: 
msg = "type mismatch %s != %s" % \ - (self.type, value.datatype) + (self.type, vecinfo.datatype) failnbail_transformation(msg) - assert value.bytesize > 0 + assert vecinfo.bytesize > 0 if not self.any_size(): - if self.bytesize != value.bytesize: + if self.bytesize != vecinfo.bytesize: msg = "bytesize mismatch %s != %s" % \ - (self.bytesize, value.bytesize) + (self.bytesize, vecinfo.bytesize) failnbail_transformation(msg) - assert value.count > 0 + assert vecinfo.count > 0 if self.count != TypeRestrict.ANY_COUNT: - if value.count < self.count: + if vecinfo.count < self.count: msg = "count mismatch %s < %s" % \ - (self.count, value.count) + (self.count, vecinfo.count) failnbail_transformation(msg) if self.sign != TypeRestrict.ANY_SIGN: - if bool(self.sign) == value.sign: + if bool(self.sign) == vecinfo.sign: msg = "sign mismatch %s < %s" % \ - (self.sign, value.sign) + (self.sign, vecinfo.sign) failnbail_transformation(msg) def max_input_count(self, count): @@ -269,7 +281,8 @@ def must_crop_vector(self, op, index): restrict = self.argument_restrictions[index] - size = op.getarg(index).bytesize + vecinfo = forwarded_vecinfo(op.getarg(index)) + size = vecinfo.bytesize newsize = self.crop_to_size(op, index) return not restrict.any_size() and newsize != size @@ -288,12 +301,14 @@ return size // op.cast_from_bytesize() else: return vec_reg_size // op.cast_to_bytesize() - return vec_reg_size // op.bytesize + vecinfo = forwarded_vecinfo(op) + return vec_reg_size // vecinfo.bytesize class GuardRestrict(OpRestrict): def opcount_filling_vector_register(self, op, vec_reg_size): arg = op.getarg(0) - return vec_reg_size // arg.bytesize + vecinfo = forwarded_vecinfo(arg) + return vec_reg_size // vecinfo.bytesize class LoadRestrict(OpRestrict): def opcount_filling_vector_register(self, op, vec_reg_size): @@ -306,8 +321,9 @@ self.argument_restrictions = argument_restris def must_crop_vector(self, op, index): - size = op.getarg(index).bytesize - return self.crop_to_size(op, index) 
!= size + vecinfo = forwarded_vecinfo(op.getarg(index)) + bytesize = vecinfo.bytesize + return self.crop_to_size(op, index) != bytesize @always_inline def crop_to_size(self, op, index): @@ -323,19 +339,22 @@ class OpMatchSizeTypeFirst(OpRestrict): def check_operation(self, state, pack, op): i = 0 + infos = [forwarded_vecinfo(o) for o in op.getarglist()] arg0 = op.getarg(i) while arg0.is_constant() and i < op.numargs(): i += 1 arg0 = op.getarg(i) - bytesize = arg0.bytesize - datatype = arg0.datatype + vecinfo = forwarded_vecinfo(arg0) + bytesize = vecinfo.bytesize + datatype = vecinfo.datatype for arg in op.getarglist(): if arg.is_constant(): continue - if arg.bytesize != bytesize: + curvecinfo = forwarded_vecinfo(arg) + if curvecinfo.bytesize != bytesize: raise NotAVectorizeableLoop() - if arg.datatype != datatype: + if curvecinfo.datatype != datatype: raise NotAVectorizeableLoop() class trans(object): @@ -382,8 +401,8 @@ rop.VEC_GETARRAYITEM_GC_I: LOAD_RESTRICT, rop.VEC_GETARRAYITEM_GC_F: LOAD_RESTRICT, - rop.GUARD_TRUE: GUARD_RESTRICT, - rop.GUARD_FALSE: GUARD_RESTRICT, + rop.VEC_GUARD_TRUE: GUARD_RESTRICT, + rop.VEC_GUARD_FALSE: GUARD_RESTRICT, ## irregular rop.VEC_INT_SIGNEXT: OpRestrict([TR_ANY_INTEGER]), @@ -428,6 +447,7 @@ if left.is_guard(): prepare_fail_arguments(state, pack, left, vecop) state.oplist.append(vecop) + assert vecop.count >= 1 def prepare_arguments(state, pack, args): # Transforming one argument to a vector box argument @@ -484,15 +504,16 @@ def crop_vector(state, oprestrict, restrict, pack, args, i): # convert size i64 -> i32, i32 -> i64, ... 
arg = args[i] - size = arg.bytesize + vecinfo = forwarded_vecinfo(arg) + size = vecinfo.bytesize left = pack.leftmost() if oprestrict.must_crop_vector(left, i): newsize = oprestrict.crop_to_size(left, i) assert arg.type == 'i' state._prevent_signext(newsize, size) - count = arg.count + count = vecinfo.count vecop = VecOperationNew(rop.VEC_INT_SIGNEXT, [arg, ConstInt(newsize)], - 'i', newsize, arg.signed, count) + 'i', newsize, vecinfo.signed, count) state.oplist.append(vecop) state.costmodel.record_cast_int(size, newsize, count) args[i] = vecop @@ -513,15 +534,19 @@ i = 1 while i < len(vectors): (newarg_pos, newarg) = vectors[i] - if arg.count + newarg.count <= count: - arg = pack_into_vector(state, arg, arg.count, newarg, newarg_pos, newarg.count) + vecinfo = forwarded_vecinfo(arg) + newvecinfo = forwarded_vecinfo(newarg) + if vecinfo.count + newvecinfo.count <= count: + arg = pack_into_vector(state, arg, vecinfo.count, newarg, newarg_pos, newvecinfo.count) i += 1 return arg @always_inline def position_values(state, restrict, pack, args, index, position): arg = args[index] - newcount, count = restrict.count, arg.count + vecinfo = forwarded_vecinfo(arg) + count = vecinfo.count + newcount = restrict.count if not restrict.any_count() and newcount != count: if position == 0: pass @@ -530,13 +555,15 @@ # The vector box is at a position != 0 but it # is required to be at position 0. Unpack it! arg = args[index] - count = restrict.max_input_count(arg.count) + vecinfo = forwarded_vecinfo(arg) + count = restrict.max_input_count(vecinfo.count) args[index] = unpack_from_vector(state, arg, position, count) state.remember_args_in_vector(pack, index, args[index]) def check_if_pack_supported(state, pack): left = pack.leftmost() - insize = left.bytesize + vecinfo = forwarded_vecinfo(left) + insize = vecinfo.bytesize if left.is_typecast(): # prohibit the packing of signext calls that # cast to int16/int8. 
@@ -550,10 +577,11 @@ def unpack_from_vector(state, arg, index, count): """ Extract parts of the vector box into another vector box """ assert count > 0 - assert index + count <= arg.count + vecinfo = forwarded_vecinfo(arg) + assert index + count <= vecinfo.count args = [arg, ConstInt(index), ConstInt(count)] - vecop = OpHelpers.create_vec_unpack(arg.type, args, arg.bytesize, - arg.signed, count) + vecop = OpHelpers.create_vec_unpack(arg.type, args, vecinfo.bytesize, + vecinfo.signed, count) state.costmodel.record_vector_unpack(arg, index, count) state.oplist.append(vecop) return vecop @@ -564,9 +592,10 @@ new_box = [1,2,3,4,5,6,_,_] after the operation, tidx=4, scount=2 """ assert sidx == 0 # restriction - newcount = tgt.count + scount + vecinfo = forwarded_vecinfo(tgt) + newcount = vecinfo.count + scount args = [tgt, src, ConstInt(tidx), ConstInt(scount)] - vecop = OpHelpers.create_vec_pack(tgt.type, args, tgt.bytesize, tgt.signed, newcount) + vecop = OpHelpers.create_vec_pack(tgt.type, args, vecinfo.bytesize, vecinfo.signed, newcount) state.oplist.append(vecop) state.costmodel.record_vector_pack(src, sidx, scount) if not we_are_translated(): @@ -582,14 +611,16 @@ assert arg0.is_vector() assert index.is_constant() assert isinstance(count, ConstInt) - assert arg0.bytesize == op.bytesize + vecinfo = forwarded_vecinfo(op) + argvecinfo = forwarded_vecinfo(arg0) + assert argvecinfo.bytesize == vecinfo.bytesize if arg1.is_vector(): - assert arg1.bytesize == op.bytesize + assert argvecinfo.bytesize == vecinfo.bytesize else: assert count.value == 1 - assert index.value < op.count - assert index.value + count.value <= op.count - assert op.count > arg0.count + assert index.value < vecinfo.count + assert index.value + count.value <= vecinfo.count + assert vecinfo.count > argvecinfo.count def expand(state, pack, args, arg, index): """ Expand a value into a vector box. 
useful for arith metic @@ -618,7 +649,8 @@ args[index] = vecop return vecop left = pack.leftmost() - vecop = OpHelpers.create_vec_expand(arg, left.bytesize, left.signed, pack.numops()) + vecinfo = forwarded_vecinfo(left) + vecop = OpHelpers.create_vec_expand(arg, vecinfo.bytesize, vecinfo.signed, pack.numops()) ops.append(vecop) if variables is not None: variables.append(vecop) @@ -633,15 +665,16 @@ args[index] = vecop return vecop - - vecop = OpHelpers.create_vec(arg.type, arg.bytesize, arg.signed, pack.opnum()) + arg_vecinfo = forwarded_vecinfo(arg) + vecop = OpHelpers.create_vec(arg.type, arg_vecinfo.bytesize, arg_vecinfo.signed, pack.opnum()) ops.append(vecop) for i,node in enumerate(pack.operations): op = node.getoperation() arg = op.getarg(index) arguments = [vecop, arg, ConstInt(i), ConstInt(1)] - vecop = OpHelpers.create_vec_pack(arg.type, arguments, vecop.bytesize, - vecop.signed, vecop.count+1) + vecinfo = forwarded_vecinfo(vecop) + vecop = OpHelpers.create_vec_pack(arg.type, arguments, vecinfo.bytesize, + vecinfo.signed, vecinfo.count+1) ops.append(vecop) state.expand(expandargs, vecop) @@ -793,8 +826,9 @@ if arg in self.accumulation: return arg args = [var, ConstInt(pos), ConstInt(1)] - vecop = OpHelpers.create_vec_unpack(var.type, args, var.bytesize, - var.signed, 1) + vecinfo = forwarded_vecinfo(var) + vecop = OpHelpers.create_vec_unpack(var.type, args, vecinfo.bytesize, + vecinfo.signed, 1) self.renamer.start_renaming(arg, vecop) self.seen[vecop] = None self.costmodel.record_vector_unpack(var, pos, 1) @@ -813,14 +847,16 @@ def setvector_of_box(self, var, off, vector): if var.returns_void(): assert 0, "not allowed to rename void resop" - assert off < vector.count + vecinfo = forwarded_vecinfo(vector) + assert off < vecinfo.count assert not var.is_vector() self.box_to_vbox[var] = (off, vector) def remember_args_in_vector(self, pack, index, box): arguments = [op.getoperation().getarg(index) for op in pack.operations] for i,arg in enumerate(arguments): - 
if i >= box.count: + vecinfo = forwarded_vecinfo(arg) + if i >= vecinfo.count: break self.setvector_of_box(arg, i, box) @@ -894,7 +930,8 @@ else: assert left.is_guard() and left.getopnum() in \ (rop.GUARD_TRUE, rop.GUARD_FALSE) - bytesize = left.getarg(0).bytesize + vecinfo = forwarded_vecinfo(left.getarg(0)) + bytesize = vecinfo.bytesize return bytesize * self.numops() - vec_reg_size return 0 if self.numops() == 0: @@ -909,7 +946,8 @@ # size is increased #size = left.cast_input_bytesize(vec_reg_size) return left.cast_to_bytesize() * self.numops() - vec_reg_size - return left.bytesize * self.numops() - vec_reg_size + vecinfo = forwarded_vecinfo(left) + return vecinfo.bytesize * self.numops() - vec_reg_size def is_full(self, vec_reg_size): """ If one input element times the opcount is equal @@ -1034,11 +1072,13 @@ def getdatatype(self): accum = self.leftmost().getarg(self.position) - return accum.datatype + vecinfo = forwarded_vecinfo(accum) + return vecinfo.datatype def getbytesize(self): accum = self.leftmost().getarg(self.position) - return accum.bytesize + vecinfo = forwarded_vecinfo(accum) + return vecinfo.bytesize def getleftmostseed(self): return self.leftmost().getarg(self.position) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/shortpreamble.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/shortpreamble.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/shortpreamble.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/shortpreamble.py 2016-03-19 16:40:11.000000000 +0000 @@ -81,7 +81,7 @@ assert index >= 0 cf = optheap.arrayitem_cache(descr, index) opinfo.setitem(self.getfield_op.getdescr(), index, self.res, - pop, cf, optheap=optheap) + pop, optheap, cf) def repr(self, memo): return "HeapOp(%s, %s)" % (self.res.repr(memo), @@ -455,8 +455,7 @@ self.extra_same_as = self.sb.extra_same_as self.target_token = target_token - def setup(self, inputargs, jump_args, short, label_args): - 
self.inputargs = inputargs + def setup(self, jump_args, short, label_args): self.jump_args = jump_args self.short = short self.label_args = label_args diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_dependency.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_dependency.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_dependency.py 2016-03-19 16:40:11.000000000 +0000 @@ -144,6 +144,7 @@ # arguments [str(arg) for arg in loop.inputargs] loop.graph = FakeDependencyGraph(loop) + loop.setup_vectorization() return loop @@ -520,8 +521,8 @@ def test_getfield(self): graph = self.build_dependency(""" [p0, p1] # 0: 1,2,5 - p2 = getfield_gc_r(p0) # 1: 3,5 - p3 = getfield_gc_r(p0) # 2: 4 + p2 = getfield_gc_r(p0, descr=valuedescr) # 1: 3,5 + p3 = getfield_gc_r(p0, descr=valuedescr) # 2: 4 guard_nonnull(p2) [p2] # 3: 4,5 guard_nonnull(p3) [p3] # 4: 5 jump(p0,p2) # 5: @@ -531,10 +532,10 @@ def test_cyclic(self): graph = self.build_dependency(""" [p0, p1, p5, p6, p7, p9, p11, p12] # 0: 1,6 - p13 = getfield_gc_r(p9) # 1: 2,5 + p13 = getfield_gc_r(p9, descr=valuedescr) # 1: 2,5 guard_nonnull(p13) [] # 2: 4,5 - i14 = getfield_gc_i(p9) # 3: 5 - p15 = getfield_gc_r(p13) # 4: 5 + i14 = getfield_gc_i(p9, descr=valuedescr) # 3: 5 + p15 = getfield_gc_r(p13, descr=valuedescr) # 4: 5 guard_class(p15, 14073732) [p1, p0, p9, i14, p15, p13, p5, p6, p7] # 5: 6 jump(p0,p1,p5,p6,p7,p9,p11,p12) # 6: """) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py 2016-03-19 16:40:12.000000000 +0000 @@ -10,9 +10,13 @@ from 
rpython.jit.metainterp import executor, compile, resume from rpython.jit.metainterp.resoperation import rop, ResOperation, InputArgInt,\ OpHelpers, InputArgRef +from rpython.jit.metainterp.resumecode import unpack_numbering from rpython.rlib.rarithmetic import LONG_BIT from rpython.jit.tool.oparser import parse +class FakeJitCode(object): + index = 0 + def test_store_final_boxes_in_guard(): from rpython.jit.metainterp.compile import ResumeGuardDescr from rpython.jit.metainterp.resume import tag, TAGBOX @@ -22,20 +26,20 @@ None, None) op = ResOperation(rop.GUARD_TRUE, [ConstInt(1)], None) # setup rd data - fi0 = resume.FrameInfo(None, "code0", 11) + fi0 = resume.FrameInfo(None, FakeJitCode(), 11) snapshot0 = resume.Snapshot(None, [b0]) - op.rd_snapshot = resume.Snapshot(snapshot0, [b1]) - op.rd_frame_info_list = resume.FrameInfo(fi0, "code1", 33) + op.rd_snapshot = resume.TopSnapshot(snapshot0, [], [b1]) + op.rd_frame_info_list = resume.FrameInfo(fi0, FakeJitCode(), 33) # opt.store_final_boxes_in_guard(op, []) fdescr = op.getdescr() - if op.getfailargs() == [b0, b1]: - assert list(fdescr.rd_numb.nums) == [tag(1, TAGBOX)] - assert list(fdescr.rd_numb.prev.nums) == [tag(0, TAGBOX)] - else: - assert op.getfailargs() == [b1, b0] - assert list(fdescr.rd_numb.nums) == [tag(0, TAGBOX)] - assert list(fdescr.rd_numb.prev.nums) == [tag(1, TAGBOX)] + #if op.getfailargs() == [b0, b1]: + # assert list(fdescr.rd_numb.nums) == [tag(1, TAGBOX)] + # assert list(fdescr.rd_numb.prev.nums) == [tag(0, TAGBOX)] + #else: + # assert op.getfailargs() == [b1, b0] + # assert list(fdescr.rd_numb.nums) == [tag(0, TAGBOX)] + # assert list(fdescr.rd_numb.prev.nums) == [tag(1, TAGBOX)] assert fdescr.rd_virtuals is None assert fdescr.rd_consts == [] @@ -692,58 +696,6 @@ # ---------- - def test_virtual_1(self): - ops = """ - [i, p0] - i0 = getfield_gc(p0, descr=valuedescr) - i1 = int_add(i0, i) - setfield_gc(p0, i1, descr=valuedescr) - jump(i, p0) - """ - expected = """ - [i, i2] - i1 = int_add(i2, 
i) - jump(i, i1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - - def test_virtual_float(self): - ops = """ - [f, p0] - f0 = getfield_gc(p0, descr=floatdescr) - f1 = float_add(f0, f) - setfield_gc(p0, f1, descr=floatdescr) - jump(f, p0) - """ - expected = """ - [f, f2] - f1 = float_add(f2, f) - jump(f, f1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)', - expected) - - def test_virtual_2(self): - py.test.skip("XXX") - ops = """ - [i, p0] - i0 = getfield_gc(p0, descr=valuedescr) - i1 = int_add(i0, i) - p1 = new_with_vtable(ConstClass(node_vtable)) - setfield_gc(p1, i1, descr=valuedescr) - jump(i, p1) - """ - expected = """ - [i, i2] - i1 = int_add(i2, i) - jump(i, i1) - """ - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - def test_virtual_oois(self): ops = """ [p0, p1, p2] @@ -770,20 +722,6 @@ guard_false(i12) [] jump(p0, p1, p2) """ - expected = """ - [p2] - # all constant-folded :-) - jump(p2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, '''Virtual(node_vtable), - Virtual(node_vtable), - Not''', - expected) - # - # to be complete, we also check the no-opt case where most comparisons - # are not removed. The exact set of comparisons removed depends on - # the details of the algorithm... expected2 = """ [p0, p1, p2] guard_nonnull(p0) [] @@ -797,26 +735,6 @@ """ self.optimize_loop(ops, expected2) - def test_virtual_default_field(self): - py.test.skip("XXX") - ops = """ - [p0] - i0 = getfield_gc(p0, descr=valuedescr) - guard_value(i0, 0) [] - p1 = new_with_vtable(ConstClass(node_vtable)) - # the field 'value' has its default value of 0 - jump(p1) - """ - expected = """ - [i] - guard_value(i, 0) [] - jump(0) - """ - # the 'expected' is sub-optimal, but it should be done by another later - # optimization step. See test_find_nodes_default_field() for why. 
- self.optimize_loop(ops, 'Virtual(node_vtable, valuedescr=Not)', - expected) - def test_virtual_3(self): ops = """ [i] @@ -833,55 +751,6 @@ """ self.optimize_loop(ops, expected) - def test_virtual_4(self): - py.test.skip("XXX") - ops = """ - [i0, p0] - guard_class(p0, ConstClass(node_vtable)) [] - i1 = getfield_gc(p0, descr=valuedescr) - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i2, descr=valuedescr) - jump(i3, p1) - """ - expected = """ - [i0, i1] - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - jump(i3, i2) - """ - self.optimize_loop(ops, 'Not, Virtual(node_vtable, valuedescr=Not)', - expected) - - def test_virtual_5(self): - py.test.skip("XXX") - ops = """ - [i0, p0] - guard_class(p0, ConstClass(node_vtable)) [] - i1 = getfield_gc(p0, descr=valuedescr) - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - p2 = new_with_vtable(descr=nodesize2) - setfield_gc(p2, i1, descr=valuedescr) - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i2, descr=valuedescr) - setfield_gc(p1, p2, descr=nextdescr) - jump(i3, p1) - """ - expected = """ - [i0, i1, i1bis] - i2 = int_sub(i1, 1) - i3 = int_add(i0, i1) - jump(i3, i2, i1) - """ - self.optimize_loop(ops, - '''Not, Virtual(node_vtable, - valuedescr=Not, - nextdescr=Virtual(node_vtable2, - valuedescr=Not))''', - expected) - def test_virtual_constant_isnull(self): ops = """ [i0] @@ -1086,12 +955,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_getfield_gc_1(self): ops = """ [i] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + p1 = new_with_vtable(descr=nodesize3) + setfield_gc(p1, i, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) jump(i1) """ expected = """ @@ -1100,17 +969,16 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_getfield_gc_2(self): ops = """ [i] - i1 = 
getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ [i] - jump(5) + jump(7) """ - self.node.value = 5 self.optimize_loop(ops, expected) def test_getfield_gc_nonpure_2(self): @@ -1205,27 +1073,6 @@ """ self.optimize_loop(ops, expected) - def test_varray_2(self): - ops = """ - [i0, p1] - i1 = getarrayitem_gc(p1, 0, descr=arraydescr) - i2 = getarrayitem_gc(p1, 1, descr=arraydescr) - i3 = int_sub(i1, i2) - guard_value(i3, 15) [] - p2 = new_array(2, descr=arraydescr) - setarrayitem_gc(p2, 1, i0, descr=arraydescr) - setarrayitem_gc(p2, 0, 20, descr=arraydescr) - jump(i0, p2) - """ - expected = """ - [i0, i1, i2] - i3 = int_sub(i1, i2) - guard_value(i3, 15) [] - jump(i0, 20, i0) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, VArray(arraydescr, Not, Not)', expected) - def test_p123_array(self): ops = """ [i1, p2, p3] @@ -1260,23 +1107,6 @@ """ self.optimize_loop(ops, expected) - def test_vstruct_1(self): - py.test.skip("XXX") - ops = """ - [i1, p2] - i2 = getfield_gc(p2, descr=adescr) - escape_n(i2) - p3 = new(descr=ssize) - setfield_gc(p3, i1, descr=adescr) - jump(i1, p3) - """ - expected = """ - [i1, i2] - escape_n(i2) - jump(i1, i1) - """ - self.optimize_loop(ops, 'Not, VStruct(ssize, adescr=Not)', expected) - def test_p123_vstruct(self): ops = """ [i1, p2, p3] @@ -1439,26 +1269,6 @@ """ self.optimize_loop(ops, expected) - def test_duplicate_getfield_guard_value_const(self): - ops = """ - [p1] - guard_value(p1, ConstPtr(myptr)) [] - i1 = getfield_gc_i(p1, descr=valuedescr) - i2 = getfield_gc_i(ConstPtr(myptr), descr=valuedescr) - escape_n(i1) - escape_n(i2) - jump(p1) - """ - expected = """ - [] - i1 = getfield_gc_i(ConstPtr(myptr), descr=valuedescr) - escape_n(i1) - escape_n(i1) - jump() - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Constant(myptr)', expected) - def test_duplicate_getfield_sideeffects_1(self): ops = """ [p1] @@ -1532,7 +1342,7 @@ 
setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -1544,7 +1354,7 @@ expected = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -1684,12 +1494,12 @@ jump(p1, i1, i2) """ expected = """ - [i1, i2] + [p1, i1, i2] + guard_value(p1, ConstPtr(myptr)) [] setfield_gc(ConstPtr(myptr), i2, descr=valuedescr) - jump(i1, i2) + jump(ConstPtr(myptr), i1, i2) """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Constant(myptr), Not, Not', expected) + self.optimize_loop(ops, expected) def test_duplicate_getarrayitem_1(self): ops = """ @@ -1786,7 +1596,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -1797,7 +1607,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -1866,163 +1676,7 @@ """ self.optimize_loop(ops, expected) - def test_bug_1(self): - ops = """ - [i0, p1] - p4 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(p4) [] - escape_n(p4) - # - p2 = new_with_vtable(descr=nodesize) - p3 = escape_r() - setfield_gc(p2, p3, descr=nextdescr) - jump(i0, p2) - """ - expected = """ - [i0, p4] - guard_nonnull(p4) [] - escape_n(p4) - # - p3 = escape_r() - jump(i0, p3) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, Virtual(node_vtable, nextdescr=Not)', - expected) - - def test_bug_2(self): - ops = """ - [i0, p1] - p4 = getarrayitem_gc(p1, 0, 
descr=arraydescr2) - guard_nonnull(p4) [] - escape_n(p4) - # - p2 = new_array(1, descr=arraydescr2) - p3 = escape_r() - setarrayitem_gc(p2, 0, p3, descr=arraydescr2) - jump(i0, p2) - """ - expected = """ - [i0, p4] - guard_nonnull(p4) [] - escape_n(p4) - # - p3 = escape_r() - jump(i0, p3) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Not, VArray(arraydescr2, Not)', - expected) - - def test_bug_3(self): - ops = """ - [p1] - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable2)) [] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(12) [] - guard_class(p2, ConstClass(node_vtable)) [] - p3 = getfield_gc_r(p1, descr=otherdescr) - guard_nonnull(12) [] - guard_class(p3, ConstClass(node_vtable)) [] - setfield_gc(p3, p2, descr=otherdescr) - p1a = new_with_vtable(ConstClass(node_vtable2)) - p2a = new_with_vtable(descr=nodesize) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - setfield_gc(p1a, p2a, descr=nextdescr) - setfield_gc(p1a, p3a, descr=otherdescr) - jump(p1a) - """ - expected = """ - [p2, p3] - guard_class(p2, ConstClass(node_vtable)) [] - guard_class(p3, ConstClass(node_vtable)) [] - setfield_gc(p3, p2, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - p2a = new_with_vtable(descr=nodesize) - jump(p2a, p3a) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)', expected) - - def test_bug_3bis(self): - ops = """ - [p1] - guard_nonnull(p1) [] - guard_class(p1, ConstClass(node_vtable2)) [] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_nonnull(12) [] - guard_class(p2, ConstClass(node_vtable)) [] - p3 = getfield_gc_r(p1, descr=otherdescr) - guard_nonnull(12) [] - guard_class(p3, ConstClass(node_vtable)) [] - p1a = new_with_vtable(ConstClass(node_vtable2)) - p2a = new_with_vtable(descr=nodesize) - setfield_gc(p3, p2a, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - setfield_gc(p1a, p2a, descr=nextdescr) - setfield_gc(p1a, p3a, 
descr=otherdescr) - jump(p1a) - """ - expected = """ - [p2, p3] - guard_class(p2, ConstClass(node_vtable)) [] - guard_class(p3, ConstClass(node_vtable)) [] - p2a = new_with_vtable(descr=nodesize) - setfield_gc(p3, p2a, descr=otherdescr) - p3a = new_with_vtable(descr=nodesize) - escape_n(p3a) - jump(p2a, p3a) - """ - py.test.skip("XXX") - self.optimize_loop(ops, 'Virtual(node_vtable2, nextdescr=Not, otherdescr=Not)', expected) - - def test_invalid_loop_1(self): - ops = """ - [p1] - guard_isnull(p1) [] - # - p2 = new_with_vtable(descr=nodesize) - jump(p2) - """ - py.test.skip("XXX") - py.test.raises(InvalidLoop, self.optimize_loop, - ops, 'Virtual(node_vtable)', None) - - def test_invalid_loop_2(self): - py.test.skip("this would fail if we had Fixed again in the specnodes") - ops = """ - [p1] - guard_class(p1, ConstClass(node_vtable2)) [] - # - p2 = new_with_vtable(descr=nodesize) - escape_n(p2) # prevent it from staying Virtual - jump(p2) - """ - py.test.raises(InvalidLoop, self.optimize_loop, - ops, '...', None) - - def test_invalid_loop_3(self): - ops = """ - [p1] - p2 = getfield_gc_r(p1, descr=nextdescr) - guard_isnull(p2) [] - # - p3 = new_with_vtable(descr=nodesize) - p4 = new_with_vtable(descr=nodesize) - setfield_gc(p3, p4, descr=nextdescr) - jump(p3) - """ - py.test.skip("XXX") - py.test.raises(InvalidLoop, self.optimize_loop, ops, - 'Virtual(node_vtable, nextdescr=Virtual(node_vtable))', - None) - def test_merge_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -2056,7 +1710,6 @@ self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2074,7 +1727,6 @@ self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] 
guard_nonnull(p1) [i0] @@ -2621,26 +2273,6 @@ where p2 is a node_vtable, valuedescr=i2, nextdescr=p1 ''', rop.GUARD_TRUE) - def test_expand_fail_6(self): - ops = """ - [p0, i0, i1] - guard_true(i0) [p0] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i1, descr=valuedescr) - jump(p1, i1, i1) - """ - expected = """ - [i1b, i0, i1] - guard_true(i0) [i1b] - jump(i1, i1, i1) - """ - py.test.skip("XXX") - self.optimize_loop(ops, '''Virtual(node_vtable, valuedescr=Not), - Not, Not''', expected) - self.check_expanded_fail_descr('''p0 - where p0 is a node_vtable, valuedescr=i1b - ''', rop.GUARD_TRUE) - def test_expand_fail_varray(self): ops = """ [i1] @@ -2682,47 +2314,6 @@ where p2 is a vstruct ssize, adescr=i1, bdescr=p1 ''', rop.GUARD_TRUE) - def test_expand_fail_v_all_1(self): - ops = """ - [i1, p1a, i2] - p6s = getarrayitem_gc(p1a, 0, descr=arraydescr2) - p7v = getfield_gc_r(p6s, descr=bdescr) - p5s = new(descr=ssize) - setfield_gc(p5s, i2, descr=adescr) - setfield_gc(p5s, p7v, descr=bdescr) - setarrayitem_gc(p1a, 1, p5s, descr=arraydescr2) - guard_true(i1) [p1a] - p2s = new(descr=ssize) - p3v = new_with_vtable(descr=nodesize) - p4a = new_array(2, descr=arraydescr2) - setfield_gc(p2s, i1, descr=adescr) - setfield_gc(p2s, p3v, descr=bdescr) - setfield_gc(p3v, i2, descr=valuedescr) - setarrayitem_gc(p4a, 0, p2s, descr=arraydescr2) - jump(i1, p4a, i2) - """ - expected = """ - [i1, ia, iv, pnull, i2] - guard_true(i1) [ia, iv, i2] - jump(1, 1, i2, NULL, i2) - """ - py.test.skip("XXX") - self.optimize_loop(ops, ''' - Not, - VArray(arraydescr2, - VStruct(ssize, - adescr=Not, - bdescr=Virtual(node_vtable, - valuedescr=Not)), - Not), - Not''', expected) - self.check_expanded_fail_descr('''p1a - where p1a is a varray arraydescr2: p6s, p5s - where p6s is a vstruct ssize, adescr=ia, bdescr=p7v - where p5s is a vstruct ssize, adescr=i2, bdescr=p7v - where p7v is a node_vtable, valuedescr=iv - ''', rop.GUARD_TRUE) - def test_expand_fail_lazy_setfield_1(self): ops = """ [p1, 
i2, i3] @@ -5175,6 +4766,8 @@ """ self.optimize_loop(ops, expected) + def test_intmod_bounds_harder(self): + py.test.skip("harder") # Of course any 'maybe-negative % power-of-two' can be turned into # int_and(), but that's a bit harder to detect here because it turns # into several operations, and of course it is wrong to just turn @@ -5192,7 +4785,6 @@ i4 = int_and(i0, 15) finish(i4) """ - py.test.skip("harder") self.optimize_loop(ops, expected) def test_intmod_bounds_bug1(self): @@ -5353,7 +4945,7 @@ i5 = int_lt(i2, i1) guard_true(i5) [] - i6 = getarrayitem_gc(p0, i2) + i6 = getarrayitem_gc_i(p0, i2, descr=chararraydescr) finish(i6) """ expected = """ @@ -5364,7 +4956,7 @@ i4 = int_lt(i2, i0) guard_true(i4) [] - i6 = getarrayitem_gc(p0, i3) + i6 = getarrayitem_gc_i(p0, i3, descr=chararraydescr) finish(i6) """ self.optimize_loop(ops, expected) @@ -5483,7 +5075,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i0 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i0 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i1 = call_pure_i(123, i0, descr=nonwritedescr) finish(i1) """ @@ -5869,15 +5461,15 @@ def test_getarrayitem_gc_pure_not_invalidated(self): ops = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) - i2 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i2 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(i2) jump(p0) """ expected = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) escape_n(i1) jump(p0) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py 2015-11-19 19:21:39.000000000 +0000 +++ 
pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,6 @@ import py, sys from rpython.rlib.objectmodel import instantiate +from rpython.rtyper.lltypesystem import lltype from rpython.jit.metainterp import compile, resume from rpython.jit.metainterp.history import AbstractDescr, ConstInt, TreeLoop from rpython.jit.metainterp.optimize import InvalidLoop @@ -1408,12 +1409,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_pure_getfield_gc_1(self): ops = """ [i] p1 = new_with_vtable(descr=nodesize) setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + i1 = getfield_gc_i(p1, descr=valuedescr) jump(i1) """ expected = """ @@ -1422,10 +1423,10 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_pure_getfield_gc_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ @@ -1435,20 +1436,20 @@ self.node.value = 5 self.optimize_loop(ops, expected) - def test_getfield_gc_pure_3(self): + def test_pure_getfield_gc_3(self): ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) - p3 = getfield_gc_pure_r(p1, descr=nextdescr) + p3 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p3) jump() """ expected = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) escape_n(p2) jump() @@ -1940,7 +1941,6 @@ self.optimize_loop(ops, expected) def test_virtual_raw_malloc_virtualstate(self): - py.test.skip("bogus test maybe?") ops = """ [i0] i1 = getarrayitem_raw_i(i0, 0, descr=rawarraydescr) @@ -2319,7 +2319,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, 
descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -2332,7 +2332,7 @@ preamble = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2340,11 +2340,12 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - jump(p1, i1, i2, p3, i3) + jump(p1, i1, i2, p3) """ expected = """ - [p1, i1, i2, p3, i3] + [p1, i1, i2, p3] # + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2352,8 +2353,7 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - ifoo = arraylen_gc(p3, descr=arraydescr) # killed by the backend - jump(p1, i1, i2, p3, i3) + jump(p1, i1, i2, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2669,7 +2669,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -2680,7 +2680,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -2969,7 +2969,6 @@ assert "promote of a virtual" in exc.msg def test_merge_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -3015,7 +3014,6 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -3039,7 +3037,6 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def 
test_merge_guard_nonnull_guard_class_guard_value(self): - py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -3066,6 +3063,16 @@ self.optimize_loop(ops, expected, preamble) #self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) + def test_invalid_guard_value_after_guard_class(self): + ops = """ + [p1, i0, i1, i2, p2] + guard_class(p1, ConstClass(node_vtable)) [i0] + i3 = int_add(i1, i2) + guard_value(p1, NULL) [i1] + jump(p2, i0, i1, i3, p2) + """ + self.raises(InvalidLoop, self.optimize_loop, ops, ops) + def test_guard_class_oois(self): ops = """ [p1] @@ -3305,8 +3312,8 @@ [p8, p11, i24] p26 = new(descr=ssize) setfield_gc(p26, i24, descr=adescr) - i34 = getfield_gc_pure_i(p11, descr=abisdescr) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=abisdescr) + i35 = getfield_gc_i(p26, descr=adescr) i36 = int_add_ovf(i34, i35) guard_no_overflow() [] jump(p8, p11, i35) @@ -3333,8 +3340,8 @@ setfield_gc(p26, i24, descr=adescr) i28 = int_add(i17, 1) setfield_gc(p8, i28, descr=valuedescr) - i34 = getfield_gc_pure_i(p11, descr=valuedescr3) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=valuedescr3) + i35 = getfield_gc_i(p26, descr=adescr) guard_nonnull(p12) [] i36 = int_add_ovf(i34, i35) guard_no_overflow() [] @@ -3525,14 +3532,14 @@ def test_residual_call_does_not_invalidate_immutable_caches(self): ops = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) - i3 = getfield_gc_pure_i(p1, descr=valuedescr3) + i3 = getfield_gc_i(p1, descr=valuedescr3) jump(p1) """ expected_preamble = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) jump(p1, i1) """ @@ -4881,11 +4888,11 @@ def test_add_sub_ovf_virtual_unroll(self): ops = """ [p15] - i886 = getfield_gc_pure_i(p15, descr=valuedescr) + i886 = getfield_gc_i(p15, 
descr=valuedescr) i888 = int_sub_ovf(i886, 1) guard_no_overflow() [] escape_n(i888) - i4360 = getfield_gc_pure_i(p15, descr=valuedescr) + i4360 = getfield_gc_i(p15, descr=valuedescr) i4362 = int_add_ovf(i4360, 1) guard_no_overflow() [] i4360p = int_sub_ovf(i4362, 1) @@ -4975,18 +4982,16 @@ def test_pure(self): ops = """ [p42] - p53 = getfield_gc_r(ConstPtr(myptr), descr=nextdescr) - p59 = getfield_gc_pure_r(p53, descr=valuedescr) + p53 = getfield_gc_r(ConstPtr(myptr3), descr=nextdescr3) + p59 = getfield_gc_r(p53, descr=valuedescr3) i61 = call_i(1, p59, descr=nonwritedescr) jump(p42) """ expected = """ - [p42, p59] - i61 = call_i(1, p59, descr=nonwritedescr) - jump(p42, p59) - + [p42] + i61 = call_i(1, 7, descr=nonwritedescr) + jump(p42) """ - self.node.value = 5 self.optimize_loop(ops, expected) def test_complains_getfieldpure_setfield(self): @@ -4995,7 +5000,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) setfield_gc(p1, p3, descr=nextdescr) jump(p3) """ @@ -5005,7 +5010,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5013,7 +5018,7 @@ expected = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5024,7 +5029,7 @@ ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) p3 = escape_r() setfield_gc(p3, p1, descr=nextdescr) jump() @@ -6170,14 +6175,14 @@ def test_bug_unroll_with_immutables(self): ops = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) p1 = new_with_vtable(descr=immut_descr) setfield_gc(p1, 1242, descr=immut_intval) jump(p1) """ preamble = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) 
+ i2 = getfield_gc_i(p0, descr=immut_intval) jump() """ expected = """ @@ -6295,6 +6300,26 @@ """ self.optimize_strunicode_loop(ops, ops, ops) + def test_str_slice_bug(self): + ops = """ + [] + p1066 = newstr(8) + escape_n(p1066) # should initialize the string's content + p1134 = call_pure_r(0, p1066, 0, 4, descr=strslicedescr) + escape_n(p1134) + jump() + """ + expected = """ + [] + p1 = newstr(8) + escape_n(p1) + p2 = newstr(4) + copystrcontent(p1, p2, 0, 0, 4) + escape_n(p2) + jump() + """ + self.optimize_strunicode_loop(ops, expected, expected) + # XXX Should some of the call's below now be call_pure? def test_str_concat_1(self): @@ -6434,6 +6459,59 @@ """ self.optimize_strunicode_loop(ops, expected, expected) + def test_str_concat_optimize_fully_initialized(self): + ops = """ + [i0, i1] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + escape_n(p1) + p3 = call_pure_r(0, p1, p1, descr=strconcatdescr) + escape_n(p3) + jump(i0, i1) + """ + expected = """ + [i0, i1] + p1 = newstr(2) + strsetitem(p1, 0, i0) + strsetitem(p1, 1, i1) + escape_n(p1) + p3 = newstr(4) + strsetitem(p3, 0, i0) + strsetitem(p3, 1, i1) + strsetitem(p3, 2, i0) + strsetitem(p3, 3, i1) + escape_n(p3) + jump(i0, i1) + """ + self.optimize_strunicode_loop(ops, expected, expected) + + def test_str_concat_cant_optimize_partialy_uninitialized(self): + ops = """ + [i0] + p1 = newstr(2) + strsetitem(p1, 0, i0) # p1[1] is set by the escape below + escape_n(p1) + p3 = call_pure_r(0, p1, p1, descr=strconcatdescr) + escape_n(p3) + jump(i0) + """ + expected = """ + [i0] + p1 = newstr(2) + strsetitem(p1, 0, i0) + escape_n(p1) + p3 = newstr(4) + strsetitem(p3, 0, i0) + i1 = strgetitem(p1, 1) + strsetitem(p3, 1, i1) + strsetitem(p3, 2, i0) + strsetitem(p3, 3, i1) + escape_n(p3) + jump(i0) + """ + self.optimize_strunicode_loop(ops, expected, expected) + def test_str_slice_len_surviving1(self): ops = """ [p1, i1, i2, i3] @@ -7159,13 +7237,13 @@ [p0, p1, i0] quasiimmut_field(p0, 
descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ expected = """ [p0, p1, i0] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ @@ -7176,7 +7254,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) escape_n(i1) jump() """ @@ -7228,11 +7306,11 @@ [i0a, i0b] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i2 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7255,11 +7333,11 @@ setfield_gc(p, 421, descr=quasifielddescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p, descr=quasifielddescr) + i1 = getfield_gc_i(p, descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(p, descr=quasifielddescr) + i2 = getfield_gc_i(p, descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7498,7 +7576,7 @@ def test_forced_virtual_pure_getfield(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=valuedescr) + p1 = getfield_gc_r(p0, descr=valuedescr3) jump(p1) """ self.optimize_loop(ops, ops) @@ -7508,7 +7586,7 @@ p1 = new_with_vtable(descr=nodesize3) setfield_gc(p1, p0, 
descr=valuedescr3) escape_n(p1) - p2 = getfield_gc_pure_r(p1, descr=valuedescr3) + p2 = getfield_gc_r(p1, descr=valuedescr3) escape_n(p2) jump(p0) """ @@ -7782,14 +7860,14 @@ def test_loopinvariant_getarrayitem_gc_pure(self): ops = """ [p9, i1] - i843 = getarrayitem_gc_pure_i(p9, i1, descr=arraydescr) + i843 = getarrayitem_gc_pure_i(p9, i1, descr=arrayimmutdescr) call_n(i843, descr=nonwritedescr) jump(p9, i1) """ expected = """ [p9, i1, i843] call_n(i843, descr=nonwritedescr) - ifoo = arraylen_gc(p9, descr=arraydescr) + ifoo = arraylen_gc(p9, descr=arrayimmutdescr) jump(p9, i1, i843) """ self.optimize_loop(ops, expected) @@ -7798,7 +7876,7 @@ ops = """ [p0] p1 = getfield_gc_r(p0, descr=nextdescr) - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) call_n(p2, descr=nonwritedescr) jump(p0) """ @@ -7813,14 +7891,14 @@ i1 = arraylen_gc(p1, descr=gcarraydescr) i2 = int_ge(i1, 8) guard_true(i2) [] - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) - jump(p2, p1) + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) + jump(p1, p2) """ expected = """ - [p0, p2, p1] + [p0, p1, p2] call_n(p2, descr=nonwritedescr) i3 = arraylen_gc(p1, descr=gcarraydescr) # Should be killed by backend - jump(p0, p2, p1) + jump(p0, p1, p2) """ self.optimize_loop(ops, expected, expected_short=short) @@ -7995,7 +8073,7 @@ def test_dont_mixup_equal_boxes(self): ops = """ [p8] - i9 = getfield_gc_pure_i(p8, descr=valuedescr) + i9 = getfield_gc_i(p8, descr=valuedescr3) i10 = int_gt(i9, 0) guard_true(i10) [] i29 = int_lshift(i9, 1) @@ -8090,9 +8168,9 @@ py.test.skip("would be fixed by make heap optimizer aware of virtual setfields") ops = """ [p5, p8] - i9 = getfield_gc_pure_i(p5, descr=valuedescr) + i9 = getfield_gc_i(p5, descr=valuedescr) call_n(i9, descr=nonwritedescr) - i11 = getfield_gc_pure_i(p8, descr=valuedescr) + i11 = getfield_gc_i(p8, descr=valuedescr) i13 = int_add_ovf(i11, 1) guard_no_overflow() [] p22 = 
new_with_vtable(descr=nodesize) @@ -8131,14 +8209,14 @@ ops = """ [p0] p10 = getfield_gc_r(ConstPtr(myptr), descr=otherdescr) - guard_value(p10, ConstPtr(myptr2)) [] + guard_value(p10, ConstPtr(myptrb)) [] call_n(p10, descr=nonwritedescr) - setfield_gc(ConstPtr(myptr), ConstPtr(myptr2), descr=otherdescr) + setfield_gc(ConstPtr(myptr), ConstPtr(myptrb), descr=otherdescr) jump(p0) """ expected = """ [p0] - call_n(ConstPtr(myptr2), descr=nonwritedescr) + call_n(ConstPtr(myptrb), descr=nonwritedescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -8162,14 +8240,14 @@ ops = """ [p0] p10 = getfield_gc_r(p0, descr=otherdescr) - guard_value(p10, ConstPtr(myptr2)) [] + guard_value(p10, ConstPtr(myptrb)) [] call_n(p10, descr=nonwritedescr) - setfield_gc(p0, ConstPtr(myptr2), descr=otherdescr) + setfield_gc(p0, ConstPtr(myptrb), descr=otherdescr) jump(p0) """ expected = """ [p0] - call_n(ConstPtr(myptr2), descr=nonwritedescr) + call_n(ConstPtr(myptrb), descr=nonwritedescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -8554,17 +8632,17 @@ [p10] p52 = getfield_gc_r(p10, descr=nextdescr) # inst_storage p54 = getarrayitem_gc_r(p52, 0, descr=arraydescr) - p69 = getfield_gc_pure_r(p54, descr=otherdescr) # inst_w_function + p69 = getfield_gc_r(p54, descr=otherdescr) # inst_w_function quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc_pure_r(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_r(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] p106 = new_with_vtable(descr=nodesize) p108 = new_array(3, descr=arraydescr) p110 = new_with_vtable(descr=nodesize) - setfield_gc(p110, ConstPtr(myptr2), descr=otherdescr) # inst_w_function + setfield_gc(p110, ConstPtr(myptrb), descr=otherdescr) # inst_w_function setarrayitem_gc(p108, 0, p110, descr=arraydescr) setfield_gc(p106, p108, descr=nextdescr) # inst_storage jump(p106) @@ -8580,7 +8658,7 @@ [p69] quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() 
[] - p71 = getfield_gc_pure_r(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_r(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] jump(ConstPtr(myptr)) """ @@ -8782,13 +8860,13 @@ def test_virtual_back_and_forth(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=bdescr) + p1 = getfield_gc_r(p0, descr=nextdescr3) ptemp = new_with_vtable(descr=nodesize) setfield_gc(ptemp, p1, descr=nextdescr) p2 = getfield_gc_r(ptemp, descr=nextdescr) - ix = getarrayitem_gc_pure_i(p2, 0, descr=arraydescr) + ix = getarrayitem_gc_pure_i(p2, 0, descr=arrayimmutdescr) pfoo = getfield_gc_r(ptemp, descr=nextdescr) - guard_value(pfoo, ConstPtr(myarray)) [] + guard_value(pfoo, ConstPtr(immutarray)) [] ifoo = int_add(ix, 13) escape_n(ix) jump(p0) @@ -8818,13 +8896,13 @@ def test_constant_float_pure(self): ops = """ [p0] - f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarraydescr) + f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarrayimmutdescr) guard_value(f0, 1.03) [] jump(p0) """ expected = """ [p0] - ifoo = arraylen_gc(p0, descr=floatarraydescr) + ifoo = arraylen_gc(p0, descr=floatarrayimmutdescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -9032,7 +9110,7 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getfield_gc_pure_i(p0, descr=valuedescr) + getfield_gc_i(p0, descr=valuedescr3) i3 = int_sub(i1, 1) jump(NULL, i3) """ @@ -9043,9 +9121,9 @@ [p0, i1] i2 = int_gt(i1, 0) guard_true(i2) [] - getfield_gc_pure_i(p0, descr=valuedescr) + getfield_gc_i(p0, descr=valuedescr3) i3 = int_sub(i1, 1) - jump(ConstPtr(myptr4), i3) + jump(ConstPtr(myptr2), i3) """ py.test.raises(InvalidLoop, self.optimize_loop, ops, ops) @@ -9159,6 +9237,162 @@ """ py.test.raises(InvalidLoop, self.optimize_loop, ops, ops) + def test_virtual_array_length_discovered_constant_1(self): + ops = """ + [] + i1 = escape_i() + guard_value(i1, 5) [] + p1 = new_array_clear(i1, descr=arraydescr3) + escape_n(p1) + jump() + """ + expected = """ + [] + i1 = escape_i() + guard_value(i1, 5) [] + 
p1 = new_array_clear(5, descr=arraydescr3) # 'i1' => '5' + escape_n(p1) + jump() + """ + self.optimize_loop(ops, expected) + + def test_virtual_array_length_discovered_constant_2(self): + ops = """ + [p0] + escape_n(p0) + i1 = escape_i() + guard_value(i1, 5) [] + p1 = new_array_clear(i1, descr=arraydescr3) + jump(p1) + """ + expected = """ + [] + p1 = new_array_clear(5, descr=arraydescr3) + escape_n(p1) + i1 = escape_i() + guard_value(i1, 5) [] + jump() + """ + a = lltype.malloc(lltype.GcArray(lltype.Ptr(self.NODE3)), 5, zero=True) + self.optimize_loop(ops, expected, jump_values=[a]) + + def test_large_number_of_immutable_references(self): + ops = """ + [p0] + i0 = getfield_gc_i(p0, descr=bigadescr) + i1 = getfield_gc_i(p0, descr=bigbdescr) + i2 = getfield_gc_i(p0, descr=bigcdescr) + i3 = getfield_gc_i(p0, descr=bigddescr) + i4 = getfield_gc_i(p0, descr=bigedescr) + i5 = getfield_gc_i(p0, descr=bigfdescr) + i6 = getfield_gc_i(p0, descr=biggdescr) + i7 = getfield_gc_i(p0, descr=bighdescr) + i8 = getfield_gc_i(p0, descr=bigidescr) + i9 = getfield_gc_i(p0, descr=bigjdescr) + i10 = getfield_gc_i(p0, descr=bigkdescr) + i11 = getfield_gc_i(p0, descr=bigldescr) + i12 = getfield_gc_i(p0, descr=bigmdescr) + i13 = getfield_gc_i(p0, descr=bigndescr) + i14 = getfield_gc_i(p0, descr=bigodescr) + i15 = getfield_gc_i(p0, descr=bigpdescr) + i16 = getfield_gc_i(p0, descr=bigqdescr) + i17 = getfield_gc_i(p0, descr=bigrdescr) + i18 = getfield_gc_i(p0, descr=bigsdescr) + i19 = getfield_gc_i(p0, descr=bigtdescr) + i20 = getfield_gc_i(p0, descr=bigudescr) + i21 = getfield_gc_i(p0, descr=bigvdescr) + i22 = getfield_gc_i(p0, descr=bigwdescr) + i23 = getfield_gc_i(p0, descr=bigxdescr) + i24 = getfield_gc_i(p0, descr=bigydescr) + i25 = getfield_gc_i(p0, descr=bigzdescr) + i27 = getfield_gc_i(p0, descr=bigbdescr) + i28 = getfield_gc_i(p0, descr=bigcdescr) + i29 = getfield_gc_i(p0, descr=bigddescr) + i30 = getfield_gc_i(p0, descr=bigedescr) + i31 = getfield_gc_i(p0, descr=bigfdescr) + i32 = 
getfield_gc_i(p0, descr=biggdescr) + i33 = getfield_gc_i(p0, descr=bighdescr) + i34 = getfield_gc_i(p0, descr=bigidescr) + i35 = getfield_gc_i(p0, descr=bigjdescr) + i36 = getfield_gc_i(p0, descr=bigkdescr) + i37 = getfield_gc_i(p0, descr=bigldescr) + i38 = getfield_gc_i(p0, descr=bigmdescr) + i39 = getfield_gc_i(p0, descr=bigndescr) + i40 = getfield_gc_i(p0, descr=bigodescr) + i41 = getfield_gc_i(p0, descr=bigpdescr) + i42 = getfield_gc_i(p0, descr=bigqdescr) + i43 = getfield_gc_i(p0, descr=bigrdescr) + i44 = getfield_gc_i(p0, descr=bigsdescr) + i45 = getfield_gc_i(p0, descr=bigtdescr) + i46 = getfield_gc_i(p0, descr=bigudescr) + i47 = getfield_gc_i(p0, descr=bigvdescr) + i48 = getfield_gc_i(p0, descr=bigwdescr) + i49 = getfield_gc_i(p0, descr=bigxdescr) + i50 = getfield_gc_i(p0, descr=bigydescr) + i51 = getfield_gc_i(p0, descr=bigzdescr) + i26 = getfield_gc_i(p0, descr=bigadescr) + i99 = int_add(i26, i51) + escape_i(i27) + escape_i(i28) + escape_i(i29) + escape_i(i30) + escape_i(i31) + escape_i(i32) + escape_i(i33) + escape_i(i34) + escape_i(i35) + escape_i(i36) + escape_i(i37) + escape_i(i38) + escape_i(i39) + escape_i(i40) + escape_i(i41) + escape_i(i42) + escape_i(i43) + escape_i(i44) + escape_i(i45) + escape_i(i46) + escape_i(i47) + escape_i(i48) + escape_i(i49) + escape_i(i50) + escape_i(i51) + escape_i(i26) + escape_i(i99) + jump(p0) + """ + expected = """ + [p0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i0,i99] + escape_i(i1) + escape_i(i2) + escape_i(i3) + escape_i(i4) + escape_i(i5) + escape_i(i6) + escape_i(i7) + escape_i(i8) + escape_i(i9) + escape_i(i10) + escape_i(i11) + escape_i(i12) + escape_i(i13) + escape_i(i14) + escape_i(i15) + escape_i(i16) + escape_i(i17) + escape_i(i18) + escape_i(i19) + escape_i(i20) + escape_i(i21) + escape_i(i22) + escape_i(i23) + escape_i(i24) + escape_i(i25) + escape_i(i0) + escape_i(i99) + 
jump(p0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i0,i99) + """ + self.optimize_loop(ops, expected) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_schedule.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_schedule.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_schedule.py 2015-11-19 19:21:39.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_schedule.py 2016-03-19 16:40:12.000000000 +0000 @@ -12,7 +12,7 @@ from rpython.jit.metainterp.optimizeopt.test.test_dependency import (DependencyBaseTest) from rpython.jit.metainterp.optimizeopt.test.test_vecopt import (FakeMetaInterpStaticData, FakeJitDriverStaticData, FakePackSet) -from rpython.jit.metainterp.resoperation import rop, ResOperation +from rpython.jit.metainterp.resoperation import rop, ResOperation, VectorizationInfo from rpython.jit.tool.oparser import parse as opparse from rpython.jit.tool.oparser_model import get_model from rpython.jit.backend.detect_cpu import getcpuclass @@ -207,6 +207,12 @@ """, additional_args=['v10[2xi64]']) pack1 = self.pack(loop1, 0, 2) var = loop1.inputargs[-1] + vi = VectorizationInfo(None) + vi.datatype = 'i' + vi.bytesize = 8 + vi.count = 2 + vi.signed = True + var.set_forwarded(vi) loop2 = self.schedule(loop1, [pack1], prepend_invariant=True, overwrite_funcs = { 'getvector_of_box': lambda v: (0, var), @@ -332,7 +338,7 @@ v9[2xi64] = vec_expand_i(255) v10[2xi64] = vec_raw_load_i(p0, i1, descr=long) v11[2xi64] = vec_int_and(v10[2xi64], v9[2xi64]) - guard_true(v11[2xi64]) [] + vec_guard_true(v11[2xi64]) [] """, False) self.assert_equal(loop2, loop3) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_unroll.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_unroll.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_unroll.py 2015-11-19 
19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_unroll.py 2016-03-19 16:40:12.000000000 +0000 @@ -220,16 +220,16 @@ def test_double_getfield_plus_pure(self): loop = """ [p0] - pc = getfield_gc_pure_r(p0, descr=nextdescr) + pc = getfield_gc_r(p0, descr=nextdescr3) escape_n(p0) # that should flush the caches - p1 = getfield_gc_r(pc, descr=nextdescr) - i0 = getfield_gc_i(p1, descr=valuedescr) + p1 = getfield_gc_r(pc, descr=nextdescr3) + i0 = getfield_gc_i(p1, descr=valuedescr3) jump(p0) """ es, loop, preamble = self.optimize(loop) assert len(es.short_boxes) == 4 # both getfields are available as - # well as getfield_gc_pure + # well as getfield_gc def test_p123_anti_nested(self): loop = """ diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_util.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_util.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_util.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_util.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,4 +1,4 @@ -import py, random +import py, random, string from rpython.rlib.debug import debug_print from rpython.rtyper.lltypesystem import lltype, llmemory, rffi @@ -122,7 +122,14 @@ ('value', lltype.Signed), ('next', lltype.Ptr(NODE3)), hints={'immutable': True})) - + + big_fields = [('big' + i, lltype.Signed) for i in string.ascii_lowercase] + BIG = lltype.GcForwardReference() + BIG.become(lltype.GcStruct('BIG', *big_fields, hints={'immutable': True})) + + for field, _ in big_fields: + locals()[field + 'descr'] = cpu.fielddescrof(BIG, field) + node = lltype.malloc(NODE) node.value = 5 node.next = node @@ -133,16 +140,25 @@ node2.parent.parent.typeptr = node_vtable2 node2addr = lltype.cast_opaque_ptr(llmemory.GCREF, node2) myptr = lltype.cast_opaque_ptr(llmemory.GCREF, node) - mynode2 = lltype.malloc(NODE) + mynodeb = lltype.malloc(NODE) myarray = 
lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(lltype.GcArray(lltype.Signed), 13, zero=True)) - mynode2.parent.typeptr = node_vtable - myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode2) - mynode3 = lltype.malloc(NODE2) - mynode3.parent.parent.typeptr = node_vtable2 + mynodeb.parent.typeptr = node_vtable + myptrb = lltype.cast_opaque_ptr(llmemory.GCREF, mynodeb) + myptr2 = lltype.malloc(NODE2) + myptr2.parent.parent.typeptr = node_vtable2 + myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, myptr2) + nullptr = lltype.nullptr(llmemory.GCREF.TO) + + mynode3 = lltype.malloc(NODE3) + mynode3.parent.typeptr = node_vtable3 + mynode3.value = 7 + mynode3.next = mynode3 myptr3 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode3) # a NODE2 mynode4 = lltype.malloc(NODE3) mynode4.parent.typeptr = node_vtable3 myptr4 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode4) # a NODE3 + + nullptr = lltype.nullptr(llmemory.GCREF.TO) #nodebox2 = InputArgRef(lltype.cast_opaque_ptr(llmemory.GCREF, node2)) nodesize = cpu.sizeof(NODE, node_vtable) @@ -203,7 +219,6 @@ arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) int32arraydescr = cpu.arraydescrof(lltype.GcArray(rffi.INT)) int16arraydescr = cpu.arraydescrof(lltype.GcArray(rffi.SHORT)) - floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) float32arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.SingleFloat)) arraydescr_tid = arraydescr.get_type_id() array = lltype.malloc(lltype.GcArray(lltype.Signed), 15, zero=True) @@ -212,6 +227,12 @@ array2ref = lltype.cast_opaque_ptr(llmemory.GCREF, array2) gcarraydescr = cpu.arraydescrof(lltype.GcArray(llmemory.GCREF)) gcarraydescr_tid = gcarraydescr.get_type_id() + floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) + + arrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed, hints={"immutable": True})) + immutarray = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(arrayimmutdescr.A, 13, zero=True)) + gcarrayimmutdescr = 
cpu.arraydescrof(lltype.GcArray(llmemory.GCREF, hints={"immutable": True})) + floatarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Float, hints={"immutable": True})) # a GcStruct not inheriting from OBJECT tpl = lltype.malloc(S, zero=True) @@ -244,7 +265,7 @@ tsize = cpu.sizeof(T, None) cdescr = cpu.fielddescrof(T, 'c') ddescr = cpu.fielddescrof(T, 'd') - arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE))) + arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE3))) U = lltype.GcStruct('U', ('parent', OBJECT), @@ -481,15 +502,19 @@ return self.oparse.parse() def postprocess(self, op): + class FakeJitCode(object): + index = 0 + if op.is_guard(): - op.rd_snapshot = resume.Snapshot(None, op.getfailargs()) - op.rd_frame_info_list = resume.FrameInfo(None, "code", 11) + op.rd_snapshot = resume.TopSnapshot( + resume.Snapshot(None, op.getfailargs()), [], []) + op.rd_frame_info_list = resume.FrameInfo(None, FakeJitCode(), 11) def add_guard_future_condition(self, res): # invent a GUARD_FUTURE_CONDITION to not have to change all tests if res.operations[-1].getopnum() == rop.JUMP: guard = ResOperation(rop.GUARD_FUTURE_CONDITION, [], None) - guard.rd_snapshot = resume.Snapshot(None, []) + guard.rd_snapshot = resume.TopSnapshot(None, [], []) res.operations.insert(-1, guard) def assert_equal(self, optimized, expected, text_right=None): @@ -562,6 +587,10 @@ else: for i, box in enumerate(jump_op.getarglist()): if box.type == 'r' and not box.is_constant(): + # NOTE: we arbitrarily set the box contents to a NODE2 + # object here. If you need something different, you + # need to pass a 'jump_values' argument to e.g. 
+ # optimize_loop() box.setref_base(self.nodefulladdr) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py 2016-03-19 16:40:12.000000000 +0000 @@ -105,6 +105,7 @@ def vectoroptimizer_unrolled(self, loop, unroll_factor = -1): opt = self.vectoroptimizer(loop) opt.linear_find_smallest_type(loop) + loop.setup_vectorization() if unroll_factor == -1 and opt.smallest_type_bytes == 0: raise NotAVectorizeableLoop() if unroll_factor == -1: @@ -309,7 +310,7 @@ 'v10[4xi32] = vec_getarrayitem_raw_i(p0,i0,descr=int32arraydescr)', 'v11[4xi32] = vec_int_is_true(v10[4xi32])', 'i100 = vec_unpack_i(v11[4xi32], 0, 1)', - 'guard_true(v11[4xi32]) [i100]', + 'vec_guard_true(v11[4xi32]) [i100]', ], trace) def test_vectorize_skip(self): diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py 2016-03-19 16:40:12.000000000 +0000 @@ -1103,8 +1103,8 @@ jump(p0) """ self.optimize_bridge(loops, bridge, loops[0], 'Loop0', [self.myptr]) - self.optimize_bridge(loops, bridge, loops[1], 'Loop1', [self.myptr3]) - self.optimize_bridge(loops[0], bridge, 'RETRACE', [self.myptr3]) + self.optimize_bridge(loops, bridge, loops[1], 'Loop1', [self.myptr2]) + self.optimize_bridge(loops[0], bridge, 'RETRACE', [self.myptr2]) self.optimize_bridge(loops, loops[0], loops[0], 'Loop0', [self.nullptr]) self.optimize_bridge(loops, loops[1], loops[1], 'Loop1', [self.nullptr]) diff -Nru 
pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/unroll.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/unroll.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/unroll.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/unroll.py 2016-03-19 16:40:12.000000000 +0000 @@ -180,7 +180,7 @@ if not inline_short_preamble: self.jump_to_preamble(celltoken, end_jump, info) - return (UnrollInfo(target_token, label_op, [], + return (UnrollInfo(target_token, label_op, extra_same_as, self.optimizer.quasi_immutable_deps), self.optimizer._newoperations) @@ -314,9 +314,16 @@ args, virtuals = target_virtual_state.make_inputargs_and_virtuals( args, self.optimizer) short_preamble = target_token.short_preamble - extra = self.inline_short_preamble(args + virtuals, args, - short_preamble, self.optimizer.patchguardop, - target_token, label_op) + try: + extra = self.inline_short_preamble(args + virtuals, args, + short_preamble, self.optimizer.patchguardop, + target_token, label_op) + except KeyError: + # SHOULD NOT OCCUR BUT DOES: WHY?? issue #2185 + self.optimizer.metainterp_sd.logger_ops.log_short_preamble([], + short_preamble, {}) + raise + self.send_extra_operation(jump_op.copy_and_change(rop.JUMP, args=args + extra, descr=target_token)) @@ -335,26 +342,32 @@ patchguardop, target_token, label_op): short_inputargs = short[0].getarglist() short_jump_args = short[-1].getarglist() - if (self.short_preamble_producer and - self.short_preamble_producer.target_token is target_token): - # this means we're inlining the short preamble that's being - # built. 
Make sure we modify the correct things in-place - # THIS WILL MODIFY ALL THE LISTS PROVIDED, POTENTIALLY - self.short_preamble_producer.setup(short_inputargs, short_jump_args, - short, label_op.getarglist()) - if 1: # (keep indentation) + sb = self.short_preamble_producer + if sb is not None: + assert isinstance(sb, ExtendedShortPreambleBuilder) + if sb.target_token is target_token: + # this means we're inlining the short preamble that's being + # built. Make sure we modify the correct things in-place + self.short_preamble_producer.setup(short_jump_args, + short, label_op.getarglist()) + # after this call, THE REST OF THIS FUNCTION WILL MODIFY ALL + # THE LISTS PROVIDED, POTENTIALLY + + # We need to make a list of fresh new operations corresponding + # to the short preamble operations. We could temporarily forward + # the short operations to the fresh ones, but there are obscure + # issues: send_extra_operation() below might occasionally invoke + # use_box(), which assumes the short operations are not forwarded. + # So we avoid such temporary forwarding and just use a dict here. + assert len(short_inputargs) == len(jump_args) + mapping = {} + for i in range(len(jump_args)): + mapping[short_inputargs[i]] = jump_args[i] + + # a fix-point loop, runs only once in almost all cases + i = 1 + while 1: self._check_no_forwarding([short_inputargs, short], False) - assert len(short_inputargs) == len(jump_args) - # We need to make a list of fresh new operations corresponding - # to the short preamble operations. We could temporarily forward - # the short operations to the fresh ones, but there are obscure - # issues: send_extra_operation() below might occasionally invoke - # use_box(), which assumes the short operations are not forwarded. - # So we avoid such temporary forwarding and just use a dict here. 
- mapping = {} - for i in range(len(jump_args)): - mapping[short_inputargs[i]] = jump_args[i] - i = 1 while i < len(short) - 1: sop = short[i] arglist = self._map_args(mapping, sop.getarglist()) @@ -373,8 +386,12 @@ for arg in args_no_virtuals + short_jump_args: self.optimizer.force_box(self.get_box_replacement(arg)) self.optimizer.flush() - return [self.get_box_replacement(box) - for box in self._map_args(mapping, short_jump_args)] + # done unless "short" has grown again + if i == len(short) - 1: + break + + return [self.get_box_replacement(box) + for box in self._map_args(mapping, short_jump_args)] def _expand_info(self, arg, infos): if isinstance(arg, AbstractResOp) and arg.is_same_as(): diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/util.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/util.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/util.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/util.py 2016-03-19 16:40:12.000000000 +0000 @@ -8,7 +8,7 @@ from rpython.jit.metainterp import resoperation from rpython.rlib.debug import make_sure_not_resized from rpython.jit.metainterp.resoperation import rop -from rpython.jit.metainterp.resume import Snapshot, AccumInfo +from rpython.jit.metainterp.resume import AccumInfo # ____________________________________________________________ # Misc. 
utilities diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/vector.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/vector.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/vector.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/vector.py 2016-03-19 16:40:12.000000000 +0000 @@ -19,10 +19,10 @@ MemoryRef, Node, IndexVar) from rpython.jit.metainterp.optimizeopt.version import LoopVersionInfo from rpython.jit.metainterp.optimizeopt.schedule import (VecScheduleState, - SchedulerState, Scheduler, Pack, Pair, AccumPack) + SchedulerState, Scheduler, Pack, Pair, AccumPack, forwarded_vecinfo) from rpython.jit.metainterp.optimizeopt.guard import GuardStrengthenOpt from rpython.jit.metainterp.resoperation import (rop, ResOperation, GuardResOp, - OpHelpers, VecOperation) + OpHelpers, VecOperation, VectorizationInfo) from rpython.rlib import listsort from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_print, debug_start, debug_stop @@ -31,6 +31,13 @@ from rpython.jit.backend.llsupport.symbolic import (WORD as INT_WORD, SIZEOF_FLOAT as FLOAT_WORD) +def copy_resop(op): + newop = op.copy() + fwd = op.get_forwarded() + if fwd is not None and isinstance(fwd, VectorizationInfo): + newop.set_forwarded(fwd) + return newop + class VectorLoop(object): def __init__(self, label, oplist, jump): self.label = label @@ -42,6 +49,14 @@ self.jump = jump assert self.jump.getopnum() == rop.JUMP + def setup_vectorization(self): + for op in self.operations: + op.set_forwarded(VectorizationInfo(op)) + + def teardown_vectorization(self): + for op in self.operations: + op.set_forwarded(None) + def finaloplist(self, jitcell_token=None, reset_label_token=True, label=False): oplist = [] if jitcell_token: @@ -71,28 +86,28 @@ def clone(self): renamer = Renamer() - label = self.label.copy() + label = copy_resop(self.label) prefix = [] for op in self.prefix: - newop = op.copy() + newop = copy_resop(op) 
renamer.rename(newop) if not newop.returns_void(): renamer.start_renaming(op, newop) prefix.append(newop) prefix_label = None if self.prefix_label: - prefix_label = self.prefix_label.copy() + prefix_label = copy_resop(self.prefix_label) renamer.rename(prefix_label) oplist = [] for op in self.operations: - newop = op.copy() + newop = copy_resop(op) renamer.rename(newop) if not newop.returns_void(): renamer.start_renaming(op, newop) oplist.append(newop) - jump = self.jump.copy() + jump = copy_resop(self.jump) renamer.rename(jump) - loop = VectorLoop(self.label.copy(), oplist, jump) + loop = VectorLoop(copy_resop(self.label), oplist, jump) loop.prefix = prefix loop.prefix_label = prefix_label return loop @@ -110,6 +125,7 @@ # the original loop (output of optimize_unroll) info = LoopVersionInfo(loop_info) version = info.snapshot(loop) + loop.setup_vectorization() try: debug_start("vec-opt-loop") metainterp_sd.logger_noopt.log_loop([], loop.finaloplist(label=True), -2, None, None, "pre vectorize") @@ -148,6 +164,8 @@ llop.debug_print_traceback(lltype.Void) else: raise + finally: + loop.teardown_vectorization() return loop_info, loop_ops def user_loop_bail_fast_path(loop, warmstate): @@ -262,7 +280,7 @@ for i, op in enumerate(operations): if op.getopnum() in prohibit_opnums: continue # do not unroll this operation twice - copied_op = op.copy() + copied_op = copy_resop(op) if not copied_op.returns_void(): # every result assigns a new box, thus creates an entry # to the rename map. @@ -593,7 +611,8 @@ self.savings += -count def record_vector_pack(self, src, index, count): - if src.datatype == FLOAT: + vecinfo = forwarded_vecinfo(src) + if vecinfo.datatype == FLOAT: if index == 1 and count == 1: self.savings -= 2 return @@ -607,7 +626,9 @@ See limintations (vectorization.rst). 
""" if l_op.getopnum() == r_op.getopnum(): - return l_op.bytesize == r_op.bytesize + l_vecinfo = forwarded_vecinfo(l_op) + r_vecinfo = forwarded_vecinfo(r_op) + return l_vecinfo.bytesize == r_vecinfo.bytesize return False class PackSet(object): @@ -728,7 +749,9 @@ size = INT_WORD if left.type == 'f': size = FLOAT_WORD - if not (left.bytesize == right.bytesize and left.bytesize == size): + l_vecinfo = forwarded_vecinfo(left) + r_vecinfo = forwarded_vecinfo(right) + if not (l_vecinfo.bytesize == r_vecinfo.bytesize and l_vecinfo.bytesize == size): # do not support if if the type size is smaller # than the cpu word size. # WHY? diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/virtualize.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/virtualize.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/virtualize.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/virtualize.py 2016-03-19 16:40:12.000000000 +0000 @@ -29,7 +29,11 @@ const = self.new_const_item(arraydescr) opinfo = info.ArrayPtrInfo(arraydescr, const, size, clear, is_virtual=True) - newop = self.replace_op_with(source_op, source_op.getopnum()) + # Replace 'source_op' with a version in which the length is + # given as directly a Const, without relying on forwarding. + # See test_virtual_array_length_discovered_constant_2. 
+ newop = self.replace_op_with(source_op, source_op.getopnum(), + args=[ConstInt(size)]) newop.set_forwarded(opinfo) return opinfo @@ -42,13 +46,15 @@ def make_virtual_raw_memory(self, size, source_op): opinfo = info.RawBufferPtrInfo(self.optimizer.cpu, size) - newop = self.replace_op_with(source_op, source_op.getopnum()) + newop = self.replace_op_with(source_op, source_op.getopnum(), + args=[source_op.getarg(0), ConstInt(size)]) newop.set_forwarded(opinfo) return opinfo def make_virtual_raw_slice(self, offset, parent, source_op): opinfo = info.RawSlicePtrInfo(offset, parent) - newop = self.replace_op_with(source_op, source_op.getopnum()) + newop = self.replace_op_with(source_op, source_op.getopnum(), + args=[source_op.getarg(0), ConstInt(offset)]) newop.set_forwarded(opinfo) return opinfo @@ -182,12 +188,6 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - # note: the following line does not mean that the two operations are - # completely equivalent, because GETFIELD_GC_PURE is_always_pure(). 
- optimize_GETFIELD_GC_PURE_I = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_I - def optimize_SETFIELD_GC(self, op): struct = op.getarg(0) opinfo = self.getptrinfo(struct) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/vstring.py pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/vstring.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/optimizeopt/vstring.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/optimizeopt/vstring.py 2016-03-19 16:40:12.000000000 +0000 @@ -188,7 +188,7 @@ def string_copy_parts(self, op, string_optimizer, targetbox, offsetbox, mode): - if not self.is_virtual(): # and not self.is_completely_initialized(): + if not self.is_virtual(): return StrPtrInfo.string_copy_parts(self, op, string_optimizer, targetbox, offsetbox, mode) else: @@ -643,12 +643,15 @@ vstart = self.getintbound(op.getarg(2)) vstop = self.getintbound(op.getarg(3)) # - if (isinstance(vstr, VStringPlainInfo) and vstart.is_constant() - and vstop.is_constant()): - value = self.make_vstring_plain(op, mode, -1) - value.setup_slice(vstr._chars, vstart.getint(), - vstop.getint()) - return True + #---The following looks reasonable, but see test_str_slice_bug: + # the problem is what occurs if the source string has been forced + # but still contains None in its _chars + #if (isinstance(vstr, VStringPlainInfo) and vstart.is_constant() + # and vstop.is_constant()): + # value = self.make_vstring_plain(op, mode, -1) + # value.setup_slice(vstr._chars, vstart.getint(), + # vstop.getint()) + # return True # startbox = op.getarg(2) strbox = op.getarg(1) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/pyjitpl.py pypy-5.0.1+dfsg/rpython/jit/metainterp/pyjitpl.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/pyjitpl.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/pyjitpl.py 2016-03-19 16:40:12.000000000 +0000 @@ -653,46 +653,37 @@ 
@arguments("box", "descr") def opimpl_getfield_gc_i(self, box, fielddescr): - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_I, box, fielddescr, 'i') - @arguments("box", "descr") - def opimpl_getfield_gc_r(self, box, fielddescr): - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_R, box, fielddescr, 'r') - @arguments("box", "descr") - def opimpl_getfield_gc_f(self, box, fielddescr): - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_F, box, fielddescr, 'f') - - @arguments("box", "descr") - def opimpl_getfield_gc_i_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): # if 'box' is directly a ConstPtr, bypass the heapcache completely resbox = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_I, fielddescr, box) + rop.GETFIELD_GC_I, fielddescr, box) return ConstInt(resbox) return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_I, box, fielddescr, 'i') + rop.GETFIELD_GC_I, box, fielddescr, 'i') @arguments("box", "descr") - def opimpl_getfield_gc_f_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): + def opimpl_getfield_gc_f(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): # if 'box' is directly a ConstPtr, bypass the heapcache completely resvalue = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_F, fielddescr, box) + rop.GETFIELD_GC_F, fielddescr, box) return ConstFloat(resvalue) return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_F, box, fielddescr, 'f') + rop.GETFIELD_GC_F, box, fielddescr, 'f') @arguments("box", "descr") - def opimpl_getfield_gc_r_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): + def opimpl_getfield_gc_r(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): # if 'box' is directly a ConstPtr, bypass the heapcache completely val = 
executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_R, fielddescr, box) + rop.GETFIELD_GC_R, fielddescr, box) return ConstPtr(val) return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_R, box, fielddescr, 'r') + rop.GETFIELD_GC_R, box, fielddescr, 'r') + + opimpl_getfield_gc_i_pure = opimpl_getfield_gc_i + opimpl_getfield_gc_r_pure = opimpl_getfield_gc_r + opimpl_getfield_gc_f_pure = opimpl_getfield_gc_f @arguments("box", "box", "descr") def opimpl_getinteriorfield_gc_i(self, array, index, descr): @@ -733,7 +724,7 @@ @arguments("box", "descr", "orgpc") def _opimpl_getfield_gc_greenfield_any(self, box, fielddescr, pc): ginfo = self.metainterp.jitdriver_sd.greenfield_info - opnum = OpHelpers.getfield_pure_for_descr(fielddescr) + opnum = OpHelpers.getfield_for_descr(fielddescr) if (ginfo is not None and fielddescr in ginfo.green_field_descrs and not self._nonstandard_virtualizable(pc, box, fielddescr)): # fetch the result, but consider it as a Const box and don't @@ -810,6 +801,27 @@ return self.execute_with_descr(rop.RAW_LOAD_F, arraydescr, addrbox, offsetbox) + def _remove_symbolics(self, c): + if not we_are_translated(): + from rpython.rtyper.lltypesystem import ll2ctypes + assert isinstance(c, ConstInt) + c = ConstInt(ll2ctypes.lltype2ctypes(c.value)) + return c + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_i(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_I, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_f(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_F, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + @arguments("box") def opimpl_hint_force_virtualizable(self, box): 
self.metainterp.gen_store_back_in_vable(box) @@ -1346,6 +1358,17 @@ self.metainterp.attach_debug_info(op) @arguments("box") + def opimpl_jit_enter_portal_frame(self, uniqueidbox): + unique_id = uniqueidbox.getint() + jd_no = self.metainterp.jitdriver_sd.mainjitcode.index # fish + self.metainterp.enter_portal_frame(jd_no, unique_id) + + @arguments() + def opimpl_jit_leave_portal_frame(self): + jd_no = self.metainterp.jitdriver_sd.mainjitcode.index # fish + self.metainterp.leave_portal_frame(jd_no) + + @arguments("box") def _opimpl_assert_green(self, box): if not isinstance(box, Const): msg = "assert_green failed at %s:%d" % ( @@ -1569,27 +1592,29 @@ return resbox self.metainterp.vable_and_vrefs_before_residual_call() tp = descr.get_normalized_result_type() + resbox = NOT_HANDLED if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: resbox = self.metainterp.direct_libffi_call(allboxes, descr, tp) - elif effectinfo.is_call_release_gil(): - resbox = self.metainterp.direct_call_release_gil(allboxes, - descr, tp) - elif tp == 'i': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_I, allboxes, descr=descr) - elif tp == 'r': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_R, allboxes, descr=descr) - elif tp == 'f': - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_F, allboxes, descr=descr) - elif tp == 'v': - self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE_N, allboxes, descr=descr) - resbox = None - else: - assert False + if resbox is NOT_HANDLED: + if effectinfo.is_call_release_gil(): + resbox = self.metainterp.direct_call_release_gil(allboxes, + descr, tp) + elif tp == 'i': + resbox = self.metainterp.execute_and_record_varargs( + rop.CALL_MAY_FORCE_I, allboxes, descr=descr) + elif tp == 'r': + resbox = self.metainterp.execute_and_record_varargs( + rop.CALL_MAY_FORCE_R, allboxes, descr=descr) + elif tp == 'f': + resbox = self.metainterp.execute_and_record_varargs( + 
rop.CALL_MAY_FORCE_F, allboxes, descr=descr) + elif tp == 'v': + self.metainterp.execute_and_record_varargs( + rop.CALL_MAY_FORCE_N, allboxes, descr=descr) + resbox = None + else: + assert False self.metainterp.vrefs_after_residual_call() vablebox = None if assembler_call: @@ -1889,6 +1914,9 @@ self.box_names_memo = {} + self.aborted_tracing_jitdriver = None + self.aborted_tracing_greenkey = None + def retrace_needed(self, trace, exported_state): self.partial_trace = trace self.retracing_from = len(self.history.operations) - 1 @@ -1903,7 +1931,9 @@ raise ChangeFrame def is_main_jitcode(self, jitcode): - return self.jitdriver_sd is not None and jitcode is self.jitdriver_sd.mainjitcode + return (jitcode.jitdriver_sd is not None and + jitcode.jitdriver_sd.jitdriver.is_recursive) + #return self.jitdriver_sd is not None and jitcode is self.jitdriver_sd.mainjitcode def newframe(self, jitcode, greenkey=None): if jitcode.jitdriver_sd: @@ -1918,7 +1948,7 @@ self.current_call_id += 1 if greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( - (greenkey, len(self.history.operations))) + (jitcode.jitdriver_sd, greenkey, len(self.history.operations))) if len(self.free_frames_list) > 0: f = self.free_frames_list.pop() else: @@ -1945,7 +1975,7 @@ self.call_ids.pop() if frame.greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( - (None, len(self.history.operations))) + (jitcode.jitdriver_sd, None, len(self.history.operations))) # we save the freed MIFrames to avoid needing to re-create new # MIFrame objects all the time; they are a bit big, with their # 3*256 register entries. 
@@ -2076,17 +2106,7 @@ profiler = self.staticdata.profiler profiler.count_ops(opnum) resvalue = executor.execute(self.cpu, self, opnum, descr, *argboxes) - # - is_pure = rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST - if not is_pure: - if (opnum == rop.GETFIELD_RAW_I or - opnum == rop.GETFIELD_RAW_R or - opnum == rop.GETFIELD_RAW_F or - opnum == rop.GETARRAYITEM_RAW_I or - opnum == rop.GETARRAYITEM_RAW_F): - is_pure = descr.is_always_pure() - # - if is_pure: + if OpHelpers.is_pure_with_descr(opnum, descr): return self._record_helper_pure(opnum, resvalue, descr, *argboxes) if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: return self._record_helper_ovf(opnum, resvalue, descr, *argboxes) @@ -2226,17 +2246,28 @@ self.staticdata.logger_ops._make_log_operations( self.box_names_memo), self.history.operations) + if self.aborted_tracing_jitdriver is not None: + jd_sd = self.aborted_tracing_jitdriver + greenkey = self.aborted_tracing_greenkey + self.staticdata.warmrunnerdesc.hooks.on_trace_too_long( + jd_sd.jitdriver, greenkey, + jd_sd.warmstate.get_location_str(greenkey)) + # no ops for now + self.aborted_tracing_jitdriver = None + self.aborted_tracing_greenkey = None self.staticdata.stats.aborted() def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate if len(self.history.operations) > warmrunnerstate.trace_limit: - greenkey_of_huge_function = self.find_biggest_function() + jd_sd, greenkey_of_huge_function = self.find_biggest_function() self.staticdata.stats.record_aborted(greenkey_of_huge_function) self.portal_trace_positions = None if greenkey_of_huge_function is not None: - warmrunnerstate.disable_noninlinable_function( + jd_sd.warmstate.disable_noninlinable_function( greenkey_of_huge_function) + self.aborted_tracing_jitdriver = jd_sd + self.aborted_tracing_greenkey = greenkey_of_huge_function if self.current_merge_points: jd_sd = self.jitdriver_sd greenkey = self.current_merge_points[0][0][:jd_sd.num_green_args] @@ -2551,7 +2582,6 
@@ self.jitdriver_sd.warmstate.attach_procedure_to_interp(greenkey, target_token.targeting_jitcell_token) self.staticdata.stats.add_jitcell_token(target_token.targeting_jitcell_token) - if target_token is not None: # raise if it *worked* correctly assert isinstance(target_token, TargetToken) jitcell_token = target_token.targeting_jitcell_token @@ -2912,37 +2942,41 @@ start_stack = [] max_size = 0 max_key = None - warmstate = self.jitdriver_sd.warmstate + max_jdsd = None r = '' debug_start("jit-abort-longest-function") - for pair in self.portal_trace_positions: - key, pos = pair + for elem in self.portal_trace_positions: + jitdriver_sd, key, pos = elem if key is not None: - start_stack.append(pair) + start_stack.append(elem) else: - greenkey, startpos = start_stack.pop() + jitdriver_sd, greenkey, startpos = start_stack.pop() + warmstate = jitdriver_sd.warmstate size = pos - startpos if size > max_size: if warmstate is not None: r = warmstate.get_location_str(greenkey) debug_print("found new longest: %s %d" % (r, size)) max_size = size + max_jdsd = jitdriver_sd max_key = greenkey if start_stack: - key, pos = start_stack[0] + jitdriver_sd, key, pos = start_stack[0] + warmstate = jitdriver_sd.warmstate size = len(self.history.operations) - pos if size > max_size: if warmstate is not None: - r = self.jitdriver_sd.warmstate.get_location_str(key) + r = warmstate.get_location_str(key) debug_print("found new longest: %s %d" % (r, size)) max_size = size + max_jdsd = jitdriver_sd max_key = key - if warmstate is not None: # tests + if self.portal_trace_positions: # tests self.staticdata.logger_ops.log_abort_loop(self.history.inputargs, self.history.operations, self.box_names_memo) debug_stop("jit-abort-longest-function") - return max_key + return max_jdsd, max_key def record_result_of_call_pure(self, op): """ Patch a CALL into a CALL_PURE. 
@@ -3008,7 +3042,7 @@ # box_cif_description = argboxes[1] if not isinstance(box_cif_description, ConstInt): - return + return NOT_HANDLED cif_description = box_cif_description.getint() cif_description = llmemory.cast_int_to_adr(cif_description) cif_description = llmemory.cast_adr_to_ptr(cif_description, @@ -3016,7 +3050,7 @@ extrainfo = orig_calldescr.get_extra_info() calldescr = self.cpu.calldescrof_dynamic(cif_description, extrainfo) if calldescr is None: - return + return NOT_HANDLED # box_exchange_buffer = argboxes[3] arg_boxes = [] @@ -3137,6 +3171,8 @@ # is supposed to be raised. The default False means that it # should just be copied into the blackhole interp, but not raised. +NOT_HANDLED = history.CONST_FALSE + # ____________________________________________________________ def _get_opimpl_method(name, argcodes): diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/quasiimmut.py pypy-5.0.1+dfsg/rpython/jit/metainterp/quasiimmut.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/quasiimmut.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/quasiimmut.py 2016-03-19 16:40:12.000000000 +0000 @@ -51,6 +51,7 @@ class QuasiImmut(object): llopaque = True compress_limit = 30 + looptokens_wrefs = None def __init__(self, cpu): self.cpu = cpu @@ -75,7 +76,7 @@ def compress_looptokens_list(self): self.looptokens_wrefs = [wref for wref in self.looptokens_wrefs if wref() is not None] - # NB. we must keep around the looptoken_wrefs that are + # NB. we must keep around the looptokens_wrefs that are # already invalidated; see below self.compress_limit = (len(self.looptokens_wrefs) + 15) * 2 @@ -83,6 +84,9 @@ # When this is called, all the loops that we record become # invalid: all GUARD_NOT_INVALIDATED in these loops (and # in attached bridges) must now fail. 
+ if self.looptokens_wrefs is None: + # can't happen, but helps compiled tests + return wrefs = self.looptokens_wrefs self.looptokens_wrefs = [] for wref in wrefs: diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/resoperation.py pypy-5.0.1+dfsg/rpython/jit/metainterp/resoperation.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/resoperation.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/resoperation.py 2016-03-19 16:40:15.000000000 +0000 @@ -28,11 +28,7 @@ _repr_memo = CountingDict() is_info_class = False namespace = None - _attrs_ = ('datatype', 'bytesize', 'signed', 'count') - datatype = '\x00' - bytesize = -1 # -1 means the biggest size known to the machine - signed = True - count = -1 + _attrs_ = () def _get_hash_(self): return compute_identity_hash(self) @@ -96,104 +92,159 @@ elif op.is_guard(): assert not descr.final_descr op.setdescr(descr) - op.inittype() return op def VecOperation(opnum, args, baseop, count, descr=None): - datatype = baseop.datatype - bytesize = baseop.bytesize + vecinfo = baseop.get_forwarded() + assert vecinfo is not None + assert isinstance(vecinfo, VectorizationInfo) + datatype = vecinfo.datatype + bytesize = vecinfo.bytesize + signed = vecinfo.signed if baseop.is_typecast(): ft,tt = baseop.cast_types() datatype = tt bytesize = baseop.cast_to_bytesize() - return VecOperationNew(opnum, args, datatype, bytesize, baseop.signed, count, descr) + return VecOperationNew(opnum, args, datatype, bytesize, signed, count, descr) -def VecOperationNew(opnum, args, datateyp, bytesize, signed, count, descr=None): +def VecOperationNew(opnum, args, datatype, bytesize, signed, count, descr=None): op = ResOperation(opnum, args, descr) - op.datatype = datateyp - op.bytesize = bytesize - op.signed = signed - op.count = count + vecinfo = VectorizationInfo(None) + vecinfo.setinfo(datatype, bytesize, signed) + vecinfo.count = count + op.set_forwarded(vecinfo) + if isinstance(op,VectorOp): + op.datatype = datatype + op.bytesize = 
bytesize + op.signed = signed + op.count = count + else: + assert isinstance(op, VectorGuardOp) + op.datatype = datatype + op.bytesize = bytesize + op.signed = signed + op.count = count + assert op.count > 0 + + if not we_are_translated(): + # for the test suite + op._vec_debug_info = vecinfo return op -class Typed(object): - _mixin_ = True +def vector_repr(self, num): + if we_are_translated(): + # the set_forwarded solution is volatile, we CANNOT acquire + # the information (e.g. count, bytesize) here easily + return 'v' + str(num) + if hasattr(self, '_vec_debug_info'): + vecinfo = self._vec_debug_info + count = vecinfo.count + datatype = vecinfo.datatype + bytesize = vecinfo.bytesize + elif self.vector == -2: + count = self.count + datatype = self.datatype + bytesize = self.bytesize + else: + assert 0, "cannot debug print variable" + if self.opnum in (rop.VEC_UNPACK_I, rop.VEC_UNPACK_F): + return self.type + str(num) + return 'v%d[%dx%s%d]' % (num, count, datatype, + bytesize * 8) - def inittype(self): - if self.is_primitive_array_access(): +class VectorizationInfo(AbstractValue): + _attrs_ = ('datatype', 'bytesize', 'signed', 'count') + datatype = '\x00' + bytesize = -1 # -1 means the biggest size known to the machine + signed = True + count = -1 + + def __init__(self, op): + if op is None: + return + from rpython.jit.metainterp.history import Const + if isinstance(op, Const) or isinstance(op, AbstractInputArg): + self.setinfo(op.type, -1, op.type == 'i') + return + if op.is_primitive_array_access(): from rpython.jit.backend.llsupport.descr import ArrayDescr - descr = self.getdescr() + descr = op.getdescr() if not we_are_translated(): from rpython.jit.backend.llgraph.runner import _getdescr - descr = _getdescr(self) - type = self.type - self.bytesize = descr.get_item_size_in_bytes() - self.signed = descr.is_item_signed() - self.datatype = type - elif self.opnum == rop.INT_SIGNEXT: + descr = _getdescr(op) + type = op.type + bytesize = 
descr.get_item_size_in_bytes() + signed = descr.is_item_signed() + datatype = type + self.setinfo(datatype, bytesize, signed) + elif op.opnum == rop.INT_SIGNEXT: from rpython.jit.metainterp import history - arg0 = self.getarg(0) - arg1 = self.getarg(1) + arg0 = op.getarg(0) + arg1 = op.getarg(1) assert isinstance(arg1, history.ConstInt) - signed = True - if not arg0.is_constant(): - signed = arg0.signed - self.setdatatype('i', arg1.value, True) - elif self.is_typecast(): - ft,tt = self.cast_types() - self.setdatatype(tt, self.cast_to_bytesize(), tt == 'i') + self.setinfo('i', arg1.value, True) + elif op.is_typecast(): + ft,tt = op.cast_types() + bytesize = op.cast_to_bytesize() + self.setinfo(tt, bytesize, True) else: # pass through the type of the first input argument - type = self.type + type = op.type signed = type == 'i' bytesize = -1 - if self.numargs() > 0: + if op.numargs() > 0: i = 0 - arg = self.getarg(i) - while arg.is_constant() and i+1 < self.numargs(): + arg = op.getarg(i) + while arg.is_constant() and i+1 < op.numargs(): i += 1 - arg = self.getarg(i) - if arg.datatype != '\x00' and \ - arg.bytesize != -1: - type = arg.datatype - signed = arg.signed - bytesize = arg.bytesize - if self.returns_bool_result(): + arg = op.getarg(i) + if not arg.is_constant(): + vecinfo = arg.get_forwarded() + if vecinfo is not None and isinstance(vecinfo, VectorizationInfo): + if vecinfo.datatype != '\x00' and \ + vecinfo.bytesize != -1: + type = vecinfo.datatype + signed = vecinfo.signed + bytesize = vecinfo.bytesize + if op.returns_bool_result(): type = 'i' - self.setdatatype(type, bytesize, signed) - assert self.datatype != '\x00' + self.setinfo(type, bytesize, signed) - def setdatatype(self, data_type, bytesize, signed): - self.datatype = data_type + def setinfo(self, datatype, bytesize, signed): + self.datatype = datatype if bytesize == -1: - if data_type == 'i': + if datatype == 'i': bytesize = INT_WORD - elif data_type == 'f': + elif datatype == 'f': bytesize = 
FLOAT_WORD - elif data_type == 'v': + elif datatype == 'r': + bytesize = INT_WORD + elif datatype == 'v': bytesize = 0 + elif datatype == 'V': # input arg vector + bytesize = INT_WORD + else: + assert 0, "unknown datasize" self.bytesize = bytesize self.signed = signed - def typestr(self): - sign = '-' - if not self.signed: - sign = '+' - return 'Type(%s%s, %d)' % (sign, self.type, self.bytesize) -class AbstractResOpOrInputArg(AbstractValue, Typed): +class AbstractResOpOrInputArg(AbstractValue): _attrs_ = ('_forwarded',) - _forwarded = None # either another resop or OptInfo + _forwarded = None # either another resop or OptInfo def get_forwarded(self): return self._forwarded -def vector_repr(self, num): - if self.opnum in (rop.VEC_UNPACK_I, rop.VEC_UNPACK_F): - return self.type + str(num) - return 'v%d[%dx%s%d]' % (num, self.count, self.datatype, - self.bytesize * 8) + def set_forwarded(self, forwarded_to): + assert forwarded_to is not self + self._forwarded = forwarded_to + + def getdescr(self): + return None + def forget_value(self): + pass class AbstractResOp(AbstractResOpOrInputArg): """The central ResOperation class, representing one operation.""" @@ -209,7 +260,7 @@ boolreflex = -1 boolinverse = -1 vector = -1 # -1 means, no vector equivalent, -2 it is a vector statement - casts = ('\x00', -1, '\x00', -1, -1) + cls_casts = ('\x00', -1, '\x00', -1, -1) def getopnum(self): return self.opnum @@ -219,10 +270,6 @@ # return self is other or self.getarg(0).same_box(other) # return self is other - def set_forwarded(self, forwarded_to): - assert forwarded_to is not self - self._forwarded = forwarded_to - # methods implemented by the arity mixins # --------------------------------------- @@ -257,8 +304,8 @@ # methods implemented by ResOpWithDescr # ------------------------------------- - def getdescr(self): - return None + #def getdescr(self): -- in the base class, AbstractResOpOrInputArg + # return None def setdescr(self, descr): raise NotImplementedError @@ -284,10 
+331,6 @@ if descr is DONT_CHANGE: descr = None newop = ResOperation(opnum, args, descr) - newop.datatype = self.datatype - newop.count = self.count - newop.bytesize = self.bytesize - newop.signed = self.signed if self.type != 'v': newop.copy_value_from(self) return newop @@ -369,6 +412,8 @@ return rop._JIT_DEBUG_FIRST <= self.getopnum() <= rop._JIT_DEBUG_LAST def is_always_pure(self): + # Tells whether an operation is pure based solely on the opcode. + # Other operations (e.g. getfield ops) may be pure in some cases are well. return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST def has_no_side_effect(self): @@ -391,9 +436,7 @@ return self.opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) def is_getfield(self): - return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, - rop.GETFIELD_GC_R, rop.GETFIELD_GC_PURE_I, - rop.GETFIELD_GC_PURE_R, rop.GETFIELD_GC_PURE_F) + return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R) def is_getarrayitem(self): return self.opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, @@ -472,19 +515,14 @@ def returns_bool_result(self): return self._cls_has_bool_result - def forget_value(self): - pass + #def forget_value(self): -- in the base class, AbstractResOpOrInputArg + # pass def is_label(self): return self.getopnum() == rop.LABEL def is_vector(self): - if self.getopnum() in (rop.VEC_UNPACK_I, rop.VEC_UNPACK_F): - arg = self.getarg(2) - from rpython.jit.metainterp.history import ConstInt - assert isinstance(arg, ConstInt) - return arg.value > 1 - return self.vector == -2 + return False def returns_void(self): return self.type == 'v' @@ -496,16 +534,16 @@ return False def cast_count(self, vec_reg_size): - return self.casts[4] + return self.cls_casts[4] def cast_types(self): - return self.casts[0], self.casts[2] + return self.cls_casts[0], self.cls_casts[2] def cast_to_bytesize(self): - return self.casts[3] + return self.cls_casts[3] def cast_from_bytesize(self): - return 
self.casts[1] + return self.cls_casts[1] def casts_up(self): return self.cast_to_bytesize() > self.cast_from_bytesize() @@ -514,7 +552,6 @@ # includes the cast as noop return self.cast_to_bytesize() <= self.cast_from_bytesize() - # =================== # Top of the hierachy # =================== @@ -573,6 +610,60 @@ newop.rd_frame_info_list = self.rd_frame_info_list return newop +class VectorGuardOp(GuardResOp): + bytesize = 0 + datatype = '\x00' + signed = True + count = 0 + + def copy_and_change(self, opnum, args=None, descr=None): + newop = GuardResOp.copy_and_change(self, opnum, args, descr) + assert isinstance(newop, VectorGuardOp) + newop.datatype = self.datatype + newop.bytesize = self.bytesize + newop.signed = self.signed + newop.count = self.count + return newop + +class VectorOp(ResOpWithDescr): + bytesize = 0 + datatype = '\x00' + signed = True + count = 0 + + def is_vector(self): + if self.getopnum() in (rop.VEC_UNPACK_I, rop.VEC_UNPACK_F): + arg = self.getarg(2) + from rpython.jit.metainterp.history import ConstInt + assert isinstance(arg, ConstInt) + return arg.value > 1 + return True + + def copy_and_change(self, opnum, args=None, descr=None): + newop = ResOpWithDescr.copy_and_change(self, opnum, args, descr) + assert isinstance(newop, VectorOp) + newop.datatype = self.datatype + newop.bytesize = self.bytesize + newop.signed = self.signed + newop.count = self.count + return newop + + def same_shape(self, other): + """ NOT_RPYTHON """ + myvecinfo = self.get_forwarded() + othervecinfo = other.get_forwarded() + if other.is_vector() != self.is_vector(): + return False + if myvecinfo.datatype != othervecinfo.datatype: + return False + if myvecinfo.bytesize != othervecinfo.bytesize: + return False + if myvecinfo.signed != othervecinfo.signed: + return False + if myvecinfo.count != othervecinfo.count: + return False + return True + # =========== # type mixins @@ -674,8 +765,8 @@ return True def cast_to(self): - to_type, size = self.casts[2], self.casts[3] - 
if self.casts[3] == 0: + to_type, size = self.cls_casts[2], self.cls_casts[3] + if self.cls_casts[3] == 0: if self.getopnum() == rop.INT_SIGNEXT: from rpython.jit.metainterp.history import ConstInt arg = self.getarg(1) @@ -686,7 +777,7 @@ return (to_type,size) def cast_from(self): - type, size, a, b = self.casts + type, size, a, b = self.cls_casts if size == -1: return self.bytesize return (type, size) @@ -703,7 +794,7 @@ return True def cast_types(self): - return self.casts[0], self.casts[2] + return self.cls_casts[0], self.cls_casts[2] def cast_to_bytesize(self): from rpython.jit.metainterp.history import ConstInt @@ -713,36 +804,16 @@ def cast_from_bytesize(self): arg = self.getarg(0) - return arg.bytesize + vecinfo = arg.get_forwarded() + if vecinfo is None or not isinstance(vecinfo, VectorizationInfo): + vecinfo = VectorizationInfo(arg) + return vecinfo.bytesize def cast_input_bytesize(self, vec_reg_size): return vec_reg_size # self.cast_from_bytesize() * self.cast_count(vec_reg_size) -class VectorOp(object): - _mixin_ = True - - def vector_bytesize(self): - assert self.count > 0 - return self.byte_size * self.count - - def same_shape(self, other): - """ NOT_RPYTHON """ - if other.is_vector() != self.is_vector(): - return False - if self.datatype != other.datatype: - return False - if self.bytesize != other.bytesize: - return False - if self.signed != other.signed: - return False - if self.count != other.count: - return False - return True - class AbstractInputArg(AbstractResOpOrInputArg): - def set_forwarded(self, forwarded_to): - self._forwarded = forwarded_to def repr(self, memo): try: @@ -755,18 +826,9 @@ def __repr__(self): return self.repr(self._repr_memo) - def getdescr(self): - return None - - def forget_value(self): - pass - def is_inputarg(self): return True - def initinputtype(self, cpu): - pass - class InputArgInt(IntOp, AbstractInputArg): def __init__(self, intval=0): self.setint(intval) @@ -793,7 +855,7 @@ def reset_value(self): 
self.setref_base(lltype.nullptr(llmemory.GCREF.TO)) -class InputArgVector(VectorOp, AbstractInputArg): +class InputArgVector(AbstractInputArg): type = 'V' def __init__(self): pass @@ -968,6 +1030,8 @@ '_GUARD_FOLDABLE_FIRST', 'GUARD_TRUE/1d/n', 'GUARD_FALSE/1d/n', + 'VEC_GUARD_TRUE/1d/n', + 'VEC_GUARD_FALSE/1d/n', 'GUARD_VALUE/2d/n', 'GUARD_CLASS/2d/n', 'GUARD_NONNULL/1d/n', @@ -1090,7 +1154,6 @@ 'ARRAYLEN_GC/1d/i', 'STRLEN/1/i', 'STRGETITEM/2/i', - 'GETFIELD_GC_PURE/1d/rfi', 'GETARRAYITEM_GC_PURE/2d/rfi', #'GETFIELD_RAW_PURE/1d/rfi', these two operations not useful and #'GETARRAYITEM_RAW_PURE/2d/fi', dangerous when unrolling speculatively @@ -1099,6 +1162,20 @@ # '_ALWAYS_PURE_LAST', # ----- end of always_pure operations ----- + # parameters GC_LOAD + # 1: pointer to complex object + # 2: integer describing the offset + # 3: constant integer. byte size of datatype to load (negative if it is signed) + 'GC_LOAD/3/rfi', + # parameters GC_LOAD_INDEXED + # 1: pointer to complex object + # 2: integer describing the index + # 3: constant integer scale factor + # 4: constant integer base offset (final offset is 'base + scale * index') + # 5: constant integer. 
byte size of datatype to load (negative if it is signed) + # (GC_LOAD is equivalent to GC_LOAD_INDEXED with arg3==1, arg4==0) + 'GC_LOAD_INDEXED/5/rfi', + '_RAW_LOAD_FIRST', 'GETARRAYITEM_GC/2d/rfi', 'VEC_GETARRAYITEM_GC/2d/fi', @@ -1125,6 +1202,16 @@ # must be forced, however we need to execute it anyway '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- + # same paramters as GC_LOAD, but one additional for the value to store + # note that the itemsize is not signed (always > 0) + # (gcptr, index, value, [scale, base_offset,] itemsize) + # invariants for GC_STORE: index is constant, but can be large + # invariants for GC_STORE_INDEXED: index is a non-constant box; + # scale is a constant; + # base_offset is a small constant + 'GC_STORE/4d/n', + 'GC_STORE_INDEXED/6d/n', + 'INCREMENT_DEBUG_COUNTER/1/n', '_RAW_STORE_FIRST', 'SETARRAYITEM_GC/3d/n', @@ -1137,8 +1224,6 @@ 'SETINTERIORFIELD_GC/3d/n', 'SETINTERIORFIELD_RAW/3d/n', # right now, only used by tests 'SETFIELD_GC/2d/n', - 'ZERO_PTR_FIELD/2/n', # only emitted by the rewrite, clears a pointer field - # at a given constant offset, no descr 'ZERO_ARRAY/3d/n', # only emitted by the rewrite, clears (part of) an array # [arraygcptr, firstindex, length], descr=ArrayDescr 'SETFIELD_RAW/2d/n', @@ -1284,16 +1369,20 @@ } is_guard = name.startswith('GUARD') - if is_guard: + if name.startswith('VEC'): + if name.startswith('VEC_GUARD'): + baseclass = VectorGuardOp + else: + baseclass = VectorOp + elif is_guard: assert withdescr baseclass = GuardResOp elif withdescr: baseclass = ResOpWithDescr else: baseclass = PlainResOp + mixins = [arity2mixin.get(arity, N_aryOp)] - if name.startswith('VEC'): - mixins.append(VectorOp) if result_type == 'i': mixins.append(IntOp) elif result_type == 'f': @@ -1403,8 +1492,8 @@ rop.CAST_FLOAT_TO_INT: rop.VEC_CAST_FLOAT_TO_INT, # guard - rop.GUARD_TRUE: rop.GUARD_TRUE, - rop.GUARD_FALSE: rop.GUARD_FALSE, + rop.GUARD_TRUE: rop.VEC_GUARD_TRUE, + rop.GUARD_FALSE: 
rop.VEC_GUARD_FALSE, } def setup2(): @@ -1420,7 +1509,7 @@ if opnum in _opvector: cls.vector = _opvector[opnum] if name in _cast_ops: - cls.casts = _cast_ops[name] + cls.cls_casts = _cast_ops[name] if name.startswith('VEC'): cls.vector = -2 setup2() @@ -1512,14 +1601,6 @@ return rop.CALL_LOOPINVARIANT_N @staticmethod - def getfield_pure_for_descr(descr): - if descr.is_pointer_field(): - return rop.GETFIELD_GC_PURE_R - elif descr.is_float_field(): - return rop.GETFIELD_GC_PURE_F - return rop.GETFIELD_GC_PURE_I - - @staticmethod def getfield_for_descr(descr): if descr.is_pointer_field(): return rop.GETFIELD_GC_R @@ -1603,6 +1684,26 @@ opnum == rop.CALL_RELEASE_GIL_N) @staticmethod + def get_gc_load(tp): + if tp == 'i': + return rop.GC_LOAD_I + elif tp == 'f': + return rop.GC_LOAD_F + else: + assert tp == 'r' + return rop.GC_LOAD_R + + @staticmethod + def get_gc_load_indexed(tp): + if tp == 'i': + return rop.GC_LOAD_INDEXED_I + elif tp == 'f': + return rop.GC_LOAD_INDEXED_F + else: + assert tp == 'r' + return rop.GC_LOAD_INDEXED_R + + @staticmethod def inputarg_from_tp(tp): if tp == 'i': return InputArgInt() @@ -1650,4 +1751,26 @@ opnum = rop.VEC_UNPACK_F return VecOperationNew(opnum, args, datatype, bytesize, signed, count) + @staticmethod + def is_pure_getfield(opnum, descr): + if (opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETFIELD_GC_R): + return descr is not None and descr.is_always_pure() + return False + + @staticmethod + def is_pure_with_descr(opnum, descr): + is_pure = rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST + if not is_pure: + if (opnum == rop.GETFIELD_RAW_I or + opnum == rop.GETFIELD_RAW_R or + opnum == rop.GETFIELD_RAW_F or + opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_R or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETARRAYITEM_RAW_I or + opnum == rop.GETARRAYITEM_RAW_F): + is_pure = descr.is_always_pure() + return is_pure diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/resumecode.py 
pypy-5.0.1+dfsg/rpython/jit/metainterp/resumecode.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/resumecode.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/resumecode.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,79 @@ + +""" Resume bytecode. It goes as following: + + [ ] if vinfo is not None + -OR- + [1 ] if ginfo is not None + -OR- + [0] if both are None + + [ ] for virtualrefs + + [ ] the frames + [ ] + ... + + until the length of the array. +""" + +from rpython.rtyper.lltypesystem import rffi, lltype + +NUMBERINGP = lltype.Ptr(lltype.GcForwardReference()) +NUMBERING = lltype.GcStruct('Numbering', + ('code', lltype.Array(rffi.UCHAR))) +NUMBERINGP.TO.become(NUMBERING) +NULL_NUMBER = lltype.nullptr(NUMBERING) + +def create_numbering(lst): + result = [] + for item in lst: + item = rffi.cast(lltype.Signed, item) + item *= 2 + if item < 0: + item = -1 - item + + assert item >= 0 + if item < 2**7: + result.append(rffi.cast(rffi.UCHAR, item)) + elif item < 2**14: + result.append(rffi.cast(rffi.UCHAR, item | 0x80)) + result.append(rffi.cast(rffi.UCHAR, item >> 7)) + else: + assert item < 2**16 + result.append(rffi.cast(rffi.UCHAR, item | 0x80)) + result.append(rffi.cast(rffi.UCHAR, (item >> 7) | 0x80)) + result.append(rffi.cast(rffi.UCHAR, item >> 14)) + + numb = lltype.malloc(NUMBERING, len(result)) + for i in range(len(result)): + numb.code[i] = result[i] + return numb + +def numb_next_item(numb, index): + value = rffi.cast(lltype.Signed, numb.code[index]) + index += 1 + if value & (2**7): + value &= 2**7 - 1 + value |= rffi.cast(lltype.Signed, numb.code[index]) << 7 + index += 1 + if value & (2**14): + value &= 2**14 - 1 + value |= rffi.cast(lltype.Signed, numb.code[index]) << 14 + index += 1 + if value & 1: + value = -1 - value + value >>= 1 + return value, index + +def numb_next_n_items(numb, size, index): + for i in range(size): + _, index = numb_next_item(numb, index) + return index + +def unpack_numbering(numb): + l = [] + i 
= 0 + while i < len(numb.code): + next, i = numb_next_item(numb, i) + l.append(next) + return l diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/resume.py pypy-5.0.1+dfsg/rpython/jit/metainterp/resume.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/resume.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/resume.py 2016-03-19 16:40:12.000000000 +0000 @@ -12,6 +12,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper.rclass import OBJECTPTR from rpython.jit.metainterp.walkvirtual import VirtualVisitor +from rpython.jit.metainterp import resumecode # Logic to encode the chain of frames and the state of the boxes at a @@ -26,13 +27,28 @@ self.prev = prev self.boxes = boxes +class TopSnapshot(Snapshot): + __slots__ = ('vable_boxes',) + + def __init__(self, prev, boxes, vable_boxes): + Snapshot.__init__(self, prev, boxes) + self.vable_boxes = vable_boxes + +def combine_uint(index1, index2): + assert 0 <= index1 < 65536 + assert 0 <= index2 < 65536 + return index1 << 16 | index2 # it's ok to return signed here, + # we need only 32bit, but 64 is ok for now + +def unpack_uint(packed): + return (packed >> 16) & 0xffff, packed & 0xffff + class FrameInfo(object): - __slots__ = ('prev', 'jitcode', 'pc') + __slots__ = ('prev', 'packed_jitcode_pc') def __init__(self, prev, jitcode, pc): self.prev = prev - self.jitcode = jitcode - self.pc = pc + self.packed_jitcode_pc = combine_uint(jitcode.index, pc) class VectorInfo(object): """ @@ -102,7 +118,8 @@ return back = framestack[n - 1] if target.parent_resumedata_frame_info_list is not None: - assert target.parent_resumedata_frame_info_list.pc == back.pc + _, pc = unpack_uint(target.parent_resumedata_frame_info_list.packed_jitcode_pc) + assert pc == back.pc return _ensure_parent_resumedata(framestack, n - 1) target.parent_resumedata_frame_info_list = FrameInfo( @@ -117,9 +134,11 @@ snapshot_storage): n = len(framestack) - 1 if virtualizable_boxes is not None: - 
boxes = virtualref_boxes + virtualizable_boxes + virtualizable_boxes = ([virtualizable_boxes[-1]] + + virtualizable_boxes[:-1]) else: - boxes = virtualref_boxes[:] + virtualizable_boxes = [] + virtualref_boxes = virtualref_boxes[:] if n >= 0: top = framestack[n] _ensure_parent_resumedata(framestack, n) @@ -128,27 +147,12 @@ snapshot_storage.rd_frame_info_list = frame_info_list snapshot = Snapshot(top.parent_resumedata_snapshot, top.get_list_of_active_boxes(False)) - snapshot = Snapshot(snapshot, boxes) + snapshot = TopSnapshot(snapshot, virtualref_boxes, virtualizable_boxes) snapshot_storage.rd_snapshot = snapshot else: snapshot_storage.rd_frame_info_list = None - snapshot_storage.rd_snapshot = Snapshot(None, boxes) - -# -# The following is equivalent to the RPython-level declaration: -# -# class Numbering: __slots__ = ['prev', 'nums'] -# -# except that it is more compact in translated programs, because the -# array 'nums' is inlined in the single NUMBERING object. This is -# important because this is often the biggest single consumer of memory -# in a pypy-c-jit. 
-# -NUMBERINGP = lltype.Ptr(lltype.GcForwardReference()) -NUMBERING = lltype.GcStruct('Numbering', - ('prev', NUMBERINGP), - ('nums', lltype.Array(rffi.SHORT))) -NUMBERINGP.TO.become(NUMBERING) + snapshot_storage.rd_snapshot = TopSnapshot(None, virtualref_boxes, + virtualizable_boxes) PENDINGFIELDSTRUCT = lltype.Struct('PendingField', ('lldescr', OBJECTPTR), @@ -195,7 +199,28 @@ UNASSIGNEDVIRTUAL = tag(-1 << 13, TAGVIRTUAL) NULLREF = tag(-1, TAGCONST) UNINITIALIZED = tag(-2, TAGCONST) # used for uninitialized string characters +TAG_CONST_OFFSET = 0 +class NumberingState(object): + def __init__(self, snapshot_list): + self.liveboxes = {} + self.current = [rffi.cast(rffi.SHORT, 0)] * self.count_boxes(snapshot_list) + self.position = len(self.current) + self.n = 0 + self.v = 0 + + def count_boxes(self, lst): + snapshot = lst[0] + assert isinstance(snapshot, TopSnapshot) + c = len(snapshot.vable_boxes) + for snapshot in lst: + c += len(snapshot.boxes) + c += 2 * (len(lst) - 1) + 1 + 1 + return c + + def append(self, item): + self.current[self.position] = item + self.position += 1 class ResumeDataLoopMemo(object): @@ -242,24 +267,19 @@ return self._newconst(const) def _newconst(self, const): - result = tag(len(self.consts), TAGCONST) + result = tag(len(self.consts) + TAG_CONST_OFFSET, TAGCONST) self.consts.append(const) return result # env numbering - def number(self, optimizer, snapshot): - if snapshot is None: - return lltype.nullptr(NUMBERING), {}, 0 - if snapshot in self.numberings: - numb, liveboxes, v = self.numberings[snapshot] - return numb, liveboxes.copy(), v - - numb1, liveboxes, v = self.number(optimizer, snapshot.prev) - n = len(liveboxes) - v - boxes = snapshot.boxes + def _number_boxes(self, boxes, optimizer, state): + """ Number boxes from one snapshot + """ + n = state.n + v = state.v + liveboxes = state.liveboxes length = len(boxes) - numb = lltype.malloc(NUMBERING, length) for i in range(length): box = boxes[i] box = optimizer.get_box_replacement(box) 
@@ -283,12 +303,50 @@ tagged = tag(n, TAGBOX) n += 1 liveboxes[box] = tagged - numb.nums[i] = tagged - # - numb.prev = numb1 - self.numberings[snapshot] = numb, liveboxes, v - return numb, liveboxes.copy(), v + state.append(tagged) + state.n = n + state.v = v + + def number(self, optimizer, topsnapshot, frameinfo): + # flatten the list + cur = topsnapshot.prev + snapshot_list = [topsnapshot] + framestack_list = [] + while cur: + framestack_list.append(frameinfo) + frameinfo = frameinfo.prev + snapshot_list.append(cur) + cur = cur.prev + state = NumberingState(snapshot_list) + + # we want to number snapshots starting from the back, but ending + # with a forward list + for i in range(len(snapshot_list) - 1, 0, -1): + state.position -= len(snapshot_list[i].boxes) + 2 + frameinfo = framestack_list[i - 1] + jitcode_pos, pc = unpack_uint(frameinfo.packed_jitcode_pc) + state.append(rffi.cast(rffi.SHORT, jitcode_pos)) + state.append(rffi.cast(rffi.SHORT, pc)) + self._number_boxes(snapshot_list[i].boxes, optimizer, state) + state.position -= len(snapshot_list[i].boxes) + 2 + + assert isinstance(topsnapshot, TopSnapshot) + special_boxes_size = (1 + len(topsnapshot.vable_boxes) + + 1 + len(topsnapshot.boxes)) + assert state.position == special_boxes_size + + state.position = 0 + state.append(rffi.cast(rffi.SHORT, len(topsnapshot.vable_boxes))) + self._number_boxes(topsnapshot.vable_boxes, optimizer, state) + n = len(topsnapshot.boxes) + assert not (n & 1) + state.append(rffi.cast(rffi.SHORT, n >> 1)) + self._number_boxes(topsnapshot.boxes, optimizer, state) + assert state.position == special_boxes_size + numb = resumecode.create_numbering(state.current) + return numb, state.liveboxes, state.v + def forget_numberings(self): # XXX ideally clear only the affected numberings self.numberings.clear() @@ -430,13 +488,14 @@ assert not storage.rd_numb snapshot = self.snapshot_storage.rd_snapshot assert snapshot is not None # is that true? 
- numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot) + # count stack depth + numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot, + self.snapshot_storage.rd_frame_info_list) self.liveboxes_from_env = liveboxes_from_env self.liveboxes = {} storage.rd_numb = numb self.snapshot_storage.rd_snapshot = None - storage.rd_frame_info_list = self.snapshot_storage.rd_frame_info_list - + # collect liveboxes and virtuals n = len(liveboxes_from_env) - v liveboxes = [None] * n @@ -531,7 +590,7 @@ if self._invalidation_needed(len(liveboxes), nholes): memo.clear_box_virtual_numbers() - + def _invalidation_needed(self, nliveboxes, nholes): memo = self.memo # xxx heuristic a bit out of thin air @@ -921,7 +980,8 @@ def _init(self, cpu, storage): self.cpu = cpu - self.cur_numb = storage.rd_numb + self.numb = storage.rd_numb + self.cur_index = 0 self.count = storage.rd_count self.consts = storage.rd_consts @@ -929,6 +989,16 @@ self._prepare_virtuals(storage.rd_virtuals) self._prepare_pendingfields(storage.rd_pendingfields) + def read_jitcode_pos_pc(self): + jitcode_pos, self.cur_index = resumecode.numb_next_item(self.numb, + self.cur_index) + pc, self.cur_index = resumecode.numb_next_item(self.numb, + self.cur_index) + return jitcode_pos, pc + + def done_reading(self): + return self.cur_index >= len(self.numb.code) + def getvirtual_ptr(self, index): # Returns the index'th virtual, building it lazily if needed. # Note that this may be called recursively; that's why the @@ -1004,23 +1074,29 @@ def _prepare_next_section(self, info): # Use info.enumerate_vars(), normally dispatching to # rpython.jit.codewriter.jitcode. Some tests give a different 'info'. 
- info.enumerate_vars(self._callback_i, - self._callback_r, - self._callback_f, - self.unique_id) # <-- annotation hack - self.cur_numb = self.cur_numb.prev + self.cur_index = info.enumerate_vars(self._callback_i, + self._callback_r, + self._callback_f, + self.unique_id, # <-- annotation hack + self.cur_index) def _callback_i(self, index, register_index): - value = self.decode_int(self.cur_numb.nums[index]) + item, index = resumecode.numb_next_item(self.numb, index) + value = self.decode_int(item) self.write_an_int(register_index, value) + return index def _callback_r(self, index, register_index): - value = self.decode_ref(self.cur_numb.nums[index]) + item, index = resumecode.numb_next_item(self.numb, index) + value = self.decode_ref(item) self.write_a_ref(register_index, value) + return index def _callback_f(self, index, register_index): - value = self.decode_float(self.cur_numb.nums[index]) + item, index = resumecode.numb_next_item(self.numb, index) + value = self.decode_float(item) self.write_a_float(register_index, value) + return index # ---------- when resuming for pyjitpl.py, make boxes ---------- @@ -1030,15 +1106,13 @@ boxes = resumereader.consume_vref_and_vable_boxes(virtualizable_info, greenfield_info) virtualizable_boxes, virtualref_boxes = boxes - frameinfo = storage.rd_frame_info_list - while True: - f = metainterp.newframe(frameinfo.jitcode) - f.setup_resume_at_op(frameinfo.pc) + while not resumereader.done_reading(): + jitcode_pos, pc = resumereader.read_jitcode_pos_pc() + jitcode = metainterp.staticdata.jitcodes[jitcode_pos] + f = metainterp.newframe(jitcode) + f.setup_resume_at_op(pc) resumereader.consume_boxes(f.get_current_position_info(), f.registers_i, f.registers_r, f.registers_f) - frameinfo = frameinfo.prev - if frameinfo is None: - break metainterp.framestack.reverse() return resumereader.liveboxes, virtualizable_boxes, virtualref_boxes @@ -1060,36 +1134,42 @@ self.boxes_f = boxes_f self._prepare_next_section(info) - def 
consume_virtualizable_boxes(self, vinfo, numb): + def consume_virtualizable_boxes(self, vinfo, index): # we have to ignore the initial part of 'nums' (containing vrefs), # find the virtualizable from nums[-1], and use it to know how many # boxes of which type we have to return. This does not write # anything into the virtualizable. - index = len(numb.nums) - 1 - virtualizablebox = self.decode_ref(numb.nums[index]) + numb = self.numb + item, index = resumecode.numb_next_item(numb, index) + virtualizablebox = self.decode_ref(item) virtualizable = vinfo.unwrap_virtualizable_box(virtualizablebox) - return vinfo.load_list_of_boxes(virtualizable, self, numb) + return vinfo.load_list_of_boxes(virtualizable, self, virtualizablebox, + numb, index) - def consume_virtualref_boxes(self, numb, end): + def consume_virtualref_boxes(self, index): # Returns a list of boxes, assumed to be all BoxPtrs. # We leave up to the caller to call vrefinfo.continue_tracing(). - assert (end & 1) == 0 - return [self.decode_ref(numb.nums[i]) for i in range(end)] + size, index = resumecode.numb_next_item(self.numb, index) + if size == 0: + return [], index + lst = [] + for i in range(size * 2): + item, index = resumecode.numb_next_item(self.numb, index) + lst.append(self.decode_ref(item)) + return lst, index def consume_vref_and_vable_boxes(self, vinfo, ginfo): - numb = self.cur_numb - self.cur_numb = numb.prev + vable_size, index = resumecode.numb_next_item(self.numb, 0) if vinfo is not None: - virtualizable_boxes = self.consume_virtualizable_boxes(vinfo, numb) - end = len(numb.nums) - len(virtualizable_boxes) + virtualizable_boxes, index = self.consume_virtualizable_boxes(vinfo, + index) elif ginfo is not None: - index = len(numb.nums) - 1 - virtualizable_boxes = [self.decode_ref(numb.nums[index])] - end = len(numb.nums) - 1 + item, index = resumecode.numb_next_item(self.numb, index) + virtualizable_boxes = [self.decode_ref(item)] else: virtualizable_boxes = None - end = len(numb.nums) - 
virtualref_boxes = self.consume_virtualref_boxes(numb, end) + virtualref_boxes, index = self.consume_virtualref_boxes(index) + self.cur_index = index return virtualizable_boxes, virtualref_boxes def allocate_with_vtable(self, descr=None): @@ -1228,7 +1308,7 @@ if tagged_eq(tagged, NULLREF): box = self.cpu.ts.CONST_NULL else: - box = self.consts[num] + box = self.consts[num - TAG_CONST_OFFSET] elif tag == TAGVIRTUAL: if kind == INT: box = self.getvirtual_int(num) @@ -1287,7 +1367,8 @@ # ---------- when resuming for blackholing, get direct values ---------- -def blackhole_from_resumedata(blackholeinterpbuilder, jitdriver_sd, storage, +def blackhole_from_resumedata(blackholeinterpbuilder, jitcodes, + jitdriver_sd, storage, deadframe, all_virtuals=None): # The initialization is stack-critical code: it must not be interrupted by # StackOverflow, otherwise the jit_virtual_refs are left in a dangling state. @@ -1303,30 +1384,24 @@ rstack._stack_criticalcode_stop() # # First get a chain of blackhole interpreters whose length is given - # by the depth of rd_frame_info_list. The first one we get must be + # by the positions in the numbering. The first one we get must be # the bottom one, i.e. the last one in the chain, in order to make # the comment in BlackholeInterpreter.setposition() valid. - nextbh = None - frameinfo = storage.rd_frame_info_list - while True: + prevbh = None + firstbh = None + curbh = None + while not resumereader.done_reading(): curbh = blackholeinterpbuilder.acquire_interp() - curbh.nextblackholeinterp = nextbh - nextbh = curbh - frameinfo = frameinfo.prev - if frameinfo is None: - break - firstbh = nextbh - # - # Now fill the blackhole interpreters with resume data. 
- curbh = firstbh - frameinfo = storage.rd_frame_info_list - while True: - curbh.setposition(frameinfo.jitcode, frameinfo.pc) + if prevbh is not None: + prevbh.nextblackholeinterp = curbh + else: + firstbh = curbh + prevbh = curbh + jitcode_pos, pc = resumereader.read_jitcode_pos_pc() + jitcode = jitcodes[jitcode_pos] + curbh.setposition(jitcode, pc) resumereader.consume_one_section(curbh) - curbh = curbh.nextblackholeinterp - frameinfo = frameinfo.prev - if frameinfo is None: - break + curbh.nextblackholeinterp = None return firstbh def force_from_resumedata(metainterp_sd, storage, deadframe, vinfo, ginfo): @@ -1369,30 +1444,36 @@ info = blackholeinterp.get_current_position_info() self._prepare_next_section(info) - def consume_virtualref_info(self, vrefinfo, numb, end): + def consume_virtualref_info(self, vrefinfo, index): # we have to decode a list of references containing pairs - # [..., virtual, vref, ...] stopping at 'end' - if vrefinfo is None: - assert end == 0 - return - assert (end & 1) == 0 - for i in range(0, end, 2): - virtual = self.decode_ref(numb.nums[i]) - vref = self.decode_ref(numb.nums[i + 1]) + # [..., virtual, vref, ...] and returns the index at the end + size, index = resumecode.numb_next_item(self.numb, index) + if vrefinfo is None or size == 0: + assert size == 0 + return index + for i in range(size): + virtual_item, index = resumecode.numb_next_item( + self.numb, index) + vref_item, index = resumecode.numb_next_item( + self.numb, index) + virtual = self.decode_ref(virtual_item) + vref = self.decode_ref(vref_item) # For each pair, we store the virtual inside the vref. 
vrefinfo.continue_tracing(vref, virtual) + return index - def consume_vable_info(self, vinfo, numb): + def consume_vable_info(self, vinfo, index): # we have to ignore the initial part of 'nums' (containing vrefs), # find the virtualizable from nums[-1], load all other values # from the CPU stack, and copy them into the virtualizable - if vinfo is None: - return len(numb.nums) - index = len(numb.nums) - 1 - virtualizable = self.decode_ref(numb.nums[index]) + numb = self.numb + item, index = resumecode.numb_next_item(self.numb, index) + virtualizable = self.decode_ref(item) # just reset the token, we'll force it later vinfo.reset_token_gcref(virtualizable) - return vinfo.write_from_resume_data_partial(virtualizable, self, numb) + index = vinfo.write_from_resume_data_partial(virtualizable, self, + index, numb) + return index def load_value_of_type(self, TYPE, tagged): from rpython.jit.metainterp.warmstate import specialize_value @@ -1409,13 +1490,18 @@ load_value_of_type._annspecialcase_ = 'specialize:arg(1)' def consume_vref_and_vable(self, vrefinfo, vinfo, ginfo): - numb = self.cur_numb - self.cur_numb = numb.prev + vable_size, index = resumecode.numb_next_item(self.numb, 0) if self.resume_after_guard_not_forced != 2: - end_vref = self.consume_vable_info(vinfo, numb) + if vinfo is not None: + index = self.consume_vable_info(vinfo, index) if ginfo is not None: - end_vref -= 1 - self.consume_virtualref_info(vrefinfo, numb, end_vref) + _, index = resumecode.numb_next_item(self.numb, index) + index = self.consume_virtualref_info(vrefinfo, index) + else: + index = resumecode.numb_next_n_items(self.numb, vable_size, index) + vref_size, index = resumecode.numb_next_item(self.numb, index) + index = resumecode.numb_next_n_items(self.numb, vref_size * 2, index) + self.cur_index = index def allocate_with_vtable(self, descr=None): from rpython.jit.metainterp.executor import exec_new_with_vtable @@ -1534,7 +1620,7 @@ def decode_int(self, tagged): num, tag = untag(tagged) if tag 
== TAGCONST: - return self.consts[num].getint() + return self.consts[num - TAG_CONST_OFFSET].getint() elif tag == TAGINT: return num elif tag == TAGVIRTUAL: @@ -1550,7 +1636,7 @@ if tag == TAGCONST: if tagged_eq(tagged, NULLREF): return self.cpu.ts.NULLREF - return self.consts[num].getref_base() + return self.consts[num - TAG_CONST_OFFSET].getref_base() elif tag == TAGVIRTUAL: return self.getvirtual_ptr(num) else: @@ -1562,7 +1648,7 @@ def decode_float(self, tagged): num, tag = untag(tagged) if tag == TAGCONST: - return self.consts[num].getfloatstorage() + return self.consts[num - TAG_CONST_OFFSET].getfloatstorage() else: assert tag == TAGBOX if num < 0: diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/strategies.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/strategies.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/strategies.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/strategies.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,13 @@ + +import sys +from hypothesis import strategies +from rpython.jit.metainterp.resoperation import InputArgInt +from rpython.jit.metainterp.history import ConstInt + +machine_ints = strategies.integers(min_value=-sys.maxint - 1, + max_value=sys.maxint) +intboxes = strategies.builds(InputArgInt) +intconsts = strategies.builds(ConstInt, machine_ints) +boxes = intboxes | intconsts +boxlists = strategies.lists(boxes, min_size=1).flatmap( + lambda cis: strategies.lists(strategies.sampled_from(cis))) \ No newline at end of file diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_ajit.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_ajit.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_ajit.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_ajit.py 2016-03-19 16:40:15.000000000 +0000 @@ -320,7 +320,7 @@ assert res == 252 self.check_trace_count(1) self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, - 'getfield_gc_pure_i': 1, 
'int_mul': 1, + 'getfield_gc_i': 1, 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) def test_loops_are_transient(self): @@ -1199,6 +1199,31 @@ (-sys.maxint-1) // (-6) + 100 * 8) + def test_overflow_fold_if_divisor_constant(self): + import sys + from rpython.rtyper.lltypesystem.lloperation import llop + myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res']) + def f(x, y): + res = 0 + while y > 0: + myjitdriver.can_enter_jit(x=x, y=y, res=res) + myjitdriver.jit_merge_point(x=x, y=y, res=res) + try: + res += llop.int_floordiv_ovf(lltype.Signed, + x, 2) + res += llop.int_mod_ovf(lltype.Signed, + x, 2) + x += 5 + except OverflowError: + res += 100 + y -= 1 + return res + res = self.meta_interp(f, [-41, 8]) + # the guard_true are for the loop condition + # the guard_false needed to check whether an overflow can occur have + # been folded away + self.check_resops(guard_true=2, guard_false=0) + def test_isinstance(self): class A: pass @@ -1405,7 +1430,7 @@ return tup[1] res = self.interp_operations(f, [3, 5]) assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure_i=0) + self.check_operations_history(setfield_gc=2, getfield_gc_i=0) def test_oosend_look_inside_only_one(self): class A: @@ -2522,7 +2547,7 @@ if counter > 10: return 7 assert self.meta_interp(build, []) == 7 - self.check_resops(getfield_gc_pure_r=2) + self.check_resops(getfield_gc_r=2) def test_args_becomming_equal(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b']) @@ -4044,7 +4069,7 @@ self.interp_operations(f, []) def test_external_call(self): - from rpython.rlib.objectmodel import invoke_around_extcall + from rpython.rlib import rgil TIME_T = lltype.Signed # ^^^ some 32-bit platforms have a 64-bit rffi.TIME_T, but we @@ -4058,11 +4083,6 @@ pass state = State() - def before(): - if we_are_jitted(): - raise Oups - state.l.append("before") - def after(): if we_are_jitted(): raise Oups @@ -4070,14 +4090,14 @@ def f(): state.l = [] - 
invoke_around_extcall(before, after) + rgil.invoke_after_thread_switch(after) external(lltype.nullptr(T.TO)) return len(state.l) res = self.interp_operations(f, []) - assert res == 2 + assert res == 1 res = self.interp_operations(f, []) - assert res == 2 + assert res == 1 self.check_operations_history(call_release_gil_i=1, call_may_force_i=0) def test_unescaped_write_zero(self): @@ -4357,3 +4377,30 @@ assert res == -1 else: assert res == 4294967295 + + def test_issue2200_recursion(self): + # Reproduces issue #2200. This test contains no recursion, + # but due to an unlikely combination of factors it ends up + # creating an RPython-level recursion, one per loop iteration. + # The recursion is: blackhole interp from the failing guard -> + # does the call to enter() as a normal call -> enter() runs + # can_enter_jit() as if we're interpreted -> we enter the JIT + # again from the start of the loop -> the guard fails again + # during the next iteration -> blackhole interp. All arrows + # in the previous sentence are one or more levels of RPython + # function calls. 
+ driver = JitDriver(greens=[], reds=["i"]) + def enter(i): + driver.can_enter_jit(i=i) + def f(): + set_param(None, 'trace_eagerness', 999999) + i = 0 + while True: + driver.jit_merge_point(i=i) + i += 1 + if i >= 300: + return i + promote(i + 1) # a failing guard + enter(i) + + self.meta_interp(f, []) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_fficall.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_fficall.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_fficall.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_fficall.py 2016-03-19 16:40:12.000000000 +0000 @@ -49,6 +49,7 @@ expected_call_release_gil_i=1, expected_call_release_gil_f=0, expected_call_release_gil_n=0, + expected_call_may_force_f=0, supports_floats=True, supports_longlong=False, supports_singlefloats=False): @@ -151,7 +152,7 @@ res = float2longlong(res) assert matching_result(res, rvalue) self.check_operations_history(call_may_force_i=0, - call_may_force_f=0, + call_may_force_f=expected_call_may_force_f, call_may_force_n=0, call_release_gil_i=expected_call_release_gil_i, call_release_gil_f=expected_call_release_gil_f, @@ -374,3 +375,13 @@ def test_simple_call_singlefloat_unsupported(self): self.test_simple_call_singlefloat(supports_singlefloats=False, expected_call_release_gil=0) + + def test_calldescrof_dynamic_returning_none(self): + from rpython.jit.backend.llgraph.runner import LLGraphCPU + old = LLGraphCPU.calldescrof_dynamic + try: + LLGraphCPU.calldescrof_dynamic = lambda *args: None + self.test_simple_call_float(expected_call_release_gil=0, + expected_call_may_force_f=1) + finally: + LLGraphCPU.calldescrof_dynamic = old diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_immutable.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_immutable.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_immutable.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_immutable.py 
2016-03-19 16:40:12.000000000 +0000 @@ -19,7 +19,7 @@ return y.x + 5 res = self.interp_operations(f, [23]) assert res == 28 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=1, int_add=1) + self.check_operations_history(getfield_gc_i=1, int_add=1) def test_fields_subclass(self): class X(object): @@ -41,8 +41,7 @@ return z.x + z.y + 5 res = self.interp_operations(f, [23, 11]) assert res == 39 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=2, - int_add=2) + self.check_operations_history(getfield_gc_i=2, int_add=2) def f(x, y): # this time, the field 'x' only shows up on subclass 'Y' @@ -50,8 +49,7 @@ return z.x + z.y + 5 res = self.interp_operations(f, [23, 11]) assert res == 39 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=2, - int_add=2) + self.check_operations_history(getfield_gc_i=2, int_add=2) def test_array(self): class X(object): @@ -66,8 +64,7 @@ return a.y[index] res = self.interp_operations(f, [2], listops=True) assert res == 30 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) + self.check_operations_history(getfield_gc_r=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) def test_array_index_error(self): class X(object): @@ -89,8 +86,7 @@ return a.get(index) res = self.interp_operations(f, [2], listops=True) assert res == 30 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) + self.check_operations_history(getfield_gc_r=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) def test_array_in_immutable(self): class X(object): @@ -106,8 +102,7 @@ return y.lst[index] + y.y + 5 res = self.interp_operations(f, [23, 0], listops=True) assert res == 23 + 24 + 5 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getfield_gc_pure_i=1, + self.check_operations_history(getfield_gc_r=1, getfield_gc_i=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1, 
int_add=3) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_jitdriver.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_jitdriver.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_jitdriver.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_jitdriver.py 2016-03-19 16:40:12.000000000 +0000 @@ -193,7 +193,7 @@ return pc + 1 driver = JitDriver(greens=["pc"], reds='auto', - get_unique_id=get_unique_id) + get_unique_id=get_unique_id, is_recursive=True) def f(arg): i = 0 @@ -213,6 +213,21 @@ if op.getopname() == 'enter_portal_frame': assert op.getarg(0).getint() == 0 assert op.getarg(1).getint() == 1 - + + def test_manual_leave_enter_portal_frame(self): + from rpython.rlib import jit + driver = JitDriver(greens=[], reds='auto', is_recursive=True) + + def f(arg): + i = 0 + while i < 100: + driver.jit_merge_point() + jit.enter_portal_frame(42) + jit.leave_portal_frame() + i += 1 + + self.meta_interp(f, [0]) + self.check_simple_loop(enter_portal_frame=1, leave_portal_frame=1) + class TestLLtype(MultipleJitDriversTests, LLJitMixin): pass diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_pyjitpl.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_pyjitpl.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_pyjitpl.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_pyjitpl.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,6 +1,7 @@ # some unit tests for the bytecode decoding +import py from rpython.jit.metainterp import pyjitpl from rpython.jit.metainterp import jitprof from rpython.jit.metainterp.history import ConstInt @@ -11,9 +12,22 @@ def test_portal_trace_positions(): + py.test.skip("bleh, too direct test, rewrite or kill") + class jitdriver_sd: + index = 0 + + class warmstate: + @staticmethod + def get_unique_id(*args): + return 0 + + class jitdriver: + is_recursive = True + jitcode = JitCode("f") jitcode.setup(None) portal = JitCode("portal") + portal.jitdriver_sd 
= jitdriver_sd portal.setup(None) class FakeStaticData: cpu = None @@ -25,6 +39,10 @@ metainterp.framestack = [] class FakeHistory: operations = [] + + @staticmethod + def record(*args): + pass history = metainterp.history = FakeHistory() metainterp.newframe(portal, "green1") history.operations.append(1) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_quasiimmut.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_quasiimmut.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_quasiimmut.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_quasiimmut.py 2016-03-19 16:40:12.000000000 +0000 @@ -74,7 +74,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -101,7 +101,7 @@ res = self.meta_interp(f, [100, 7], enable_opts="") assert res == 700 # there should be no getfields, even though optimizations are turned off - self.check_resops(guard_not_invalidated=1, getfield_gc=0) + self.check_resops(guard_not_invalidated=1) def test_nonopt_1(self): myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) @@ -124,8 +124,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_resops(guard_not_invalidated=0, getfield_gc_r=1, - getfield_gc_pure_i=2) + self.check_resops(guard_not_invalidated=0, getfield_gc_r=1, getfield_gc_i=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -156,7 +155,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=2) def test_change_during_tracing_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -208,7 +207,7 @@ assert f(100, 7) == 700 res = self.meta_interp(f, [100, 7]) assert res == 700 - 
self.check_resops(guard_not_invalidated=0, getfield_gc=0) + self.check_resops(guard_not_invalidated=0) def test_change_invalidate_reentering(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -234,7 +233,7 @@ assert g(100, 7) == 700707 res = self.meta_interp(g, [100, 7]) assert res == 700707 - self.check_resops(guard_not_invalidated=4, getfield_gc=0) + self.check_resops(guard_not_invalidated=4) def test_invalidate_while_running(self): jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) @@ -348,7 +347,7 @@ res = self.meta_interp(f, [100, 30]) assert res == 6019 self.check_resops(guard_not_invalidated=8, guard_not_forced=0, - call_may_force=0, getfield_gc=0) + call_may_force=0) def test_list_simple_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -374,8 +373,7 @@ getarrayitem_gc_pure_r=0, getarrayitem_gc_i=0, getarrayitem_gc_r=0, - getfield_gc_i=0, getfield_gc_pure_i=0, - getfield_gc_r=0, getfield_gC_pure_r=0) + getfield_gc_i=0, getfield_gc_r=0) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -405,9 +403,7 @@ assert res == 700 # operations must have been removed by the frontend self.check_resops(getarrayitem_gc_pure_i=0, guard_not_invalidated=1, - getarrayitem_gc_i=0, - getfield_gc=0, getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + getarrayitem_gc_i=0, getfield_gc_i=0, getfield_gc_r=0) def test_list_length_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_recursive.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_recursive.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_recursive.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_recursive.py 2016-03-19 16:40:12.000000000 +0000 @@ -541,7 +541,8 @@ code = hlstr(code) return "%s %d %s" % (code, pc, code[pc]) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'], - get_printable_location=p) + 
get_printable_location=p, + is_recursive=True) def f(code, n): pc = 0 @@ -1311,7 +1312,7 @@ return (code + 1) * 2 driver = JitDriver(greens=["pc", "code"], reds='auto', - get_unique_id=get_unique_id) + get_unique_id=get_unique_id, is_recursive=True) def f(pc, code): i = 0 diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_resoperation.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_resoperation.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_resoperation.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_resoperation.py 2016-03-19 16:40:12.000000000 +0000 @@ -111,26 +111,26 @@ def test_unpack_1(): op = rop.ResOperation(rop.rop.VEC_UNPACK_I, [rop.InputArgVector(), ConstInt(0), ConstInt(1)]) - assert (op.type, op.datatype, op.bytesize, op.is_vector()) == \ - ('i', 'i', INT_WORD, False) + assert (op.type, op.is_vector()) == ('i', False) op = rop.ResOperation(rop.rop.VEC_UNPACK_I, [rop.InputArgVector(), ConstInt(0), ConstInt(2)]) - assert (op.type, op.datatype, op.bytesize, op.is_vector()) == \ - ('i', 'i', INT_WORD, True) + assert (op.type, op.is_vector()) == ('i', True) def test_load_singlefloat(): descr = ArrayDescr(8,4, None, 'S', concrete_type='f') - op = rop.ResOperation(rop.rop.VEC_RAW_LOAD_I, - [rop.InputArgInt(), ConstInt(0)], - descr=descr) + args = [rop.InputArgInt(), ConstInt(0)] + baseop = rop.ResOperation(rop.rop.RAW_LOAD_I, args, descr=descr) + baseop.set_forwarded(rop.VectorizationInfo(baseop)) + op = rop.VecOperation(rop.rop.VEC_RAW_LOAD_I, args, baseop, 4, descr=descr) assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('i', 'i', 4, True) def test_vec_store(): descr = ArrayDescr(0,8, None, 'F', concrete_type='f') vec = rop.InputArgVector() - op = rop.ResOperation(rop.rop.VEC_RAW_STORE, - [rop.InputArgRef(), ConstInt(0), vec], - descr=descr) + args = [rop.InputArgRef(), ConstInt(0), vec] + baseop = rop.ResOperation(rop.rop.RAW_STORE, args, descr=descr) + 
baseop.set_forwarded(rop.VectorizationInfo(baseop)) + op = rop.VecOperation(rop.rop.VEC_RAW_STORE, args, baseop, 2, descr=descr) assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('v', 'v', 8, True) def test_vec_guard(): @@ -138,15 +138,17 @@ vec.bytesize = 4 vec.type = vec.datatype = 'i' vec.sigend = True - op = rop.ResOperation(rop.rop.GUARD_TRUE, [vec]) - assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('v', 'i', 4, False) + baseop = rop.ResOperation(rop.rop.GUARD_TRUE, [vec]) + baseop.set_forwarded(rop.VectorizationInfo(baseop)) + op = rop.VecOperation(rop.rop.VEC_GUARD_TRUE, [vec], baseop, 4) + assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('v', 'v', 0, False) def test_types(): op = rop.ResOperation(rop.rop.INT_ADD, [ConstInt(0),ConstInt(1)]) - assert op.type == 'i' - assert op.datatype == 'i' - assert op.bytesize == INT_WORD - op = rop.ResOperation(rop.rop.VEC_CAST_FLOAT_TO_SINGLEFLOAT, [op]) + op.set_forwarded(rop.VectorizationInfo(op)) + baseop = rop.ResOperation(rop.rop.CAST_FLOAT_TO_SINGLEFLOAT, [op]) + baseop.set_forwarded(rop.VectorizationInfo(baseop)) + op = rop.VecOperation(rop.rop.VEC_CAST_FLOAT_TO_SINGLEFLOAT, [op], baseop, 2) assert op.type == 'i' assert op.datatype == 'i' assert op.bytesize == 4 diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_resumecode.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_resumecode.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_resumecode.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_resumecode.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,29 @@ + +from rpython.jit.metainterp.resumecode import NUMBERING, NULL_NUMBER +from rpython.jit.metainterp.resumecode import create_numbering,\ + unpack_numbering +from rpython.rtyper.lltypesystem import lltype + +from hypothesis import strategies, given + + +def test_pack_unpack(): + examples = [ + [1, 2, 3, 4, 257, 10000, 13, 15], + [1, 2, 3, 4], + range(1, 10, 2), + 
[13000, 12000, 10000, 256, 255, 254, 257, -3, -1000] + ] + for l in examples: + n = create_numbering(l) + assert unpack_numbering(n) == l + +@given(strategies.lists(strategies.integers(-2**15, 2**15-1))) +def test_roundtrip(l): + n = create_numbering(l) + assert unpack_numbering(n) == l + +@given(strategies.lists(strategies.integers(-2**15, 2**15-1))) +def test_compressing(l): + n = create_numbering(l) + assert len(n.code) <= len(l) * 3 diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_resume.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_resume.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_resume.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_resume.py 2016-03-19 16:40:12.000000000 +0000 @@ -5,12 +5,15 @@ from rpython.jit.metainterp.resume import ResumeDataVirtualAdder,\ AbstractResumeDataReader, get_VirtualCache_class, ResumeDataBoxReader,\ tag, TagOverflow, untag, tagged_eq, UNASSIGNED, TAGBOX, TAGVIRTUAL,\ - tagged_list_eq, AbstractVirtualInfo, NUMBERING, TAGCONST, NULLREF,\ + tagged_list_eq, AbstractVirtualInfo, TAGCONST, NULLREF,\ ResumeDataDirectReader, TAGINT, REF, VirtualInfo, VStructInfo,\ VArrayInfoNotClear, VStrPlainInfo, VStrConcatInfo, VStrSliceInfo,\ VUniPlainInfo, VUniConcatInfo, VUniSliceInfo, Snapshot, FrameInfo,\ capture_resumedata, ResumeDataLoopMemo, UNASSIGNEDVIRTUAL, INT,\ - annlowlevel, PENDINGFIELDSP + annlowlevel, PENDINGFIELDSP, unpack_uint, TAG_CONST_OFFSET, TopSnapshot +from rpython.jit.metainterp.resumecode import unpack_numbering,\ + create_numbering, NULL_NUMBER + from rpython.jit.metainterp.optimizeopt import info from rpython.jit.metainterp.history import ConstInt, Const, AbstractDescr from rpython.jit.metainterp.history import ConstPtr, ConstFloat @@ -19,8 +22,12 @@ from rpython.jit.codewriter import heaptracker, longlong from rpython.jit.metainterp.resoperation import ResOperation, InputArgInt,\ InputArgRef, rop +from rpython.jit.metainterp.test.strategies 
import boxlists from rpython.rlib.debug import debug_start, debug_stop, debug_print,\ have_debug_prints +from rpython.jit.metainterp import resumecode + +from hypothesis import given class Storage: rd_frame_info_list = None @@ -237,17 +244,18 @@ def get_current_position_info(self): class MyInfo: @staticmethod - def enumerate_vars(callback_i, callback_r, callback_f, _): + def enumerate_vars(callback_i, callback_r, callback_f, _, index): count_i = count_r = count_f = 0 - for index, ARG in enumerate(self.ARGS): + for ARG in self.ARGS: if ARG == lltype.Signed: - callback_i(index, count_i); count_i += 1 + index = callback_i(index, count_i); count_i += 1 elif ARG == llmemory.GCREF: - callback_r(index, count_r); count_r += 1 + index = callback_r(index, count_r); count_r += 1 elif ARG == longlong.FLOATSTORAGE: - callback_f(index, count_f); count_f += 1 + index = callback_f(index, count_f); count_f += 1 else: assert 0 + return index return MyInfo() def setarg_i(self, index, value): @@ -274,25 +282,20 @@ assert bh.written_f == expected_f -def Numbering(prev, nums): - numb = lltype.malloc(NUMBERING, len(nums)) - numb.prev = prev or lltype.nullptr(NUMBERING) - for i in range(len(nums)): - numb.nums[i] = nums[i] - return numb +Numbering = create_numbering + +def tagconst(i): + return tag(i + TAG_CONST_OFFSET, TAGCONST) def test_simple_read(): #b1, b2, b3 = [BoxInt(), InputArgRef(), BoxInt()] c1, c2, c3 = [ConstInt(111), ConstInt(222), ConstInt(333)] storage = Storage() storage.rd_consts = [c1, c2, c3] - numb = Numbering(None, [tag(0, TAGBOX), tag(1, TAGBOX), tag(2, TAGBOX)]) - numb = Numbering(numb, [tag(1, TAGCONST), tag(2, TAGCONST)]) - numb = Numbering(numb, [tag(0, TAGBOX), - tag(0, TAGCONST), - NULLREF, - tag(0, TAGBOX), - tag(1, TAGBOX)]) + numb = Numbering([tag(0, TAGBOX), tagconst(0), + NULLREF, tag(0, TAGBOX), tag(1, TAGBOX)] + + [tagconst(1), tagconst(2)] + + [tag(0, TAGBOX), tag(1, TAGBOX), tag(2, TAGBOX)]) storage.rd_numb = numb storage.rd_count = 3 # @@ -335,7 
+338,7 @@ def test_simple_read_tagged_ints(): storage = Storage() storage.rd_consts = [] - numb = Numbering(None, [tag(100, TAGINT)]) + numb = Numbering([tag(100, TAGINT)]) storage.rd_numb = numb # cpu = MyCPU([]) @@ -531,34 +534,30 @@ assert snap1.prev is snap assert snap1.boxes is l1 +class FakeJitCode(object): + def __init__(self, name, index): + self.name = name + self.index = index + def test_FrameInfo_create(): - jitcode = "JITCODE" + jitcode = FakeJitCode("jitcode", 13) fi = FrameInfo(None, jitcode, 1) assert fi.prev is None - assert fi.jitcode is jitcode - assert fi.pc == 1 + jitcode_pos, pc = unpack_uint(fi.packed_jitcode_pc) + assert jitcode_pos == 13 + assert pc == 1 - jitcode1 = "JITCODE1" + jitcode1 = FakeJitCode("JITCODE1", 42) fi1 = FrameInfo(fi, jitcode1, 3) assert fi1.prev is fi - assert fi1.jitcode is jitcode1 - assert fi1.pc == 3 - -def test_Numbering_create(): - l = [rffi.r_short(1), rffi.r_short(2)] - numb = Numbering(None, l) - assert not numb.prev - assert list(numb.nums) == l - - l1 = [rffi.r_short(3)] - numb1 = Numbering(numb, l1) - assert numb1.prev == numb - assert list(numb1.nums) == l1 + jitcode_pos, pc = unpack_uint(fi1.packed_jitcode_pc) + assert jitcode_pos == 42 + assert pc == 3 def test_capture_resumedata(): b1, b2, b3 = [InputArgInt(), InputArgRef(), InputArgInt()] c1, c2, c3 = [ConstInt(1), ConstInt(2), ConstInt(3)] - fs = [FakeFrame("code0", 0, b1, c1, b2)] + fs = [FakeFrame(FakeJitCode("code0", 13), 0, b1, c1, b2)] storage = Storage() capture_resumedata(fs, None, [], storage) @@ -567,22 +566,21 @@ assert fs[0].parent_resumedata_frame_info_list is None assert storage.rd_frame_info_list.prev is None - assert storage.rd_frame_info_list.jitcode == 'code0' + assert unpack_uint(storage.rd_frame_info_list.packed_jitcode_pc)[0] == 13 assert storage.rd_snapshot.boxes == [] # for virtualrefs snapshot = storage.rd_snapshot.prev assert snapshot.prev is None assert snapshot.boxes == fs[0]._env storage = Storage() - fs = [FakeFrame("code0", 
0, b1, c1, b2), - FakeFrame("code1", 3, b3, c2, b1), - FakeFrame("code2", 9, c3, b2)] + fs = [FakeFrame(FakeJitCode("code0", 0), 0, b1, c1, b2), + FakeFrame(FakeJitCode("code1", 1), 3, b3, c2, b1), + FakeFrame(FakeJitCode("code2", 2), 9, c3, b2)] capture_resumedata(fs, None, [], storage) frame_info_list = storage.rd_frame_info_list assert frame_info_list.prev is fs[2].parent_resumedata_frame_info_list - assert frame_info_list.jitcode == 'code2' - assert frame_info_list.pc == 9 + assert unpack_uint(frame_info_list.packed_jitcode_pc) == (2, 9) assert storage.rd_snapshot.boxes == [] # for virtualrefs snapshot = storage.rd_snapshot.prev @@ -591,14 +589,14 @@ frame_info_list = frame_info_list.prev assert frame_info_list.prev is fs[1].parent_resumedata_frame_info_list - assert frame_info_list.jitcode == 'code1' + assert unpack_uint(frame_info_list.packed_jitcode_pc) == (1, 3) snapshot = snapshot.prev assert snapshot.prev is fs[1].parent_resumedata_snapshot assert snapshot.boxes == fs[1]._env frame_info_list = frame_info_list.prev assert frame_info_list.prev is None - assert frame_info_list.jitcode == 'code0' + assert unpack_uint(frame_info_list.packed_jitcode_pc) == (0, 0) snapshot = snapshot.prev assert snapshot.prev is None assert snapshot.boxes == fs[0]._env @@ -611,11 +609,11 @@ frame_info_list = storage.rd_frame_info_list assert frame_info_list.prev is fs[2].parent_resumedata_frame_info_list - assert frame_info_list.jitcode == 'code2' - assert frame_info_list.pc == 15 - + assert unpack_uint(frame_info_list.packed_jitcode_pc) == (2, 15) + snapshot = storage.rd_snapshot - assert snapshot.boxes == vrs + vbs # in the same list + assert snapshot.boxes == vrs + assert snapshot.vable_boxes == [b2, b1] snapshot = snapshot.prev assert snapshot.prev is fs[2].parent_resumedata_snapshot @@ -863,7 +861,7 @@ tagged = memo.getconst(const) index, tagbits = untag(tagged) assert tagbits == TAGCONST - assert memo.consts[index] is const + assert memo.consts[index - TAG_CONST_OFFSET] is 
const tagged = memo.getconst(ConstInt(50000)) index2, tagbits = untag(tagged) assert tagbits == TAGCONST @@ -881,7 +879,7 @@ tagged = memo.getconst(const) index, tagbits = untag(tagged) assert tagbits == TAGCONST - assert memo.consts[index] is const + assert memo.consts[index - TAG_CONST_OFFSET] is const tagged = memo.getconst(cpu.ts.ConstRef(demo55o)) index2, tagbits = untag(tagged) assert tagbits == TAGCONST @@ -899,7 +897,7 @@ tagged = memo.getconst(const) index, tagbits = untag(tagged) assert tagbits == TAGCONST - assert memo.consts[index] is const + assert memo.consts[index - TAG_CONST_OFFSET] is const def test_ResumeDataLoopMemo_number(): b1, b2, b3, b4, b5 = [InputArgInt(), InputArgInt(), InputArgInt(), @@ -909,36 +907,35 @@ env = [b1, c1, b2, b1, c2] snap = Snapshot(None, env) env1 = [c3, b3, b1, c1] - snap1 = Snapshot(snap, env1) + snap1 = TopSnapshot(snap, env1, []) env2 = [c3, b3, b1, c3] - snap2 = Snapshot(snap, env2) + snap2 = TopSnapshot(snap, env2, []) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) + frameinfo = FrameInfo(None, FakeJitCode("jitcode", 0), 0) - numb, liveboxes, v = memo.number(FakeOptimizer(), snap1) + numb, liveboxes, v = memo.number(FakeOptimizer(), snap1, frameinfo) assert v == 0 assert liveboxes == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), b3: tag(2, TAGBOX)} - assert list(numb.nums) == [tag(3, TAGINT), tag(2, TAGBOX), tag(0, TAGBOX), - tag(1, TAGINT)] - assert list(numb.prev.nums) == [tag(0, TAGBOX), tag(1, TAGINT), - tag(1, TAGBOX), - tag(0, TAGBOX), tag(2, TAGINT)] - assert not numb.prev.prev + base = [0, 0, tag(0, TAGBOX), tag(1, TAGINT), + tag(1, TAGBOX), tag(0, TAGBOX), tag(2, TAGINT)] + + assert unpack_numbering(numb) == [0, 2, tag(3, TAGINT), tag(2, TAGBOX), + tag(0, TAGBOX), tag(1, TAGINT)] + base - numb2, liveboxes2, v = memo.number(FakeOptimizer(), snap2) + numb2, liveboxes2, v = memo.number(FakeOptimizer(), snap2, frameinfo) assert v == 0 assert liveboxes2 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), b3: tag(2, 
TAGBOX)} assert liveboxes2 is not liveboxes - assert list(numb2.nums) == [tag(3, TAGINT), tag(2, TAGBOX), tag(0, TAGBOX), - tag(3, TAGINT)] - assert numb2.prev == numb.prev + assert unpack_numbering(numb2) == [0, 2, tag(3, TAGINT), tag(2, TAGBOX), + tag(0, TAGBOX), tag(3, TAGINT)] + base env3 = [c3, b3, b1, c3] - snap3 = Snapshot(snap, env3) + snap3 = TopSnapshot(snap, env3, []) class FakeVirtualInfo(info.AbstractInfo): def __init__(self, virt): @@ -949,42 +946,61 @@ # renamed b3.set_forwarded(c4) - numb3, liveboxes3, v = memo.number(FakeOptimizer(), snap3) + numb3, liveboxes3, v = memo.number(FakeOptimizer(), snap3, frameinfo) assert v == 0 assert liveboxes3 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX)} - assert list(numb3.nums) == [tag(3, TAGINT), tag(4, TAGINT), tag(0, TAGBOX), - tag(3, TAGINT)] - assert numb3.prev == numb.prev + assert unpack_numbering(numb3) == [0, 2, tag(3, TAGINT), tag(4, TAGINT), + tag(0, TAGBOX), tag(3, TAGINT)] + base # virtual env4 = [c3, b4, b1, c3] - snap4 = Snapshot(snap, env4) + snap4 = TopSnapshot(snap, env4, []) b4.set_forwarded(FakeVirtualInfo(True)) - numb4, liveboxes4, v = memo.number(FakeOptimizer(), snap4) + numb4, liveboxes4, v = memo.number(FakeOptimizer(), snap4, frameinfo) assert v == 1 assert liveboxes4 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), b4: tag(0, TAGVIRTUAL)} - assert list(numb4.nums) == [tag(3, TAGINT), tag(0, TAGVIRTUAL), - tag(0, TAGBOX), tag(3, TAGINT)] - assert numb4.prev == numb.prev + assert unpack_numbering(numb4) == [0, 2, tag(3, TAGINT), tag(0, TAGVIRTUAL), + tag(0, TAGBOX), tag(3, TAGINT)] + base env5 = [b1, b4, b5] - snap5 = Snapshot(snap4, env5) + snap5 = TopSnapshot(snap4, [], env5) b4.set_forwarded(FakeVirtualInfo(True)) b5.set_forwarded(FakeVirtualInfo(True)) - numb5, liveboxes5, v = memo.number(FakeOptimizer(), snap5) + frameinfo = FrameInfo(frameinfo, FakeJitCode("foo", 2), 1) + numb5, liveboxes5, v = memo.number(FakeOptimizer(), snap5, frameinfo) assert v == 2 assert liveboxes5 == {b1: tag(0, 
TAGBOX), b2: tag(1, TAGBOX), b4: tag(0, TAGVIRTUAL), b5: tag(1, TAGVIRTUAL)} - assert list(numb5.nums) == [tag(0, TAGBOX), tag(0, TAGVIRTUAL), - tag(1, TAGVIRTUAL)] - assert numb5.prev == numb4 - + assert unpack_numbering(numb5) == [ + 3, tag(0, TAGBOX), tag(0, TAGVIRTUAL), tag(1, TAGVIRTUAL), + 0, + 2, 1, tag(3, TAGINT), tag(0, TAGVIRTUAL), tag(0, TAGBOX), tag(3, TAGINT) + ] + base + +@given(boxlists) +def test_ResumeDataLoopMemo_random(lst): + s = TopSnapshot(None, [], lst) + frameinfo = FrameInfo(None, FakeJitCode("foo", 0), 0) + memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) + num, liveboxes, v = memo.number(FakeOptimizer(), s, frameinfo) + l = unpack_numbering(num) + assert l[-1] == 0 + assert l[0] == len(lst) + for i, item in enumerate(lst): + v, tag = untag(l[i + 1]) + if tag == TAGBOX: + assert l[i + 1] == liveboxes[item] + elif tag == TAGCONST: + assert memo.consts[v].getint() == item.getint() + elif tag == TAGINT: + assert v == item.getint() + def test_ResumeDataLoopMemo_number_boxes(): memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) b1, b2 = [InputArgInt(), InputArgInt()] @@ -1068,9 +1084,12 @@ storage = Storage() snapshot = Snapshot(None, [b1, ConstInt(1), b1, b2]) snapshot = Snapshot(snapshot, [ConstInt(2), ConstInt(3)]) - snapshot = Snapshot(snapshot, [b1, b2, b3]) - storage.rd_snapshot = snapshot - storage.rd_frame_info_list = None + snapshot = Snapshot(snapshot, [b1, b2, b3]) + top_snapshot = TopSnapshot(snapshot, [], []) + frameinfo = FrameInfo(FrameInfo(FrameInfo(None, FakeJitCode("code1", 21), 22), + FakeJitCode("code2", 31), 32), FakeJitCode("code3", 41), 42) + storage.rd_snapshot = top_snapshot + storage.rd_frame_info_list = frameinfo return storage def test_virtual_adder_int_constants(): @@ -1082,11 +1101,14 @@ assert storage.rd_snapshot is None cpu = MyCPU([]) reader = ResumeDataDirectReader(MyMetaInterp(cpu), storage, "deadframe") + reader.consume_vref_and_vable(None, None, None) + reader.cur_index += 2 # framestack 
_next_section(reader, sys.maxint, 2**16, -65) + reader.cur_index += 2 # framestack _next_section(reader, 2, 3) + reader.cur_index += 2 # framestack _next_section(reader, sys.maxint, 1, sys.maxint, 2**16) - def test_virtual_adder_memo_const_sharing(): b1s, b2s, b3s = [ConstInt(sys.maxint), ConstInt(2**16), ConstInt(-65)] storage = make_storage(b1s, b2s, b3s) @@ -1120,8 +1142,9 @@ return True class MyInfo: @staticmethod - def enumerate_vars(callback_i, callback_r, callback_f, _): - for index, tagged in enumerate(self.cur_numb.nums): + def enumerate_vars(callback_i, callback_r, callback_f, _, index): + while index < len(self.numb.code): + tagged, _ = resumecode.numb_next_item(self.numb, index) _, tag = untag(tagged) if tag == TAGVIRTUAL: kind = REF @@ -1129,13 +1152,20 @@ kind = Whatever() box = self.decode_box(tagged, kind) if box.type == INT: - callback_i(index, index) + index = callback_i(index, index) elif box.type == REF: - callback_r(index, index) + index = callback_r(index, index) elif box.type == FLOAT: - callback_f(index, index) + index = callback_f(index, index) else: assert 0 + size, self.cur_index = resumecode.numb_next_item(self.numb, 0) + assert size == 0 + size, self.cur_index = resumecode.numb_next_item(self.numb, self.cur_index) + assert size == 0 + pc, self.cur_index = resumecode.numb_next_item(self.numb, self.cur_index) + jitcode_pos, self.cur_index = resumecode.numb_next_item(self.numb, self.cur_index) + self._prepare_next_section(MyInfo()) return self.lst @@ -1151,6 +1181,7 @@ def test_virtual_adder_no_op_renaming(): + py.test.skip("rewrite fake reader") b1s, b2s, b3s = [InputArgInt(1), InputArgInt(2), InputArgInt(3)] storage = make_storage(b1s, b2s, b3s) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) @@ -1175,6 +1206,7 @@ def test_virtual_adder_make_constant(): + py.test.skip("rewrite fake reader") b1s, b2s, b3s = [InputArgInt(1), InputArgRef(), InputArgInt(3)] b1s = ConstInt(111) storage = make_storage(b1s, b2s, b3s) diff -Nru 
pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_strstorage.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_strstorage.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_strstorage.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_strstorage.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,53 @@ +import py +import sys +import struct +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.strstorage import str_storage_getitem +from rpython.rlib.test.test_strstorage import BaseStrStorageTest +from rpython.jit.codewriter import longlong +from rpython.jit.metainterp.history import getkind +from rpython.jit.metainterp.test.support import LLJitMixin + +class TestStrStorage(BaseStrStorageTest, LLJitMixin): + + # for the individual tests see + # ====> ../../../rlib/test/test_strstorage.py + + def str_storage_getitem(self, TYPE, buf, offset): + def f(): + return str_storage_getitem(TYPE, buf, offset) + res = self.interp_operations(f, [], supports_singlefloats=True) + # + kind = getkind(TYPE)[0] # 'i' or 'f' + self.check_operations_history({'gc_load_indexed_%s' % kind: 1, + 'finish': 1}) + # + if TYPE == lltype.SingleFloat: + # interp_operations returns the int version of r_singlefloat, but + # our tests expects to receive an r_singlefloat: let's convert it + # back! 
+ return longlong.int2singlefloat(res) + return res + + #def str_storage_supported(self, TYPE): + # py.test.skip('this is not a JIT test') + + def test_force_virtual_str_storage(self): + byteorder = sys.byteorder + size = rffi.sizeof(lltype.Signed) + def f(val): + if byteorder == 'little': + x = chr(val) + '\x00'*(size-1) + else: + x = '\x00'*(size-1) + chr(val) + return str_storage_getitem(lltype.Signed, x, 0) + res = self.interp_operations(f, [42], supports_singlefloats=True) + assert res == 42 + self.check_operations_history({ + 'newstr': 1, # str forcing + 'strsetitem': 1, # str forcing + 'call_pure_r': 1, # str forcing (copystrcontent) + 'guard_no_exception': 1, # str forcing + 'gc_load_indexed_i': 1, # str_storage_getitem + 'finish': 1 + }) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_tlc.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_tlc.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_tlc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_tlc.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,4 @@ import py -from rpython.rtyper.module.support import LLSupport from rpython.jit.tl import tlc diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_tracingopts.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_tracingopts.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_tracingopts.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_tracingopts.py 2016-03-19 16:40:12.000000000 +0000 @@ -436,10 +436,10 @@ return p.x[0] + p.x[1] res = self.interp_operations(fn, [7]) assert res == 7 + 7 + 1 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_r=0) res = self.interp_operations(fn, [-7]) assert res == -7 - 7 + 1 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_r=0) def test_heap_caching_and_elidable_function(self): class A: 
@@ -517,12 +517,12 @@ return a1[0] + a2[0] + gn(a1, a2) res = self.interp_operations(fn, [7]) assert res == 2 * 7 + 2 * 6 - self.check_operations_history(getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_i=0, + getfield_gc_r=0) res = self.interp_operations(fn, [-7]) assert res == 2 * -7 + 2 * -8 - self.check_operations_history(getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_i=0, + getfield_gc_r=0) def test_heap_caching_multiple_arrays(self): class Gbl(object): diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_vector.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_vector.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_vector.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_vector.py 2016-03-19 16:40:12.000000000 +0000 @@ -173,7 +173,7 @@ (lltype.Signed, lambda x: not bool(x), 1, None, -1,32, False), (lltype.Signed, lambda x: not bool(x), 1, 0, 14,32, True), (lltype.Signed, lambda x: not bool(x), 1, 0, 15,31, True), - (lltype.Signed, lambda x: not bool(x), 1, 0, 16,30, True), + (lltype.Signed, lambda x: not bool(x), 1, 0, 4,30, True), (lltype.Signed, lambda x: x == 0, 1, None, -1,33, False), (lltype.Signed, lambda x: x == 0, 1, 0, 33,34, True), # any diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_virtual.py pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_virtual.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/test/test_virtual.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/test/test_virtual.py 2016-03-19 16:40:12.000000000 +0000 @@ -1077,7 +1077,7 @@ res = self.meta_interp(f, [], repeat=7) assert res == f() - def test_getfield_gc_pure_nobug(self): + def test_pure_getfield_gc_nobug(self): mydriver = JitDriver(reds=['i', 's', 'a'], greens=[]) class A(object): diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/virtualizable.py pypy-5.0.1+dfsg/rpython/jit/metainterp/virtualizable.py 
--- pypy-4.0.1+dfsg/rpython/jit/metainterp/virtualizable.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/virtualizable.py 2016-03-19 16:40:12.000000000 +0000 @@ -2,6 +2,7 @@ from rpython.jit.metainterp import history from rpython.jit.metainterp.typesystem import deref, fieldType, arrayItem from rpython.jit.metainterp.warmstate import wrap, unwrap +from rpython.jit.metainterp.resumecode import numb_next_item from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper import rvirtualizable from rpython.rtyper.lltypesystem import lltype, llmemory @@ -116,53 +117,52 @@ i = i + 1 assert len(boxes) == i + 1 - def write_from_resume_data_partial(virtualizable, reader, numb): + def get_total_size(virtualizable): + virtualizable = cast_gcref_to_vtype(virtualizable) + size = 0 + for _, fieldname in unroll_array_fields: + lst = getattr(virtualizable, fieldname) + size += getlength(lst) + for _, fieldname in unroll_static_fields: + size += 1 + return size + + def write_from_resume_data_partial(virtualizable, reader, index, numb): virtualizable = cast_gcref_to_vtype(virtualizable) # Load values from the reader (see resume.py) described by # the list of numbers 'nums', and write them in their proper - # place in the 'virtualizable'. This works from the end of - # the list and returns the index in 'nums' of the start of - # the virtualizable data found, allowing the caller to do - # further processing with the start of the list. - i = len(numb.nums) - 1 - assert i >= 0 - for ARRAYITEMTYPE, fieldname in unroll_array_fields_rev: - lst = getattr(virtualizable, fieldname) - for j in range(getlength(lst) - 1, -1, -1): - i -= 1 - assert i >= 0 - x = reader.load_value_of_type(ARRAYITEMTYPE, numb.nums[i]) - setarrayitem(lst, j, x) - for FIELDTYPE, fieldname in unroll_static_fields_rev: - i -= 1 - assert i >= 0 - x = reader.load_value_of_type(FIELDTYPE, numb.nums[i]) + # place in the 'virtualizable'. 
+ for FIELDTYPE, fieldname in unroll_static_fields: + item, index = numb_next_item(numb, index) + x = reader.load_value_of_type(FIELDTYPE, item) setattr(virtualizable, fieldname, x) - return i + for ARRAYITEMTYPE, fieldname in unroll_array_fields: + lst = getattr(virtualizable, fieldname) + for j in range(getlength(lst)): + item, index = numb_next_item(numb, index) + x = reader.load_value_of_type(ARRAYITEMTYPE, item) + setarrayitem(lst, j, x) + return index - def load_list_of_boxes(virtualizable, reader, numb): + def load_list_of_boxes(virtualizable, reader, vable_box, numb, index): virtualizable = cast_gcref_to_vtype(virtualizable) # Uses 'virtualizable' only to know the length of the arrays; # does not write anything into it. The returned list is in # the format expected of virtualizable_boxes, so it ends in # the virtualizable itself. - i = len(numb.nums) - 1 - assert i >= 0 - boxes = [reader.decode_box_of_type(self.VTYPEPTR, numb.nums[i])] - for ARRAYITEMTYPE, fieldname in unroll_array_fields_rev: - lst = getattr(virtualizable, fieldname) - for j in range(getlength(lst) - 1, -1, -1): - i -= 1 - assert i >= 0 - box = reader.decode_box_of_type(ARRAYITEMTYPE, numb.nums[i]) - boxes.append(box) - for FIELDTYPE, fieldname in unroll_static_fields_rev: - i -= 1 - assert i >= 0 - box = reader.decode_box_of_type(FIELDTYPE, numb.nums[i]) + boxes = [] + for FIELDTYPE, fieldname in unroll_static_fields: + item, index = numb_next_item(numb, index) + box = reader.decode_box_of_type(FIELDTYPE, item) boxes.append(box) - boxes.reverse() - return boxes + for ARRAYITEMTYPE, fieldname in unroll_array_fields: + lst = getattr(virtualizable, fieldname) + for j in range(getlength(lst)): + item, index = numb_next_item(numb, index) + box = reader.decode_box_of_type(ARRAYITEMTYPE, item) + boxes.append(box) + boxes.append(vable_box) + return boxes, index def check_boxes(virtualizable, boxes): virtualizable = cast_gcref_to_vtype(virtualizable) @@ -217,6 +217,7 @@ self.check_boxes = 
check_boxes self.get_index_in_array = get_index_in_array self.get_array_length = get_array_length + self.get_total_size = get_total_size def cast_to_vtype(virtualizable): return self.cpu.ts.cast_to_instance_maybe(VTYPEPTR, virtualizable) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/warmspot.py pypy-5.0.1+dfsg/rpython/jit/metainterp/warmspot.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/warmspot.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/warmspot.py 2016-03-19 16:40:12.000000000 +0000 @@ -46,6 +46,17 @@ no_stats=True, ProfilerClass=ProfilerClass, **kwds) + if len(warmrunnerdesc.jitdrivers_sd) == 1: + jd = warmrunnerdesc.jitdrivers_sd[0] + jd.jitdriver.is_recursive = True + else: + count_recursive = 0 + for jd in warmrunnerdesc.jitdrivers_sd: + count_recursive += jd.jitdriver.is_recursive + if count_recursive == 0: + raise Exception("if you have more than one jitdriver, at least" + " one of them has to be marked with is_recursive=True," + " none found") for jd in warmrunnerdesc.jitdrivers_sd: jd.warmstate.set_param_inlining(inline) jd.warmstate.set_param_vec(vec) @@ -250,7 +261,8 @@ verbose = False # not self.cpu.translate_support_code self.rewrite_access_helpers() self.create_jit_entry_points() - self.codewriter.make_jitcodes(verbose=verbose) + jitcodes = self.codewriter.make_jitcodes(verbose=verbose) + self.metainterp_sd.jitcodes = jitcodes self.rewrite_can_enter_jits() self.rewrite_set_param_and_get_stats() self.rewrite_force_virtual(vrefinfo) diff -Nru pypy-4.0.1+dfsg/rpython/jit/metainterp/warmstate.py pypy-5.0.1+dfsg/rpython/jit/metainterp/warmstate.py --- pypy-4.0.1+dfsg/rpython/jit/metainterp/warmstate.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/metainterp/warmstate.py 2016-03-19 16:40:12.000000000 +0000 @@ -433,6 +433,14 @@ bound_reached(hash, None, *args) return + # Workaround for issue #2200, maybe temporary. 
This is not + # a proper fix, but only a hack that should work well enough + # for PyPy's main jitdriver... See test_issue2200_recursion + from rpython.jit.metainterp.blackhole import workaround2200 + if workaround2200.active: + workaround2200.active = False + return + # Here, we have found 'cell'. # if cell.flags & (JC_TRACING | JC_TEMPORARY): diff -Nru pypy-4.0.1+dfsg/rpython/jit/tool/oparser.py pypy-5.0.1+dfsg/rpython/jit/tool/oparser.py --- pypy-4.0.1+dfsg/rpython/jit/tool/oparser.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/tool/oparser.py 2016-03-19 16:40:12.000000000 +0000 @@ -9,7 +9,8 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation, \ InputArgInt, InputArgRef, InputArgFloat, InputArgVector, \ - ResOpWithDescr, N_aryOp, UnaryOp, PlainResOp, optypes, OpHelpers + ResOpWithDescr, N_aryOp, UnaryOp, PlainResOp, optypes, OpHelpers, \ + VectorizationInfo class ParseError(Exception): pass @@ -338,7 +339,8 @@ if res in self.vars: raise ParseError("Double assign to var %s in line: %s" % (res, line)) resop = self.create_op(opnum, args, res, descr, fail_args) - res = self.update_vector(resop, res) + if not self.use_mock_model: + res = self.update_vector(resop, res) self.update_memo(resop, res) self.vars[res] = resop return resop @@ -363,11 +365,17 @@ pattern = re.compile('.*\[(\d+)x(u?)(i|f)(\d+)\]') match = pattern.match(var) if match: - resop.count = int(match.group(1)) - resop.signed = not (match.group(2) == 'u') - resop.datatype = match.group(3) - resop.bytesize = int(match.group(4)) // 8 + vecinfo = VectorizationInfo(None) + vecinfo.count = int(match.group(1)) + vecinfo.signed = not (match.group(2) == 'u') + vecinfo.datatype = match.group(3) + vecinfo.bytesize = int(match.group(4)) // 8 + resop._vec_debug_info = vecinfo return var[:var.find('[')] + + vecinfo = VectorizationInfo(resop) + vecinfo.count = -1 + resop._vec_debug_info = vecinfo return var def parse_op_no_result(self, line): diff -Nru 
pypy-4.0.1+dfsg/rpython/jit/tool/traceviewer.py pypy-5.0.1+dfsg/rpython/jit/tool/traceviewer.py --- pypy-4.0.1+dfsg/rpython/jit/tool/traceviewer.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/jit/tool/traceviewer.py 2016-03-19 16:40:12.000000000 +0000 @@ -103,9 +103,9 @@ self.last_guard = -1 else: # guards can be out of order nowadays - groups = sorted(groups) - self.first_guard = guard_number(groups[0]) - self.last_guard = guard_number(groups[-1]) + groups = sorted(map(guard_number, groups)) + self.first_guard = groups[0] + self.last_guard = groups[-1] content = property(get_content, set_content) diff -Nru pypy-4.0.1+dfsg/rpython/memory/gc/incminimark.py pypy-5.0.1+dfsg/rpython/memory/gc/incminimark.py --- pypy-4.0.1+dfsg/rpython/memory/gc/incminimark.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/memory/gc/incminimark.py 2016-03-19 16:40:12.000000000 +0000 @@ -702,10 +702,11 @@ major (gen>=2) collection.""" if gen <= 1: self.minor_collection() - if gen == 1 or self.gc_state != STATE_SCANNING: + if gen == 1 or (self.gc_state != STATE_SCANNING and gen != -1): self.major_collection_step() else: self.minor_and_major_collection() + self.rrc_invoke_callback() def collect_and_reserve(self, totalsize): @@ -783,12 +784,15 @@ self.threshold_reached()): # ^^but only if still self.minor_collection() # the same collection self.major_collection_step() - # - # The nursery might not be empty now, because of - # execute_finalizers(). If it is almost full again, - # we need to fix it with another call to minor_collection(). - if self.nursery_free + totalsize > self.nursery_top: - self.minor_collection() + # + self.rrc_invoke_callback() + # + # The nursery might not be empty now, because of + # execute_finalizers() or rrc_invoke_callback(). + # If it is almost full again, + # we need to fix it with another call to minor_collection(). 
+ if self.nursery_free + totalsize > self.nursery_top: + self.minor_collection() # else: ll_assert(minor_collection_count == 2, @@ -861,6 +865,7 @@ if self.threshold_reached(raw_malloc_usage(totalsize) + self.nursery_size // 2): self.major_collection_step(raw_malloc_usage(totalsize)) + self.rrc_invoke_callback() # note that this loop should not be infinite: when the # last step of a major collection is done but # threshold_reached(totalsize) is still true, then @@ -1080,35 +1085,19 @@ "odd-valued (i.e. tagged) pointer unexpected here") return self.nursery <= addr < self.nursery + self.nursery_size - def appears_to_be_young(self, addr): - # "is a valid addr to a young object?" - # but it's ok to occasionally return True accidentally. - # Maybe the best implementation would be a bloom filter - # of some kind instead of the dictionary lookup that is - # sometimes done below. But the expected common answer - # is "Yes" because addr points to the nursery, so it may - # not be useful to optimize the other case too much. - # - # First, if 'addr' appears to be a pointer to some place within - # the nursery, return True - if not self.translated_to_c: - # When non-translated, filter out tagged pointers explicitly. - # When translated, it may occasionally give a wrong answer - # of True if 'addr' is a tagged pointer with just the wrong value. - if not self.is_valid_gc_object(addr): - return False - + def is_young_object(self, addr): + # Check if the object at 'addr' is young. + if not self.is_valid_gc_object(addr): + return False # filter out tagged pointers explicitly. 
if self.nursery <= addr < self.nursery_top: return True # addr is in the nursery - # # Else, it may be in the set 'young_rawmalloced_objects' return (bool(self.young_rawmalloced_objects) and self.young_rawmalloced_objects.contains(addr)) - appears_to_be_young._always_inline_ = True def debug_is_old_object(self, addr): return (self.is_valid_gc_object(addr) - and not self.appears_to_be_young(addr)) + and not self.is_young_object(addr)) def is_forwarded(self, obj): """Returns True if the nursery obj is marked as forwarded. @@ -1618,6 +1607,10 @@ self._visit_old_objects_pointing_to_pinned, None) current_old_objects_pointing_to_pinned.delete() # + # visit the P list from rawrefcount, if enabled. + if self.rrc_enabled: + self.rrc_minor_collection_trace() + # while True: # If we are using card marking, do a partial trace of the arrays # that are flagged with GCFLAG_CARDS_SET. @@ -1661,6 +1654,10 @@ else: self.nursery_objects_shadows.clear() # + # visit the P and O lists from rawrefcount, if enabled. + if self.rrc_enabled: + self.rrc_minor_collection_free() + # # Walk the list of young raw-malloced objects, and either free # them or make them old. if self.young_rawmalloced_objects: @@ -2178,9 +2175,13 @@ # finalizers/weak references are rare and short which means that # they do not need a separate state and do not need to be # made incremental. + # For now, the same applies to rawrefcount'ed objects. 
if (not self.objects_to_trace.non_empty() and not self.more_objects_to_trace.non_empty()): # + if self.rrc_enabled: + self.rrc_major_collection_trace() + # if self.objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() elif self.old_objects_with_weakrefs.non_empty(): @@ -2215,6 +2216,10 @@ self.old_objects_pointing_to_pinned = \ new_old_objects_pointing_to_pinned self.updated_old_objects_pointing_to_pinned = True + # + if self.rrc_enabled: + self.rrc_major_collection_free() + # self.gc_state = STATE_SWEEPING #END MARKING elif self.gc_state == STATE_SWEEPING: @@ -2745,3 +2750,247 @@ (obj + offset).address[0] = llmemory.NULL self.old_objects_with_weakrefs.delete() self.old_objects_with_weakrefs = new_with_weakref + + + # ---------- + # RawRefCount + + rrc_enabled = False + + _ADDRARRAY = lltype.Array(llmemory.Address, hints={'nolength': True}) + PYOBJ_HDR = lltype.Struct('GCHdr_PyObject', + ('ob_refcnt', lltype.Signed), + ('ob_pypy_link', lltype.Signed)) + PYOBJ_HDR_PTR = lltype.Ptr(PYOBJ_HDR) + RAWREFCOUNT_DEALLOC_TRIGGER = lltype.Ptr(lltype.FuncType([], lltype.Void)) + + def _pyobj(self, pyobjaddr): + return llmemory.cast_adr_to_ptr(pyobjaddr, self.PYOBJ_HDR_PTR) + + def rawrefcount_init(self, dealloc_trigger_callback): + # see pypy/doc/discussion/rawrefcount.rst + if not self.rrc_enabled: + self.rrc_p_list_young = self.AddressStack() + self.rrc_p_list_old = self.AddressStack() + self.rrc_o_list_young = self.AddressStack() + self.rrc_o_list_old = self.AddressStack() + self.rrc_p_dict = self.AddressDict() # non-nursery keys only + self.rrc_p_dict_nurs = self.AddressDict() # nursery keys only + p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw', + track_allocation=False) + self.rrc_singleaddr = llmemory.cast_ptr_to_adr(p) + self.rrc_dealloc_trigger_callback = dealloc_trigger_callback + self.rrc_dealloc_pending = self.AddressStack() + self.rrc_enabled = True + + def check_no_more_rawrefcount_state(self): + "NOT_RPYTHON: for tests" + assert 
self.rrc_p_list_young.length() == 0 + assert self.rrc_p_list_old .length() == 0 + assert self.rrc_o_list_young.length() == 0 + assert self.rrc_o_list_old .length() == 0 + def check_value_is_null(key, value, ignore): + assert value == llmemory.NULL + self.rrc_p_dict.foreach(check_value_is_null, None) + self.rrc_p_dict_nurs.foreach(check_value_is_null, None) + + def rawrefcount_create_link_pypy(self, gcobj, pyobject): + ll_assert(self.rrc_enabled, "rawrefcount.init not called") + obj = llmemory.cast_ptr_to_adr(gcobj) + objint = llmemory.cast_adr_to_int(obj, "symbolic") + self._pyobj(pyobject).ob_pypy_link = objint + # + lst = self.rrc_p_list_young + if self.is_in_nursery(obj): + dct = self.rrc_p_dict_nurs + else: + dct = self.rrc_p_dict + if not self.is_young_object(obj): + lst = self.rrc_p_list_old + lst.append(pyobject) + dct.setitem(obj, pyobject) + + def rawrefcount_create_link_pyobj(self, gcobj, pyobject): + ll_assert(self.rrc_enabled, "rawrefcount.init not called") + obj = llmemory.cast_ptr_to_adr(gcobj) + if self.is_young_object(obj): + self.rrc_o_list_young.append(pyobject) + else: + self.rrc_o_list_old.append(pyobject) + objint = llmemory.cast_adr_to_int(obj, "symbolic") + self._pyobj(pyobject).ob_pypy_link = objint + # there is no rrc_o_dict + + def rawrefcount_from_obj(self, gcobj): + obj = llmemory.cast_ptr_to_adr(gcobj) + if self.is_in_nursery(obj): + dct = self.rrc_p_dict_nurs + else: + dct = self.rrc_p_dict + return dct.get(obj) + + def rawrefcount_to_obj(self, pyobject): + obj = llmemory.cast_int_to_adr(self._pyobj(pyobject).ob_pypy_link) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def rawrefcount_next_dead(self): + if self.rrc_dealloc_pending.non_empty(): + return self.rrc_dealloc_pending.pop() + return llmemory.NULL + + + def rrc_invoke_callback(self): + if self.rrc_enabled and self.rrc_dealloc_pending.non_empty(): + self.rrc_dealloc_trigger_callback() + + def rrc_minor_collection_trace(self): + length_estimate = 
self.rrc_p_dict_nurs.length() + self.rrc_p_dict_nurs.delete() + self.rrc_p_dict_nurs = self.AddressDict(length_estimate) + self.rrc_p_list_young.foreach(self._rrc_minor_trace, + self.rrc_singleaddr) + + def _rrc_minor_trace(self, pyobject, singleaddr): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT + # + rc = self._pyobj(pyobject).ob_refcnt + if rc == REFCNT_FROM_PYPY or rc == REFCNT_FROM_PYPY_LIGHT: + pass # the corresponding object may die + else: + # force the corresponding object to be alive + intobj = self._pyobj(pyobject).ob_pypy_link + singleaddr.address[0] = llmemory.cast_int_to_adr(intobj) + self._trace_drag_out(singleaddr, llmemory.NULL) + + def rrc_minor_collection_free(self): + ll_assert(self.rrc_p_dict_nurs.length() == 0, "p_dict_nurs not empty 1") + lst = self.rrc_p_list_young + while lst.non_empty(): + self._rrc_minor_free(lst.pop(), self.rrc_p_list_old, + self.rrc_p_dict) + lst = self.rrc_o_list_young + no_o_dict = self.null_address_dict() + while lst.non_empty(): + self._rrc_minor_free(lst.pop(), self.rrc_o_list_old, + no_o_dict) + + def _rrc_minor_free(self, pyobject, surviving_list, surviving_dict): + intobj = self._pyobj(pyobject).ob_pypy_link + obj = llmemory.cast_int_to_adr(intobj) + if self.is_in_nursery(obj): + if self.is_forwarded(obj): + # Common case: survives and moves + obj = self.get_forwarding_address(obj) + intobj = llmemory.cast_adr_to_int(obj, "symbolic") + self._pyobj(pyobject).ob_pypy_link = intobj + surviving = True + if surviving_dict: + # Surviving nursery object: was originally in + # rrc_p_dict_nurs and now must be put into rrc_p_dict + surviving_dict.setitem(obj, pyobject) + else: + surviving = False + elif (bool(self.young_rawmalloced_objects) and + self.young_rawmalloced_objects.contains(obj)): + # young weakref to a young raw-malloced object + if self.header(obj).tid & GCFLAG_VISITED_RMY: + surviving = True # survives, but does not move + else: + 
surviving = False + if surviving_dict: + # Dying young large object: was in rrc_p_dict, + # must be deleted + surviving_dict.setitem(obj, llmemory.NULL) + else: + ll_assert(False, "rrc_X_list_young contains non-young obj") + return + # + if surviving: + surviving_list.append(pyobject) + else: + self._rrc_free(pyobject) + + def _rrc_free(self, pyobject): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT + # + rc = self._pyobj(pyobject).ob_refcnt + if rc >= REFCNT_FROM_PYPY_LIGHT: + rc -= REFCNT_FROM_PYPY_LIGHT + if rc == 0: + lltype.free(self._pyobj(pyobject), flavor='raw') + else: + # can only occur if LIGHT is used in create_link_pyobj() + self._pyobj(pyobject).ob_refcnt = rc + self._pyobj(pyobject).ob_pypy_link = 0 + else: + ll_assert(rc >= REFCNT_FROM_PYPY, "refcount underflow?") + ll_assert(rc < int(REFCNT_FROM_PYPY_LIGHT * 0.99), + "refcount underflow from REFCNT_FROM_PYPY_LIGHT?") + rc -= REFCNT_FROM_PYPY + self._pyobj(pyobject).ob_pypy_link = 0 + if rc == 0: + self.rrc_dealloc_pending.append(pyobject) + # an object with refcnt == 0 cannot stay around waiting + # for its deallocator to be called. Some code (lxml) + # expects that tp_dealloc is called immediately when + # the refcnt drops to 0. If it isn't, we get some + # uncleared raw pointer that can still be used to access + # the object; but (PyObject *)raw_pointer is then bogus + # because after a Py_INCREF()/Py_DECREF() on it, its + # tp_dealloc is also called! 
+ rc = 1 + self._pyobj(pyobject).ob_refcnt = rc + _rrc_free._always_inline_ = True + + def rrc_major_collection_trace(self): + self.rrc_p_list_old.foreach(self._rrc_major_trace, None) + + def _rrc_major_trace(self, pyobject, ignore): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT + # + rc = self._pyobj(pyobject).ob_refcnt + if rc == REFCNT_FROM_PYPY or rc == REFCNT_FROM_PYPY_LIGHT: + pass # the corresponding object may die + else: + # force the corresponding object to be alive + intobj = self._pyobj(pyobject).ob_pypy_link + obj = llmemory.cast_int_to_adr(intobj) + self.objects_to_trace.append(obj) + self.visit_all_objects() + + def rrc_major_collection_free(self): + ll_assert(self.rrc_p_dict_nurs.length() == 0, "p_dict_nurs not empty 2") + length_estimate = self.rrc_p_dict.length() + self.rrc_p_dict.delete() + self.rrc_p_dict = new_p_dict = self.AddressDict(length_estimate) + new_p_list = self.AddressStack() + while self.rrc_p_list_old.non_empty(): + self._rrc_major_free(self.rrc_p_list_old.pop(), new_p_list, + new_p_dict) + self.rrc_p_list_old.delete() + self.rrc_p_list_old = new_p_list + # + new_o_list = self.AddressStack() + no_o_dict = self.null_address_dict() + while self.rrc_o_list_old.non_empty(): + self._rrc_major_free(self.rrc_o_list_old.pop(), new_o_list, + no_o_dict) + self.rrc_o_list_old.delete() + self.rrc_o_list_old = new_o_list + + def _rrc_major_free(self, pyobject, surviving_list, surviving_dict): + # The pyobject survives if the corresponding obj survives. 
+ # This is true if the obj has one of the following two flags: + # * GCFLAG_VISITED: was seen during tracing + # * GCFLAG_NO_HEAP_PTRS: immortal object never traced (so far) + intobj = self._pyobj(pyobject).ob_pypy_link + obj = llmemory.cast_int_to_adr(intobj) + if self.header(obj).tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS): + surviving_list.append(pyobject) + if surviving_dict: + surviving_dict.insertclean(obj, pyobject) + else: + self._rrc_free(pyobject) diff -Nru pypy-4.0.1+dfsg/rpython/memory/gc/inspector.py pypy-5.0.1+dfsg/rpython/memory/gc/inspector.py --- pypy-4.0.1+dfsg/rpython/memory/gc/inspector.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/memory/gc/inspector.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,7 +3,6 @@ """ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup from rpython.rlib.objectmodel import free_non_gc_object -from rpython.rtyper.module.ll_os import UNDERSCORE_ON_WIN32 from rpython.rlib import rposix, rgc, jit from rpython.memory.support import AddressDict, get_address_stack @@ -94,7 +93,7 @@ # ---------- -raw_os_write = rffi.llexternal(UNDERSCORE_ON_WIN32 + 'write', +raw_os_write = rffi.llexternal(rposix.UNDERSCORE_ON_WIN32 + 'write', [rffi.INT, llmemory.Address, rffi.SIZE_T], rffi.SIZE_T, sandboxsafe=True, _nowrapper=True) diff -Nru pypy-4.0.1+dfsg/rpython/memory/gc/test/test_rawrefcount.py pypy-5.0.1+dfsg/rpython/memory/gc/test/test_rawrefcount.py --- pypy-4.0.1+dfsg/rpython/memory/gc/test/test_rawrefcount.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/memory/gc/test/test_rawrefcount.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,291 @@ +import py +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.memory.gc.incminimark import IncrementalMiniMarkGC +from rpython.memory.gc.test.test_direct import BaseDirectGCTest +from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY +from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT + +PYOBJ_HDR = 
IncrementalMiniMarkGC.PYOBJ_HDR +PYOBJ_HDR_PTR = IncrementalMiniMarkGC.PYOBJ_HDR_PTR + +S = lltype.GcForwardReference() +S.become(lltype.GcStruct('S', + ('x', lltype.Signed), + ('prev', lltype.Ptr(S)), + ('next', lltype.Ptr(S)))) + + +class TestRawRefCount(BaseDirectGCTest): + GCClass = IncrementalMiniMarkGC + + def _collect(self, major, expected_trigger=0): + if major: + self.gc.collect() + else: + self.gc.minor_collection() + count1 = len(self.trigger) + self.gc.rrc_invoke_callback() + count2 = len(self.trigger) + assert count2 - count1 == expected_trigger + + def _rawrefcount_pair(self, intval, is_light=False, is_pyobj=False, + create_old=False, create_immortal=False, + force_external=False): + if is_light: + rc = REFCNT_FROM_PYPY_LIGHT + else: + rc = REFCNT_FROM_PYPY + self.trigger = [] + self.gc.rawrefcount_init(lambda: self.trigger.append(1)) + # + if create_immortal: + p1 = lltype.malloc(S, immortal=True) + else: + saved = self.gc.nonlarge_max + try: + if force_external: + self.gc.nonlarge_max = 1 + p1 = self.malloc(S) + finally: + self.gc.nonlarge_max = saved + p1.x = intval + if create_immortal: + self.consider_constant(p1) + elif create_old: + self.stackroots.append(p1) + self._collect(major=False) + p1 = self.stackroots.pop() + p1ref = lltype.cast_opaque_ptr(llmemory.GCREF, p1) + r1 = lltype.malloc(PYOBJ_HDR, flavor='raw', immortal=create_immortal) + r1.ob_refcnt = rc + r1.ob_pypy_link = 0 + r1addr = llmemory.cast_ptr_to_adr(r1) + if is_pyobj: + assert not is_light + self.gc.rawrefcount_create_link_pyobj(p1ref, r1addr) + else: + self.gc.rawrefcount_create_link_pypy(p1ref, r1addr) + assert r1.ob_refcnt == rc + assert r1.ob_pypy_link != 0 + + def check_alive(extra_refcount): + assert r1.ob_refcnt == rc + extra_refcount + assert r1.ob_pypy_link != 0 + p1ref = self.gc.rawrefcount_to_obj(r1addr) + p1 = lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref) + assert p1.x == intval + if not is_pyobj: + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + else: + assert 
self.gc.rawrefcount_from_obj(p1ref) == llmemory.NULL + return p1 + return p1, p1ref, r1, r1addr, check_alive + + def test_rawrefcount_objects_basic(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=True, create_old=old)) + p2 = self.malloc(S) + p2.x = 84 + p2ref = lltype.cast_opaque_ptr(llmemory.GCREF, p2) + r2 = lltype.malloc(PYOBJ_HDR, flavor='raw') + r2.ob_refcnt = 1 + r2.ob_pypy_link = 0 + r2addr = llmemory.cast_ptr_to_adr(r2) + # p2 and r2 are not linked + assert r1.ob_pypy_link != 0 + assert r2.ob_pypy_link == 0 + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + assert self.gc.rawrefcount_from_obj(p2ref) == llmemory.NULL + assert self.gc.rawrefcount_to_obj(r1addr) == p1ref + assert self.gc.rawrefcount_to_obj(r2addr) == lltype.nullptr( + llmemory.GCREF.TO) + lltype.free(r1, flavor='raw') + lltype.free(r2, flavor='raw') + + def test_rawrefcount_objects_collection_survives_from_raw(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=True, create_old=old)) + check_alive(0) + r1.ob_refcnt += 1 + self._collect(major=False) + check_alive(+1) + self._collect(major=True) + check_alive(+1) + r1.ob_refcnt -= 1 + self._collect(major=False) + p1 = check_alive(0) + self._collect(major=True) + py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + py.test.raises(RuntimeError, "p1.x") # dead + self.gc.check_no_more_rawrefcount_state() + assert self.trigger == [] + assert self.gc.rawrefcount_next_dead() == llmemory.NULL + + def test_rawrefcount_dies_quickly(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=True, create_old=old)) + check_alive(0) + self._collect(major=False) + if old: + check_alive(0) + self._collect(major=True) + py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + py.test.raises(RuntimeError, "p1.x") # dead + self.gc.check_no_more_rawrefcount_state() + + def 
test_rawrefcount_objects_collection_survives_from_obj(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=True, create_old=old)) + check_alive(0) + self.stackroots.append(p1) + self._collect(major=False) + check_alive(0) + self._collect(major=True) + check_alive(0) + p1 = self.stackroots.pop() + self._collect(major=False) + check_alive(0) + assert p1.x == 42 + self._collect(major=True) + py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + py.test.raises(RuntimeError, "p1.x") # dead + self.gc.check_no_more_rawrefcount_state() + + def test_rawrefcount_objects_basic_old(self): + self.test_rawrefcount_objects_basic(old=True) + def test_rawrefcount_objects_collection_survives_from_raw_old(self): + self.test_rawrefcount_objects_collection_survives_from_raw(old=True) + def test_rawrefcount_dies_quickly_old(self): + self.test_rawrefcount_dies_quickly(old=True) + def test_rawrefcount_objects_collection_survives_from_obj_old(self): + self.test_rawrefcount_objects_collection_survives_from_obj(old=True) + + def test_pypy_nonlight_survives_from_raw(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=False, create_old=old)) + check_alive(0) + r1.ob_refcnt += 1 + self._collect(major=False) + check_alive(+1) + self._collect(major=True) + check_alive(+1) + r1.ob_refcnt -= 1 + self._collect(major=False) + p1 = check_alive(0) + self._collect(major=True, expected_trigger=1) + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 1 # in the pending list + assert r1.ob_pypy_link == 0 + assert self.gc.rawrefcount_next_dead() == r1addr + assert self.gc.rawrefcount_next_dead() == llmemory.NULL + assert self.gc.rawrefcount_next_dead() == llmemory.NULL + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pypy_nonlight_survives_from_obj(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=False, 
create_old=old)) + check_alive(0) + self.stackroots.append(p1) + self._collect(major=False) + check_alive(0) + self._collect(major=True) + check_alive(0) + p1 = self.stackroots.pop() + self._collect(major=False) + check_alive(0) + assert p1.x == 42 + self._collect(major=True, expected_trigger=1) + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 1 + assert r1.ob_pypy_link == 0 + assert self.gc.rawrefcount_next_dead() == r1addr + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pypy_nonlight_dies_quickly(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_light=False, create_old=old)) + check_alive(0) + if old: + self._collect(major=False) + check_alive(0) + self._collect(major=True, expected_trigger=1) + else: + self._collect(major=False, expected_trigger=1) + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 1 + assert r1.ob_pypy_link == 0 + assert self.gc.rawrefcount_next_dead() == r1addr + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pypy_nonlight_survives_from_raw_old(self): + self.test_pypy_nonlight_survives_from_raw(old=True) + def test_pypy_nonlight_survives_from_obj_old(self): + self.test_pypy_nonlight_survives_from_obj(old=True) + def test_pypy_nonlight_dies_quickly_old(self): + self.test_pypy_nonlight_dies_quickly(old=True) + + @py.test.mark.parametrize('external', [False, True]) + def test_pyobject_pypy_link_dies_on_minor_collection(self, external): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_pyobj=True, force_external=external)) + check_alive(0) + r1.ob_refcnt += 1 # the pyobject is kept alive + self._collect(major=False) + assert r1.ob_refcnt == 1 # refcnt dropped to 1 + assert r1.ob_pypy_link == 0 # detached + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + @py.test.mark.parametrize('old,external', [ + (False, False), (True, False), 
(False, True)]) + def test_pyobject_dies(self, old, external): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_pyobj=True, create_old=old, + force_external=external)) + check_alive(0) + if old: + self._collect(major=False) + check_alive(0) + self._collect(major=True, expected_trigger=1) + else: + self._collect(major=False, expected_trigger=1) + assert r1.ob_refcnt == 1 # refcnt 1, in the pending list + assert r1.ob_pypy_link == 0 # detached + assert self.gc.rawrefcount_next_dead() == r1addr + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + @py.test.mark.parametrize('old,external', [ + (False, False), (True, False), (False, True)]) + def test_pyobject_survives_from_obj(self, old, external): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_pyobj=True, create_old=old, + force_external=external)) + check_alive(0) + self.stackroots.append(p1) + self._collect(major=False) + check_alive(0) + self._collect(major=True) + check_alive(0) + p1 = self.stackroots.pop() + self._collect(major=False) + check_alive(0) + assert p1.x == 42 + assert self.trigger == [] + self._collect(major=True, expected_trigger=1) + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 1 + assert r1.ob_pypy_link == 0 + assert self.gc.rawrefcount_next_dead() == r1addr + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pyobject_attached_to_prebuilt_obj(self): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, create_immortal=True)) + check_alive(0) + self._collect(major=True) + check_alive(0) diff -Nru pypy-4.0.1+dfsg/rpython/memory/gctransform/boehm.py pypy-5.0.1+dfsg/rpython/memory/gctransform/boehm.py --- pypy-4.0.1+dfsg/rpython/memory/gctransform/boehm.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/memory/gctransform/boehm.py 2016-03-19 16:40:12.000000000 +0000 @@ -74,7 +74,7 @@ def gct_fv_gc_malloc_varsize(self, hop, flags, TYPE, 
v_length, c_const_size, c_item_size, c_offset_to_length): - # XXX same behavior for zero=True: in theory that's wrong + # XXX same behavior for zero=True: in theory that's wrong if c_offset_to_length is None: v_raw = hop.genop("direct_call", [self.malloc_varsize_no_length_ptr, v_length, @@ -156,6 +156,11 @@ resulttype = lltype.Signed) hop.genop('int_invert', [v_int], resultvar=hop.spaceop.result) + def gcheader_initdata(self, obj): + hdr = lltype.malloc(self.HDR, immortal=True) + hdr.hash = lltype.identityhash_nocache(obj._as_ptr()) + return hdr._obj + ########## weakrefs ########## # Boehm: weakref objects are small structures containing only a Boehm diff -Nru pypy-4.0.1+dfsg/rpython/memory/gctransform/framework.py pypy-5.0.1+dfsg/rpython/memory/gctransform/framework.py --- pypy-4.0.1+dfsg/rpython/memory/gctransform/framework.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/memory/gctransform/framework.py 2016-03-19 16:40:12.000000000 +0000 @@ -35,15 +35,11 @@ return True return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph, seen) - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - except lltype.DelayedPointer: - return True - if getattr(funcobj, 'random_effects_on_gcobjs', False): + def analyze_external_call(self, funcobj, seen=None): + if funcobj.random_effects_on_gcobjs: return True - return graphanalyze.BoolGraphAnalyzer.analyze_external_call(self, op, - seen) + return graphanalyze.BoolGraphAnalyzer.analyze_external_call( + self, funcobj, seen) def analyze_simple_operation(self, op, graphinfo): if op.opname in ('malloc', 'malloc_varsize'): flags = op.args[1].value @@ -157,6 +153,7 @@ else: # for regular translation: pick the GC from the config GCClass, GC_PARAMS = choose_gc_from_config(translator.config) + self.GCClass = GCClass if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] @@ -292,7 +289,6 @@ s_gcref = SomePtr(llmemory.GCREF) gcdata = 
self.gcdata - translator = self.translator #use the GC flag to find which malloc method to use #malloc_zero_filled == Ture -> malloc_fixedsize/varsize_clear #malloc_zero_filled == Flase -> malloc_fixedsize/varsize @@ -326,7 +322,7 @@ GCClass.malloc_varsize.im_func, [s_gc, s_typeid16] + [annmodel.SomeInteger(nonneg=True) for i in range(4)], s_gcref) - + self.collect_ptr = getfn(GCClass.collect.im_func, [s_gc, annmodel.SomeInteger()], annmodel.s_None) self.can_move_ptr = getfn(GCClass.can_move.im_func, @@ -487,6 +483,29 @@ annmodel.SomeInteger(nonneg=True)], annmodel.s_None) + if hasattr(GCClass, 'rawrefcount_init'): + self.rawrefcount_init_ptr = getfn( + GCClass.rawrefcount_init, + [s_gc, SomePtr(GCClass.RAWREFCOUNT_DEALLOC_TRIGGER)], + annmodel.s_None) + self.rawrefcount_create_link_pypy_ptr = getfn( + GCClass.rawrefcount_create_link_pypy, + [s_gc, s_gcref, SomeAddress()], + annmodel.s_None) + self.rawrefcount_create_link_pyobj_ptr = getfn( + GCClass.rawrefcount_create_link_pyobj, + [s_gc, s_gcref, SomeAddress()], + annmodel.s_None) + self.rawrefcount_from_obj_ptr = getfn( + GCClass.rawrefcount_from_obj, [s_gc, s_gcref], SomeAddress(), + inline = True) + self.rawrefcount_to_obj_ptr = getfn( + GCClass.rawrefcount_to_obj, [s_gc, SomeAddress()], s_gcref, + inline = True) + self.rawrefcount_next_dead_ptr = getfn( + GCClass.rawrefcount_next_dead, [s_gc], SomeAddress(), + inline = True) + if GCClass.can_usually_pin_objects: self.pin_ptr = getfn(GCClass.pin, [s_gc, SomeAddress()], @@ -1124,8 +1143,8 @@ resultvar=op.result) def gct_gc_thread_run(self, hop): - assert self.translator.config.translation.thread - if hasattr(self.root_walker, 'thread_run_ptr'): + if (self.translator.config.translation.thread and + hasattr(self.root_walker, 'thread_run_ptr')): livevars = self.push_roots(hop) assert not livevars, "live GC var around %s!" 
% (hop.spaceop,) hop.genop("direct_call", [self.root_walker.thread_run_ptr]) @@ -1232,6 +1251,50 @@ resultvar=hop.spaceop.result) self.pop_roots(hop, livevars) + def gct_gc_rawrefcount_init(self, hop): + [v_fnptr] = hop.spaceop.args + assert v_fnptr.concretetype == self.GCClass.RAWREFCOUNT_DEALLOC_TRIGGER + hop.genop("direct_call", + [self.rawrefcount_init_ptr, self.c_const_gc, v_fnptr]) + + def gct_gc_rawrefcount_create_link_pypy(self, hop): + [v_gcobj, v_pyobject] = hop.spaceop.args + assert v_gcobj.concretetype == llmemory.GCREF + assert v_pyobject.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_create_link_pypy_ptr, self.c_const_gc, + v_gcobj, v_pyobject]) + + def gct_gc_rawrefcount_create_link_pyobj(self, hop): + [v_gcobj, v_pyobject] = hop.spaceop.args + assert v_gcobj.concretetype == llmemory.GCREF + assert v_pyobject.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_create_link_pyobj_ptr, self.c_const_gc, + v_gcobj, v_pyobject]) + + def gct_gc_rawrefcount_from_obj(self, hop): + [v_gcobj] = hop.spaceop.args + assert v_gcobj.concretetype == llmemory.GCREF + assert hop.spaceop.result.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_from_obj_ptr, self.c_const_gc, v_gcobj], + resultvar=hop.spaceop.result) + + def gct_gc_rawrefcount_to_obj(self, hop): + [v_pyobject] = hop.spaceop.args + assert v_pyobject.concretetype == llmemory.Address + assert hop.spaceop.result.concretetype == llmemory.GCREF + hop.genop("direct_call", + [self.rawrefcount_to_obj_ptr, self.c_const_gc, v_pyobject], + resultvar=hop.spaceop.result) + + def gct_gc_rawrefcount_next_dead(self, hop): + assert hop.spaceop.result.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_next_dead_ptr, self.c_const_gc], + resultvar=hop.spaceop.result) + def _set_into_gc_array_part(self, op): if op.opname == 'setarrayitem': return op.args[1] @@ -1389,7 +1452,7 @@ [v] + previous_steps + 
[c_name, c_null]) else: llops.genop('bare_setfield', [v, c_name, c_null]) - + return elif isinstance(TYPE, lltype.Array): ITEM = TYPE.OF @@ -1416,6 +1479,25 @@ resulttype=llmemory.Address) llops.genop('raw_memclear', [v_adr, v_totalsize]) + def gcheader_initdata(self, obj): + o = lltype.top_container(obj) + needs_hash = self.get_prebuilt_hash(o) is not None + hdr = self.gc_header_for(o, needs_hash) + return hdr._obj + + def get_prebuilt_hash(self, obj): + # for prebuilt objects that need to have their hash stored and + # restored. Note that only structures that are StructNodes all + # the way have their hash stored (and not e.g. structs with var- + # sized arrays at the end). 'obj' must be the top_container. + TYPE = lltype.typeOf(obj) + if not isinstance(TYPE, lltype.GcStruct): + return None + if TYPE._is_varsize(): + return None + return getattr(obj, '_hash_cache_', None) + + class TransformerLayoutBuilder(gctypelayout.TypeLayoutBuilder): diff -Nru pypy-4.0.1+dfsg/rpython/memory/gctransform/refcounting.py pypy-5.0.1+dfsg/rpython/memory/gctransform/refcounting.py --- pypy-4.0.1+dfsg/rpython/memory/gctransform/refcounting.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/memory/gctransform/refcounting.py 2016-03-19 16:40:12.000000000 +0000 @@ -285,3 +285,7 @@ resulttype=llmemory.Address) hop.genop("direct_call", [self.identityhash_ptr, v_adr], resultvar=hop.spaceop.result) + + def gcheader_initdata(self, obj): + top = lltype.top_container(obj) + return self.gcheaderbuilder.header_of_object(top)._obj diff -Nru pypy-4.0.1+dfsg/rpython/memory/gctransform/support.py pypy-5.0.1+dfsg/rpython/memory/gctransform/support.py --- pypy-4.0.1+dfsg/rpython/memory/gctransform/support.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/memory/gctransform/support.py 2016-03-19 16:40:12.000000000 +0000 @@ -73,15 +73,19 @@ hop.exception_cannot_occur() return hop.inputconst(hop.r_result.lowleveltype, hop.s_result.const) +def write(fd, string): + from 
rpython.rlib.rposix import c_write + return c_write(fd, string, len(string)) + def ll_call_destructor(destrptr, destr_v, typename): try: destrptr(destr_v) except Exception, e: try: - os.write(2, "a destructor of type ") - os.write(2, typename) - os.write(2, " raised an exception ") - os.write(2, str(e)) - os.write(2, " ignoring it\n") + write(2, "a destructor of type ") + write(2, typename) + write(2, " raised an exception ") + write(2, str(e)) + write(2, " ignoring it\n") except: pass diff -Nru pypy-4.0.1+dfsg/rpython/memory/gctransform/test/test_framework.py pypy-5.0.1+dfsg/rpython/memory/gctransform/test/test_framework.py --- pypy-4.0.1+dfsg/rpython/memory/gctransform/test/test_framework.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/memory/gctransform/test/test_framework.py 2016-03-19 16:40:12.000000000 +0000 @@ -40,7 +40,7 @@ t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) - db = cbuild.generate_graphs_for_llinterp() + db = cbuild.build_database() entrypointptr = cbuild.getentrypointptr() entrygraph = entrypointptr._obj.graph @@ -69,7 +69,7 @@ return -x t = rtype(g, [int]) gg = graphof(t, g) - assert not CollectAnalyzer(t).analyze_direct_call(gg) + assert not CollectAnalyzer(t).analyze_direct_call(gg) def test_cancollect_external(): fext1 = rffi.llexternal('fext1', [], lltype.Void, releasegil=False) @@ -110,12 +110,12 @@ def entrypoint(argv): return g() + 2 - + t = rtype(entrypoint, [s_list_of_strings]) t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) - db = cbuild.generate_graphs_for_llinterp() + db = cbuild.build_database() def test_no_collect_detection(): from rpython.rlib import rgc @@ -134,12 +134,13 @@ def entrypoint(argv): return g() + 2 - + t = rtype(entrypoint, [s_list_of_strings]) t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, 
gcpolicy=FrameworkGcPolicy2) - f = py.test.raises(Exception, cbuild.generate_graphs_for_llinterp) + with py.test.raises(Exception) as f: + cbuild.build_database() expected = "'no_collect' function can trigger collection: at_most or size < 0: + if at_most < 0: + at_most = 0 + size = at_most + offset += buffer.offset + buffer = buffer.buffer + # self.buffer = buffer self.offset = offset self.size = size @@ -99,6 +122,12 @@ else: return 0 + def as_str_and_offset_maybe(self): + string, offset = self.buffer.as_str_and_offset_maybe() + if string is not None: + return string, offset+self.offset + return None, 0 + def getitem(self, index): return self.buffer.getitem(self.offset + index) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/debug.py pypy-5.0.1+dfsg/rpython/rlib/debug.py --- pypy-4.0.1+dfsg/rpython/rlib/debug.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/debug.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,76 +1,41 @@ -import sys, time +import sys +import time + from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import is_valid_int -from rpython.rtyper.extfunc import ExtFuncEntry +from rpython.rtyper.extfunc import register_external from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem import rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo - -def ll_assert(x, msg): - """After translation to C, this becomes an RPyAssert.""" - assert type(x) is bool, "bad type! 
got %r" % (type(x),) - assert x, msg - -class Entry(ExtRegistryEntry): - _about_ = ll_assert - - def compute_result_annotation(self, s_x, s_msg): - assert s_msg.is_constant(), ("ll_assert(x, msg): " - "the msg must be constant") - return None - - def specialize_call(self, hop): - vlist = hop.inputargs(lltype.Bool, lltype.Void) - hop.exception_cannot_occur() - hop.genop('debug_assert', vlist) - -class FatalError(Exception): - pass - -def fatalerror(msg): - # print the RPython traceback and abort with a fatal error - if not we_are_translated(): - raise FatalError(msg) - from rpython.rtyper.lltypesystem import lltype - from rpython.rtyper.lltypesystem.lloperation import llop - llop.debug_print_traceback(lltype.Void) - llop.debug_fatalerror(lltype.Void, msg) -fatalerror._dont_inline_ = True -fatalerror._jit_look_inside_ = False -fatalerror._annenforceargs_ = [str] - -def fatalerror_notb(msg): - # a variant of fatalerror() that doesn't print the RPython traceback - if not we_are_translated(): - raise FatalError(msg) - from rpython.rtyper.lltypesystem import lltype - from rpython.rtyper.lltypesystem.lloperation import llop - llop.debug_fatalerror(lltype.Void, msg) -fatalerror_notb._dont_inline_ = True -fatalerror_notb._jit_look_inside_ = False -fatalerror_notb._annenforceargs_ = [str] +# Expose these here (public interface) +from rpython.rtyper.debug import ( + ll_assert, FatalError, fatalerror, fatalerror_notb) class DebugLog(list): def debug_print(self, *args): self.append(('debug_print',) + args) + def debug_start(self, category, time=None): self.append(('debug_start', category, time)) + def debug_stop(self, category, time=None): - for i in xrange(len(self)-1, -1, -1): + for i in xrange(len(self) - 1, -1, -1): if self[i][0] == 'debug_start': assert self[i][1] == category, ( "nesting error: starts with %r but stops with %r" % (self[i][1], category)) starttime = self[i][2] if starttime is not None or time is not None: - self[i:] = [(category, starttime, time, 
self[i+1:])] + self[i:] = [(category, starttime, time, self[i + 1:])] else: - self[i:] = [(category, self[i+1:])] + self[i:] = [(category, self[i + 1:])] return assert False, ("nesting error: no start corresponding to stop %r" % (category,)) + def __repr__(self): import pprint return pprint.pformat(list(self)) @@ -161,7 +126,6 @@ return self.bookkeeper.immutablevalue(False) def specialize_call(self, hop): - from rpython.rtyper.lltypesystem import lltype t = hop.rtyper.annotator.translator hop.exception_cannot_occur() if t.config.translation.log: @@ -189,7 +153,6 @@ return annmodel.SomeInteger() def specialize_call(self, hop): - from rpython.rtyper.lltypesystem import lltype hop.exception_cannot_occur() return hop.genop('debug_offset', [], resulttype=lltype.Signed) @@ -223,7 +186,6 @@ return None def specialize_call(self, hop): - from rpython.rtyper.lltypesystem import lltype vlist = hop.inputargs(lltype.Signed) hop.exception_cannot_occur() return hop.genop('debug_forked', vlist) @@ -244,7 +206,6 @@ def compute_result_annotation(self, s_RESTYPE, s_pythonfunction, *args_s): from rpython.annotator import model as annmodel from rpython.rtyper.llannotation import lltype_to_annotation - from rpython.rtyper.lltypesystem import lltype assert s_RESTYPE.is_constant() assert s_pythonfunction.is_constant() s_result = s_RESTYPE.const @@ -255,7 +216,6 @@ def specialize_call(self, hop): from rpython.annotator import model as annmodel - from rpython.rtyper.lltypesystem import lltype RESTYPE = hop.args_s[0].const if not isinstance(RESTYPE, lltype.LowLevelType): assert isinstance(RESTYPE, annmodel.SomeObject) @@ -283,7 +243,8 @@ def compute_result_annotation(self, s_arg, s_checker): if not s_checker.is_constant(): - raise ValueError("Second argument of check_annotation must be constant") + raise ValueError( + "Second argument of check_annotation must be constant") checker = s_checker.const checker(s_arg, self.bookkeeper) return s_arg @@ -308,11 +269,14 @@ assert isinstance(s_arg, 
SomeList) # the logic behind it is that we try not to propagate # make_sure_not_resized, when list comprehension is not on - if self.bookkeeper.annotator.translator.config.translation.list_comprehension_operations: + config = self.bookkeeper.annotator.translator.config + if config.translation.list_comprehension_operations: s_arg.listdef.never_resize() else: from rpython.annotator.annrpython import log - log.WARNING('make_sure_not_resized called, but has no effect since list_comprehension is off') + log.WARNING( + "make_sure_not_resized called, but has no effect since " + "list_comprehension is off") return s_arg def specialize_call(self, hop): @@ -433,15 +397,11 @@ import pdb; pdb.set_trace() if not sys.platform.startswith('win'): - def _make_impl_attach_gdb(): - # circular imports fun :-( - import sys - from rpython.rtyper.lltypesystem import rffi - if sys.platform.startswith('linux'): - # Only necessary on Linux - eci = ExternalCompilationInfo(includes=['string.h', 'assert.h', - 'sys/prctl.h'], - post_include_bits=[""" + if sys.platform.startswith('linux'): + # Only necessary on Linux + eci = ExternalCompilationInfo(includes=['string.h', 'assert.h', + 'sys/prctl.h'], + post_include_bits=[""" /* If we have an old Linux kernel (or compile with old system headers), the following two macros are not defined. But we would still like a pypy translated on such a system to run on a more modern system. 
*/ @@ -455,55 +415,38 @@ prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY); } """]) - allow_attach = rffi.llexternal( - "pypy__allow_attach", [], lltype.Void, - compilation_info=eci, _nowrapper=True) - else: - # Do nothing, there's no prctl - def allow_attach(): - pass - - def impl_attach_gdb(): - import os - allow_attach() - pid = os.getpid() - gdbpid = os.fork() - if gdbpid == 0: - shell = os.environ.get("SHELL") or "/bin/sh" - sepidx = shell.rfind(os.sep) + 1 - if sepidx > 0: - argv0 = shell[sepidx:] - else: - argv0 = shell - try: - os.execv(shell, [argv0, "-c", "gdb -p %d" % pid]) - except OSError as e: - os.write(2, "Could not start GDB: %s" % ( - os.strerror(e.errno))) - raise SystemExit + allow_attach = rffi.llexternal( + "pypy__allow_attach", [], lltype.Void, + compilation_info=eci, _nowrapper=True) + else: + # Do nothing, there's no prctl + def allow_attach(): + pass + + def impl_attach_gdb(): + import os + allow_attach() + pid = os.getpid() + gdbpid = os.fork() + if gdbpid == 0: + shell = os.environ.get("SHELL") or "/bin/sh" + sepidx = shell.rfind(os.sep) + 1 + if sepidx > 0: + argv0 = shell[sepidx:] else: - time.sleep(1) # give the GDB time to attach + argv0 = shell + try: + os.execv(shell, [argv0, "-c", "gdb -p %d" % pid]) + except OSError as e: + os.write(2, "Could not start GDB: %s" % ( + os.strerror(e.errno))) + raise SystemExit + else: + time.sleep(1) # give the GDB time to attach - return impl_attach_gdb else: - def _make_impl_attach_gdb(): - def impl_attach_gdb(): - print "Don't know how to attach GDB on Windows" - return impl_attach_gdb - - -class FunEntry(ExtFuncEntry): - _about_ = attach_gdb - signature_args = [] - #lltypeimpl = staticmethod(impl_attach_gdb) --- done lazily below - name = "impl_attach_gdb" - - @property - def lltypeimpl(self): - if not hasattr(self.__class__, '_lltypeimpl'): - self.__class__._lltypeimpl = staticmethod(_make_impl_attach_gdb()) - return self._lltypeimpl + def impl_attach_gdb(): + print "Don't know how to attach GDB on 
Windows" - def compute_result_annotation(self, *args_s): - from rpython.annotator.model import s_None - return s_None +register_external(attach_gdb, [], result=None, + export_name="impl_attach_gdb", llimpl=impl_attach_gdb) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/entrypoint.py pypy-5.0.1+dfsg/rpython/rlib/entrypoint.py --- pypy-4.0.1+dfsg/rpython/rlib/entrypoint.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/entrypoint.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,4 +1,4 @@ -secondary_entrypoints = {} +secondary_entrypoints = {"main": []} import py from rpython.rtyper.lltypesystem import lltype, rffi @@ -23,10 +23,10 @@ return deco def entrypoint_lowlevel(key, argtypes, c_name=None, relax=False): - """ Note: entrypoint should call llop.gc_stack_bottom on it's own. - That's necessary for making it work with asmgcc and hence JIT + """ Note: entrypoint should acquire the GIL and call + llop.gc_stack_bottom on its own. - If in doubt, use entrypoint(). + If in doubt, use entrypoint_highlevel(). if key == 'main' than it's included by default """ @@ -43,14 +43,25 @@ pypy_debug_catch_fatal_exception = rffi.llexternal('pypy_debug_catch_fatal_exception', [], lltype.Void) -def entrypoint(key, argtypes, c_name=None): - """if key == 'main' than it's included by default +def entrypoint_highlevel(key, argtypes, c_name=None): + """ + Export the decorated Python function as C, under the name 'c_name'. + + The function is wrapped inside a function that does the necessary + GIL-acquiring and GC-root-stack-bottom-ing. + + If key == 'main' then it's included by default; otherwise you need + to list the key in the config's secondaryentrypoints (or give it + on the command-line with --entrypoints when translating). 
""" def deco(func): source = py.code.Source(""" + from rpython.rlib import rgil + def wrapper(%(args)s): - # the tuple has to be killed, but it's fine because this is - # called from C + # acquire the GIL + rgil.acquire() + # rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py # this should not raise @@ -67,6 +78,9 @@ llop.debug_fatalerror(lltype.Void, "error in c callback") assert 0 # dead code rffi.stackcounter.stacks_counter -= 1 + # release the GIL + rgil.release() + # return res """ % {'args': ', '.join(['arg%d' % i for i in range(len(argtypes))])}) d = {'rffi': rffi, 'lltype': lltype, @@ -79,18 +93,19 @@ if c_name is not None: wrapper.c_name = c_name export_symbol(wrapper) - return wrapper + # + # the return value of the decorator is *the original function*, + # so that it can be called from Python too. The wrapper is only + # registered in secondary_entrypoints where genc finds it. + func.exported_wrapper = wrapper + return func return deco -# the point of dance below is so the call to rpython_startup_code actually -# does call asm_stack_bottom. It's here because there is no other good place. -# This thing is imported by any target which has any API, so it'll get -# registered - -RPython_StartupCode = rffi.llexternal('RPython_StartupCode', [], lltype.Void, - _nowrapper=True, - random_effects_on_gcobjs=True) - -@entrypoint('main', [], c_name='rpython_startup_code') -def rpython_startup_code(): - RPython_StartupCode() + +def entrypoint(*args, **kwds): + raise Exception("entrypoint.entrypoint() is removed because of a bug. " + "Remove the 'aroundstate' code in your functions and " + "then call entrypoint_highlevel(), which does that for " + "you. 
Another difference is that entrypoint_highlevel() " + "returns the normal Python function, which can be safely " + "called from more Python code.") diff -Nru pypy-4.0.1+dfsg/rpython/rlib/exports.py pypy-5.0.1+dfsg/rpython/rlib/exports.py --- pypy-4.0.1+dfsg/rpython/rlib/exports.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/exports.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,7 @@ from rpython.rtyper.lltypesystem.lltype import typeOf, ContainerType +# XXX kill me + def export_struct(name, struct): assert name not in EXPORTS_names, "Duplicate export " + name assert isinstance(typeOf(struct), ContainerType) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/jit.py pypy-5.0.1+dfsg/rpython/rlib/jit.py --- pypy-4.0.1+dfsg/rpython/rlib/jit.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/jit.py 2016-03-19 16:40:15.000000000 +0000 @@ -284,7 +284,7 @@ def loop_unrolling_heuristic(lst, size, cutoff=2): """ In which cases iterating over items of lst can be unrolled """ - return isvirtual(lst) or (isconstant(size) and size <= cutoff) + return size == 0 or isvirtual(lst) or (isconstant(size) and size <= cutoff) class Entry(ExtRegistryEntry): _about_ = hint @@ -604,7 +604,7 @@ get_printable_location=None, confirm_enter_jit=None, can_never_inline=None, should_unroll_one_iteration=None, name='jitdriver', check_untranslated=True, vectorize=False, - get_unique_id=None): + get_unique_id=None, is_recursive=False): if greens is not None: self.greens = greens self.name = name @@ -623,6 +623,8 @@ raise AttributeError("no 'greens' or 'reds' supplied") if virtualizables is not None: self.virtualizables = virtualizables + if get_unique_id is not None: + assert is_recursive, "get_unique_id and is_recursive must be specified at the same time" for v in self.virtualizables: assert v in self.reds # if reds are automatic, they won't be passed to jit_merge_point, so @@ -643,6 +645,7 @@ self.can_never_inline = can_never_inline 
self.should_unroll_one_iteration = should_unroll_one_iteration self.check_untranslated = check_untranslated + self.is_recursive = is_recursive self.vec = vectorize def _freeze_(self): @@ -1061,6 +1064,12 @@ greenkey where it started, reason is a string why it got aborted """ + def on_trace_too_long(self, jitdriver, greenkey, greenkey_repr): + """ A hook called each time we abort the trace because it's too + long with the greenkey being the one responsible for the + disabled function + """ + #def before_optimize(self, debug_info): # """ A hook called before optimizer is run, called with instance of # JitDebugInfo. Overwrite for custom behavior @@ -1108,7 +1117,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.rclass import ll_type - ll_assert(ll_value == lltype.nullptr(lltype.typeOf(ll_value).TO), "record_exact_class called with None argument") + ll_assert(ll_value != lltype.nullptr(lltype.typeOf(ll_value).TO), "record_exact_class called with None argument") ll_assert(ll_type(ll_value) is ll_cls, "record_exact_class called with invalid arguments") llop.jit_record_exact_class(lltype.Void, ll_value, ll_cls) @@ -1159,6 +1168,24 @@ hop.exception_is_here() return hop.genop('jit_conditional_call', args_v) +def enter_portal_frame(unique_id): + """call this when starting to interpret a function. calling this is not + necessary for almost all interpreters. The only exception is stackless + interpreters where the portal never calls itself. + """ + from rpython.rtyper.lltypesystem import lltype + from rpython.rtyper.lltypesystem.lloperation import llop + llop.jit_enter_portal_frame(lltype.Void, unique_id) + +def leave_portal_frame(): + """call this after the end of executing a function. calling this is not + necessary for almost all interpreters. The only exception is stackless + interpreters where the portal never calls itself. 
+ """ + from rpython.rtyper.lltypesystem import lltype + from rpython.rtyper.lltypesystem.lloperation import llop + llop.jit_leave_portal_frame(lltype.Void) + class Counters(object): counters=""" TRACING diff -Nru pypy-4.0.1+dfsg/rpython/rlib/objectmodel.py pypy-5.0.1+dfsg/rpython/rlib/objectmodel.py --- pypy-4.0.1+dfsg/rpython/rlib/objectmodel.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/objectmodel.py 2016-03-19 16:40:12.000000000 +0000 @@ -114,6 +114,8 @@ specialize = _Specialize() +NOT_CONSTANT = object() # to use in enforceargs() + def enforceargs(*types_, **kwds): """ Decorate a function with forcing of RPython-level types on arguments. None means no enforcing. @@ -273,8 +275,6 @@ return lltype.Signed malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0) -running_on_llinterp = CDefinedIntSymbolic('RUNNING_ON_LLINTERP', default=1) -# running_on_llinterp is meant to have the value 0 in all backends # ____________________________________________________________ @@ -292,6 +292,20 @@ def sc_we_are_translated(ctx): return Constant(True) +def register_replacement_for(replaced_function, sandboxed_name=None): + def wrap(func): + from rpython.rtyper.extregistry import ExtRegistryEntry + class ExtRegistry(ExtRegistryEntry): + _about_ = replaced_function + def compute_annotation(self): + if sandboxed_name: + config = self.bookkeeper.annotator.translator.config + if config.translation.sandbox: + func._sandbox_external_name = sandboxed_name + func._dont_inline_ = True + return self.bookkeeper.immutablevalue(func) + return func + return wrap def keepalive_until_here(*values): pass @@ -319,6 +333,25 @@ # XXX this can be made more efficient in the future return bytearray(str(i)) +def fetch_translated_config(): + """Returns the config that is current when translating. + Returns None if not translated. 
+ """ + return None + +class Entry(ExtRegistryEntry): + _about_ = fetch_translated_config + + def compute_result_annotation(self): + config = self.bookkeeper.annotator.translator.config + return self.bookkeeper.immutablevalue(config) + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + translator = hop.rtyper.annotator.translator + hop.exception_cannot_occur() + return hop.inputconst(lltype.Void, translator.config) + # ____________________________________________________________ class FREED_OBJECT(object): @@ -585,22 +618,10 @@ def hlinvoke(repr, llcallable, *args): raise TypeError("hlinvoke is meant to be rtyped and not called direclty") -def invoke_around_extcall(before, after): - """Call before() before any external function call, and after() after. - At the moment only one pair before()/after() can be registered at a time. - """ - # NOTE: the hooks are cleared during translation! To be effective - # in a compiled program they must be set at run-time. - from rpython.rtyper.lltypesystem import rffi - rffi.aroundstate.before = before - rffi.aroundstate.after = after - # the 'aroundstate' contains regular function and not ll pointers to them, - # but let's call llhelper() anyway to force their annotation - from rpython.rtyper.annlowlevel import llhelper - llhelper(rffi.AroundFnPtr, before) - llhelper(rffi.AroundFnPtr, after) - def is_in_callback(): + """Returns True if we're currently in a callback *or* if there are + multiple threads around. 
+ """ from rpython.rtyper.lltypesystem import rffi return rffi.stackcounter.stacks_counter > 1 diff -Nru pypy-4.0.1+dfsg/rpython/rlib/_os_support.py pypy-5.0.1+dfsg/rpython/rlib/_os_support.py --- pypy-4.0.1+dfsg/rpython/rlib/_os_support.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/_os_support.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,109 @@ +import sys + +from rpython.annotator.model import s_Str0, s_Unicode0 +from rpython.rlib import rstring +from rpython.rlib.objectmodel import specialize +from rpython.rtyper.lltypesystem import rffi + + +_CYGWIN = sys.platform == 'cygwin' +_WIN32 = sys.platform.startswith('win') +UNDERSCORE_ON_WIN32 = '_' if _WIN32 else '' +_MACRO_ON_POSIX = True if not _WIN32 else None + + +class StringTraits(object): + str = str + str0 = s_Str0 + CHAR = rffi.CHAR + CCHARP = rffi.CCHARP + charp2str = staticmethod(rffi.charp2str) + charpsize2str = staticmethod(rffi.charpsize2str) + scoped_str2charp = staticmethod(rffi.scoped_str2charp) + str2charp = staticmethod(rffi.str2charp) + free_charp = staticmethod(rffi.free_charp) + scoped_alloc_buffer = staticmethod(rffi.scoped_alloc_buffer) + + @staticmethod + @specialize.argtype(0) + def as_str(path): + assert path is not None + if isinstance(path, str): + return path + elif isinstance(path, unicode): + # This never happens in PyPy's Python interpreter! + # Only in raw RPython code that uses unicode strings. + # We implement python2 behavior: silently convert to ascii. 
+ return path.encode('ascii') + else: + return path.as_bytes() + + @staticmethod + @specialize.argtype(0) + def as_str0(path): + res = StringTraits.as_str(path) + rstring.check_str0(res) + return res + + +class UnicodeTraits(object): + str = unicode + str0 = s_Unicode0 + CHAR = rffi.WCHAR_T + CCHARP = rffi.CWCHARP + charp2str = staticmethod(rffi.wcharp2unicode) + charpsize2str = staticmethod(rffi.wcharpsize2unicode) + str2charp = staticmethod(rffi.unicode2wcharp) + scoped_str2charp = staticmethod(rffi.scoped_unicode2wcharp) + free_charp = staticmethod(rffi.free_wcharp) + scoped_alloc_buffer = staticmethod(rffi.scoped_alloc_unicodebuffer) + + @staticmethod + @specialize.argtype(0) + def as_str(path): + assert path is not None + if isinstance(path, unicode): + return path + else: + return path.as_unicode() + + @staticmethod + @specialize.argtype(0) + def as_str0(path): + res = UnicodeTraits.as_str(path) + rstring.check_str0(res) + return res + + +string_traits = StringTraits() +unicode_traits = UnicodeTraits() + + +# Returns True when the unicode function should be called: +# - on Windows +# - if the path is Unicode. 
+if _WIN32: + @specialize.argtype(0) + def _prefer_unicode(path): + assert path is not None + if isinstance(path, str): + return False + elif isinstance(path, unicode): + return True + else: + return path.is_unicode + + @specialize.argtype(0) + def _preferred_traits(path): + if _prefer_unicode(path): + return unicode_traits + else: + return string_traits +else: + @specialize.argtype(0) + def _prefer_unicode(path): + return False + + @specialize.argtype(0) + def _preferred_traits(path): + return string_traits diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rarithmetic.py pypy-5.0.1+dfsg/rpython/rlib/rarithmetic.py --- pypy-4.0.1+dfsg/rpython/rlib/rarithmetic.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rarithmetic.py 2016-03-19 16:40:12.000000000 +0000 @@ -536,7 +536,7 @@ else: r_int64 = int -# needed for ll_os_stat.time_t_to_FILE_TIME in the 64 bit case +# needed for rposix_stat.time_t_to_FILE_TIME in the 64 bit case r_uint32 = build_int('r_uint32', False, 32) SHRT_MIN = -2**(_get_bitsize('h') - 1) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rawrefcount.py pypy-5.0.1+dfsg/rpython/rlib/rawrefcount.py --- pypy-4.0.1+dfsg/rpython/rlib/rawrefcount.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rawrefcount.py 2016-03-19 16:40:15.000000000 +0000 @@ -0,0 +1,266 @@ +# +# See documentation in pypy/doc/discussion/rawrefcount.rst +# +# This is meant for pypy's cpyext module, but is a generally +# useful interface over our GC. 
XXX "pypy" should be removed here +# +import sys, weakref +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rlib.objectmodel import we_are_translated, specialize +from rpython.rtyper.extregistry import ExtRegistryEntry +from rpython.rlib import rgc + + +REFCNT_FROM_PYPY = sys.maxint // 4 + 1 +REFCNT_FROM_PYPY_LIGHT = REFCNT_FROM_PYPY + (sys.maxint // 2 + 1) + +RAWREFCOUNT_DEALLOC_TRIGGER = lltype.Ptr(lltype.FuncType([], lltype.Void)) + + +def _build_pypy_link(p): + res = len(_adr2pypy) + _adr2pypy.append(p) + return res + + +def init(dealloc_trigger_callback=None): + """NOT_RPYTHON: set up rawrefcount with the GC. This is only used + for tests; it should not be called at all during translation. + """ + global _p_list, _o_list, _adr2pypy, _pypy2ob + global _d_list, _dealloc_trigger_callback + _p_list = [] + _o_list = [] + _adr2pypy = [None] + _pypy2ob = {} + _d_list = [] + _dealloc_trigger_callback = dealloc_trigger_callback + +def create_link_pypy(p, ob): + "NOT_RPYTHON: a link where the PyPy object contains some or all the data" + #print 'create_link_pypy\n\t%s\n\t%s' % (p, ob) + assert p not in _pypy2ob + #assert not ob.c_ob_pypy_link + ob.c_ob_pypy_link = _build_pypy_link(p) + _pypy2ob[p] = ob + _p_list.append(ob) + +def create_link_pyobj(p, ob): + """NOT_RPYTHON: a link where the PyObject contains all the data. 
+ from_obj() will not work on this 'p'.""" + #print 'create_link_pyobj\n\t%s\n\t%s' % (p, ob) + assert p not in _pypy2ob + #assert not ob.c_ob_pypy_link + ob.c_ob_pypy_link = _build_pypy_link(p) + _o_list.append(ob) + +def from_obj(OB_PTR_TYPE, p): + "NOT_RPYTHON" + ob = _pypy2ob.get(p) + if ob is None: + return lltype.nullptr(OB_PTR_TYPE.TO) + assert lltype.typeOf(ob) == OB_PTR_TYPE + return ob + +def to_obj(Class, ob): + "NOT_RPYTHON" + link = ob.c_ob_pypy_link + if link == 0: + return None + p = _adr2pypy[link] + assert isinstance(p, Class) + return p + +def next_dead(OB_PTR_TYPE): + if len(_d_list) == 0: + return lltype.nullptr(OB_PTR_TYPE.TO) + ob = _d_list.pop() + assert lltype.typeOf(ob) == OB_PTR_TYPE + return ob + +def _collect(track_allocation=True): + """NOT_RPYTHON: for tests only. Emulates a GC collection. + Will invoke dealloc_trigger_callback() once if there are objects + whose _Py_Dealloc() should be called. + """ + def detach(ob, wr_list): + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY + assert ob.c_ob_pypy_link + p = _adr2pypy[ob.c_ob_pypy_link] + assert p is not None + _adr2pypy[ob.c_ob_pypy_link] = None + wr_list.append((ob, weakref.ref(p))) + return p + + global _p_list, _o_list + wr_p_list = [] + new_p_list = [] + for ob in reversed(_p_list): + if ob.c_ob_refcnt not in (REFCNT_FROM_PYPY, REFCNT_FROM_PYPY_LIGHT): + new_p_list.append(ob) + else: + p = detach(ob, wr_p_list) + del _pypy2ob[p] + del p + ob = None + _p_list = Ellipsis + + wr_o_list = [] + for ob in reversed(_o_list): + detach(ob, wr_o_list) + _o_list = Ellipsis + + rgc.collect() # forces the cycles to be resolved and the weakrefs to die + rgc.collect() + rgc.collect() + + def attach(ob, wr, final_list): + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY + p = wr() + if p is not None: + assert ob.c_ob_pypy_link + _adr2pypy[ob.c_ob_pypy_link] = p + final_list.append(ob) + return p + else: + ob.c_ob_pypy_link = 0 + if ob.c_ob_refcnt >= REFCNT_FROM_PYPY_LIGHT: + ob.c_ob_refcnt -= 
REFCNT_FROM_PYPY_LIGHT + ob.c_ob_pypy_link = 0 + if ob.c_ob_refcnt == 0: + lltype.free(ob, flavor='raw', + track_allocation=track_allocation) + else: + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY + assert ob.c_ob_refcnt < int(REFCNT_FROM_PYPY_LIGHT * 0.99) + ob.c_ob_refcnt -= REFCNT_FROM_PYPY + ob.c_ob_pypy_link = 0 + if ob.c_ob_refcnt == 0: + ob.c_ob_refcnt = 1 + _d_list.append(ob) + return None + + _p_list = new_p_list + for ob, wr in wr_p_list: + p = attach(ob, wr, _p_list) + if p is not None: + _pypy2ob[p] = ob + _o_list = [] + for ob, wr in wr_o_list: + attach(ob, wr, _o_list) + + if _d_list: + res = _dealloc_trigger_callback() + if res == "RETRY": + _collect(track_allocation=track_allocation) + +_keepalive_forever = set() +def _dont_free_any_more(): + "Make sure that any object still referenced won't be freed any more." + for ob in _p_list + _o_list: + _keepalive_forever.add(to_obj(object, ob)) + del _d_list[:] + +# ____________________________________________________________ + + +def _unspec_p(hop, v_p): + assert isinstance(v_p.concretetype, lltype.Ptr) + assert v_p.concretetype.TO._gckind == 'gc' + return hop.genop('cast_opaque_ptr', [v_p], resulttype=llmemory.GCREF) + +def _unspec_ob(hop, v_ob): + assert isinstance(v_ob.concretetype, lltype.Ptr) + assert v_ob.concretetype.TO._gckind == 'raw' + return hop.genop('cast_ptr_to_adr', [v_ob], resulttype=llmemory.Address) + +def _spec_p(hop, v_p): + assert v_p.concretetype == llmemory.GCREF + return hop.genop('cast_opaque_ptr', [v_p], + resulttype=hop.r_result.lowleveltype) + +def _spec_ob(hop, v_ob): + assert v_ob.concretetype == llmemory.Address + return hop.genop('cast_adr_to_ptr', [v_ob], + resulttype=hop.r_result.lowleveltype) + + +class Entry(ExtRegistryEntry): + _about_ = init + + def compute_result_annotation(self, s_dealloc_callback): + from rpython.rtyper.llannotation import SomePtr + assert isinstance(s_dealloc_callback, SomePtr) # ll-ptr-to-function + + def specialize_call(self, hop): + 
hop.exception_cannot_occur() + [v_dealloc_callback] = hop.inputargs(hop.args_r[0]) + hop.genop('gc_rawrefcount_init', [v_dealloc_callback]) + + +class Entry(ExtRegistryEntry): + _about_ = (create_link_pypy, create_link_pyobj) + + def compute_result_annotation(self, s_p, s_ob): + pass + + def specialize_call(self, hop): + if self.instance is create_link_pypy: + name = 'gc_rawrefcount_create_link_pypy' + elif self.instance is create_link_pyobj: + name = 'gc_rawrefcount_create_link_pyobj' + v_p, v_ob = hop.inputargs(*hop.args_r) + hop.exception_cannot_occur() + hop.genop(name, [_unspec_p(hop, v_p), _unspec_ob(hop, v_ob)]) + + +class Entry(ExtRegistryEntry): + _about_ = from_obj + + def compute_result_annotation(self, s_OB_PTR_TYPE, s_p): + from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import lltype_to_annotation + assert (isinstance(s_p, annmodel.SomeInstance) or + annmodel.s_None.contains(s_p)) + assert s_OB_PTR_TYPE.is_constant() + return lltype_to_annotation(s_OB_PTR_TYPE.const) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_p = hop.inputarg(hop.args_r[1], arg=1) + v_ob = hop.genop('gc_rawrefcount_from_obj', [_unspec_p(hop, v_p)], + resulttype = llmemory.Address) + return _spec_ob(hop, v_ob) + +class Entry(ExtRegistryEntry): + _about_ = to_obj + + def compute_result_annotation(self, s_Class, s_ob): + from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr + assert isinstance(s_ob, SomePtr) + assert s_Class.is_constant() + classdef = self.bookkeeper.getuniqueclassdef(s_Class.const) + return annmodel.SomeInstance(classdef, can_be_None=True) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_ob = hop.inputarg(hop.args_r[1], arg=1) + v_p = hop.genop('gc_rawrefcount_to_obj', [_unspec_ob(hop, v_ob)], + resulttype = llmemory.GCREF) + return _spec_p(hop, v_p) + +class Entry(ExtRegistryEntry): + _about_ = next_dead + + def 
compute_result_annotation(self, s_OB_PTR_TYPE): + from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import lltype_to_annotation + assert s_OB_PTR_TYPE.is_constant() + return lltype_to_annotation(s_OB_PTR_TYPE.const) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_ob = hop.genop('gc_rawrefcount_next_dead', [], + resulttype = llmemory.Address) + return _spec_ob(hop, v_ob) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rbigint.py pypy-5.0.1+dfsg/rpython/rlib/rbigint.py --- pypy-4.0.1+dfsg/rpython/rlib/rbigint.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rbigint.py 2016-03-19 16:40:15.000000000 +0000 @@ -414,14 +414,18 @@ @jit.elidable def _toint_helper(self): x = self._touint_helper() - # Haven't lost any bits, but if the sign bit is set we're in - # trouble *unless* this is the min negative number. So, - # trouble iff sign bit set && (positive || some bit set other - # than the sign bit). - sign = self.sign - if intmask(x) < 0 and (sign > 0 or (x << 1) != 0): - raise OverflowError - return intmask(intmask(x) * sign) + # Haven't lost any bits so far + if self.sign >= 0: + res = intmask(x) + if res < 0: + raise OverflowError + else: + # Use "-" on the unsigned number, not on the signed number. + # This is needed to produce valid C code. + res = intmask(-x) + if res >= 0: + raise OverflowError + return res @jit.elidable def tolonglong(self): @@ -2794,8 +2798,10 @@ def parse_digit_string(parser): # helper for fromstr - a = rbigint() base = parser.base + if (base & (base - 1)) == 0: + return parse_string_from_binary_base(parser) + a = rbigint() digitmax = BASE_MAX[base] tens, dig = 1, 0 while True: @@ -2811,3 +2817,52 @@ tens *= base a.sign *= parser.sign return a + +def parse_string_from_binary_base(parser): + # The point to this routine is that it takes time linear in the number of + # string characters. 
+ from rpython.rlib.rstring import ParseStringError + + base = parser.base + if base == 2: bits_per_char = 1 + elif base == 4: bits_per_char = 2 + elif base == 8: bits_per_char = 3 + elif base == 16: bits_per_char = 4 + elif base == 32: bits_per_char = 5 + else: + raise AssertionError + + # n <- total number of bits needed, while moving 'parser' to the end + n = 0 + while parser.next_digit() >= 0: + n += 1 + + # b <- number of Python digits needed, = ceiling(n/SHIFT). */ + try: + b = ovfcheck(n * bits_per_char) + b = ovfcheck(b + (SHIFT - 1)) + except OverflowError: + raise ParseStringError("long string too large to convert") + b = (b // SHIFT) or 1 + z = rbigint([NULLDIGIT] * b, sign=parser.sign) + + # Read string from right, and fill in long from left; i.e., + # from least to most significant in both. + accum = _widen_digit(0) + bits_in_accum = 0 + pdigit = 0 + for _ in range(n): + k = parser.prev_digit() + accum |= _widen_digit(k) << bits_in_accum + bits_in_accum += bits_per_char + if bits_in_accum >= SHIFT: + z.setdigit(pdigit, accum) + pdigit += 1 + assert pdigit <= b + accum >>= SHIFT + bits_in_accum -= SHIFT + + if bits_in_accum: + z.setdigit(pdigit, accum) + z._normalize() + return z diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rerased.py pypy-5.0.1+dfsg/rpython/rlib/rerased.py --- pypy-4.0.1+dfsg/rpython/rlib/rerased.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rerased.py 2016-03-19 16:40:12.000000000 +0000 @@ -16,9 +16,8 @@ import sys from rpython.annotator import model as annmodel -from rpython.tool.pairtype import pairtype from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.rtyper.rmodel import Repr +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib.rarithmetic import is_valid_int @@ -96,19 +95,19 @@ def compute_result_annotation(self, s_obj): 
identity.enter_tunnel(self.bookkeeper, s_obj) - return SomeErased() + return _some_erased() def specialize_call(self, hop): bk = hop.rtyper.annotator.bookkeeper s_obj = identity.get_input_annotation(bk) hop.exception_cannot_occur() - return hop.r_result.rtype_erase(hop, s_obj) + return _rtype_erase(hop, s_obj) class Entry(ExtRegistryEntry): _about_ = unerase def compute_result_annotation(self, s_obj): - assert SomeErased().contains(s_obj) + assert _some_erased().contains(s_obj) return identity.leave_tunnel(self.bookkeeper) def specialize_call(self, hop): @@ -116,7 +115,7 @@ if hop.r_result.lowleveltype is lltype.Void: return hop.inputconst(lltype.Void, None) [v] = hop.inputargs(hop.args_r[0]) - return hop.args_r[0].rtype_unerase(hop, v) + return _rtype_unerase(hop, v) return erase, unerase @@ -134,6 +133,21 @@ def __repr__(self): return "Erased(%r, %r)" % (self._x, self._identity) + def _convert_const_ptr(self, r_self): + value = self + if value._identity is _identity_for_ints: + config = r_self.rtyper.annotator.translator.config + assert config.translation.taggedpointers, "need to enable tagged pointers to use erase_int" + return lltype.cast_int_to_ptr(r_self.lowleveltype, value._x * 2 + 1) + bk = r_self.rtyper.annotator.bookkeeper + s_obj = value._identity.get_input_annotation(bk) + r_obj = r_self.rtyper.getrepr(s_obj) + if r_obj.lowleveltype is lltype.Void: + return lltype.nullptr(r_self.lowleveltype.TO) + v = r_obj.convert_const(value._x) + return lltype.cast_opaque_ptr(r_self.lowleveltype, v) + + class Entry(ExtRegistryEntry): _about_ = erase_int @@ -141,22 +155,22 @@ config = self.bookkeeper.annotator.translator.config assert config.translation.taggedpointers, "need to enable tagged pointers to use erase_int" assert annmodel.SomeInteger().contains(s_obj) - return SomeErased() + return _some_erased() def specialize_call(self, hop): - return hop.r_result.rtype_erase_int(hop) + return _rtype_erase_int(hop) class Entry(ExtRegistryEntry): _about_ = unerase_int def 
compute_result_annotation(self, s_obj): - assert SomeErased().contains(s_obj) + assert _some_erased().contains(s_obj) return annmodel.SomeInteger() def specialize_call(self, hop): [v] = hop.inputargs(hop.args_r[0]) assert isinstance(hop.s_result, annmodel.SomeInteger) - return hop.args_r[0].rtype_unerase_int(hop, v) + return _rtype_unerase_int(hop, v) def ll_unerase_int(gcref): x = llop.cast_ptr_to_int(lltype.Signed, gcref) @@ -171,71 +185,39 @@ identity = self.instance._identity s_obj = self.bookkeeper.immutablevalue(self.instance._x) identity.enter_tunnel(self.bookkeeper, s_obj) - return SomeErased() + return _some_erased() # annotation and rtyping support -class SomeErased(annmodel.SomeObject): - - def can_be_none(self): - return False # cannot be None, but can contain a None - - def rtyper_makerepr(self, rtyper): - return ErasedRepr(rtyper) - - def rtyper_makekey(self): - return self.__class__, - -class __extend__(pairtype(SomeErased, SomeErased)): - - def union((serased1, serased2)): - return SomeErased() +def _some_erased(): + return lltype_to_annotation(llmemory.GCREF) - -class ErasedRepr(Repr): - lowleveltype = llmemory.GCREF - def __init__(self, rtyper): - self.rtyper = rtyper - - def rtype_erase(self, hop, s_obj): - hop.exception_cannot_occur() - r_obj = self.rtyper.getrepr(s_obj) - if r_obj.lowleveltype is lltype.Void: - return hop.inputconst(self.lowleveltype, - lltype.nullptr(self.lowleveltype.TO)) - [v_obj] = hop.inputargs(r_obj) - return hop.genop('cast_opaque_ptr', [v_obj], - resulttype=self.lowleveltype) - - def rtype_unerase(self, hop, s_obj): - [v] = hop.inputargs(hop.args_r[0]) - return hop.genop('cast_opaque_ptr', [v], resulttype=hop.r_result) - - def rtype_unerase_int(self, hop, v): - hop.exception_cannot_occur() - return hop.gendirectcall(ll_unerase_int, v) - - def rtype_erase_int(self, hop): - [v_value] = hop.inputargs(lltype.Signed) - c_one = hop.inputconst(lltype.Signed, 1) - hop.exception_is_here() - v2 = hop.genop('int_add_ovf', 
[v_value, v_value], - resulttype = lltype.Signed) - v2p1 = hop.genop('int_add', [v2, c_one], - resulttype = lltype.Signed) - v_instance = hop.genop('cast_int_to_ptr', [v2p1], - resulttype=self.lowleveltype) - return v_instance - - def convert_const(self, value): - if value._identity is _identity_for_ints: - config = self.rtyper.annotator.translator.config - assert config.translation.taggedpointers, "need to enable tagged pointers to use erase_int" - return lltype.cast_int_to_ptr(self.lowleveltype, value._x * 2 + 1) - bk = self.rtyper.annotator.bookkeeper - s_obj = value._identity.get_input_annotation(bk) - r_obj = self.rtyper.getrepr(s_obj) - if r_obj.lowleveltype is lltype.Void: - return lltype.nullptr(self.lowleveltype.TO) - v = r_obj.convert_const(value._x) - return lltype.cast_opaque_ptr(self.lowleveltype, v) +def _rtype_erase(hop, s_obj): + hop.exception_cannot_occur() + r_obj = hop.rtyper.getrepr(s_obj) + if r_obj.lowleveltype is lltype.Void: + return hop.inputconst(llmemory.GCREF, + lltype.nullptr(llmemory.GCREF.TO)) + [v_obj] = hop.inputargs(r_obj) + return hop.genop('cast_opaque_ptr', [v_obj], + resulttype=llmemory.GCREF) + +def _rtype_unerase(hop, s_obj): + [v] = hop.inputargs(hop.args_r[0]) + return hop.genop('cast_opaque_ptr', [v], resulttype=hop.r_result) + +def _rtype_unerase_int(hop, v): + hop.exception_cannot_occur() + return hop.gendirectcall(ll_unerase_int, v) + +def _rtype_erase_int(hop): + [v_value] = hop.inputargs(lltype.Signed) + c_one = hop.inputconst(lltype.Signed, 1) + hop.exception_is_here() + v2 = hop.genop('int_add_ovf', [v_value, v_value], + resulttype = lltype.Signed) + v2p1 = hop.genop('int_add', [v2, c_one], + resulttype = lltype.Signed) + v_instance = hop.genop('cast_int_to_ptr', [v2p1], + resulttype=llmemory.GCREF) + return v_instance diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rfile.py pypy-5.0.1+dfsg/rpython/rlib/rfile.py --- pypy-4.0.1+dfsg/rpython/rlib/rfile.py 2015-11-19 19:21:40.000000000 +0000 +++ 
pypy-5.0.1+dfsg/rpython/rlib/rfile.py 2016-03-19 16:40:12.000000000 +0000 @@ -173,7 +173,6 @@ def create_fdopen_rfile(fd, mode="r", buffering=-1): newmode = _sanitize_mode(mode) - fd = rffi.cast(rffi.INT, fd) rposix.validate_fd(fd) ll_mode = rffi.str2charp(newmode) try: diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rgc.py pypy-5.0.1+dfsg/rpython/rlib/rgc.py --- pypy-4.0.1+dfsg/rpython/rlib/rgc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rgc.py 2016-03-19 16:40:12.000000000 +0000 @@ -172,7 +172,7 @@ # although a pinned object can't move we must return 'False'. A pinned # object can be unpinned any time and becomes movable. return False - i = 0 + i = -1 while can_move(p): if i > 6: raise NotImplementedError("can't make object non-movable!") @@ -186,7 +186,13 @@ """ if not obj: return False - return can_move(obj) + # XXX returning can_move() here might accidentally work for the use + # cases (see issue #2212), but this is not really safe. Now we + # just return True for any non-NULL pointer, and too bad for the + # few extra 'cond_call_gc_wb'. It could be improved e.g. to return + # False if 'obj' is a static prebuilt constant, or if we're not + # running incminimark...
+ return True #can_move(obj) def _heap_stats(): raise NotImplementedError # can't be run directly @@ -481,6 +487,7 @@ class _GcRef(object): # implementation-specific: there should not be any after translation __slots__ = ['_x', '_handle'] + _TYPE = llmemory.GCREF def __init__(self, x): self._x = x def __hash__(self): diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rgil.py pypy-5.0.1+dfsg/rpython/rlib/rgil.py --- pypy-4.0.1+dfsg/rpython/rlib/rgil.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rgil.py 2016-03-19 16:40:12.000000000 +0000 @@ -2,6 +2,7 @@ from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.extregistry import ExtRegistryEntry # these functions manipulate directly the GIL, whose definition does not # escape the C code itself @@ -10,27 +11,135 @@ eci = ExternalCompilationInfo( includes = ['src/thread.h'], separate_module_files = [translator_c_dir / 'src' / 'thread.c'], - include_dirs = [translator_c_dir]) + include_dirs = [translator_c_dir], + post_include_bits = ['#define RPY_WITH_GIL']) llexternal = rffi.llexternal -gil_allocate = llexternal('RPyGilAllocate', [], lltype.Void, - _nowrapper=True, sandboxsafe=True, - compilation_info=eci) +_gil_allocate = llexternal('RPyGilAllocate', [], lltype.Void, + _nowrapper=True, sandboxsafe=True, + compilation_info=eci) -gil_yield_thread = llexternal('RPyGilYieldThread', [], lltype.Signed, +_gil_yield_thread = llexternal('RPyGilYieldThread', [], lltype.Signed, _nowrapper=True, sandboxsafe=True, compilation_info=eci) -gil_release = llexternal('RPyGilRelease', [], lltype.Void, +_gil_release = llexternal('RPyGilRelease', [], lltype.Void, _nowrapper=True, sandboxsafe=True, compilation_info=eci) -gil_acquire = llexternal('RPyGilAcquire', [], lltype.Void, +_gil_acquire = llexternal('RPyGilAcquire', [], lltype.Void, _nowrapper=True, sandboxsafe=True, compilation_info=eci) 
gil_fetch_fastgil = llexternal('RPyFetchFastGil', [], llmemory.Address, _nowrapper=True, sandboxsafe=True, compilation_info=eci) + +# ____________________________________________________________ + + +def invoke_after_thread_switch(callback): + """Invoke callback() after a thread switch. + + This is a hook used by pypy.module.signal. Several callbacks should + be easy to support (but not right now). + + This function should be called from the translated RPython program + (i.e. *not* at module level!), but registers the callback + statically. The exact point at which invoke_after_thread_switch() + is called has no importance: the callback() will be called anyway. + """ + print "NOTE: invoke_after_thread_switch() is meant to be translated " + print "and not called directly. Using some emulation." + global _emulated_after_thread_switch + _emulated_after_thread_switch = callback + +_emulated_after_thread_switch = None + +def _after_thread_switch(): + """NOT_RPYTHON""" + if _emulated_after_thread_switch is not None: + _emulated_after_thread_switch() + + +class Entry(ExtRegistryEntry): + _about_ = invoke_after_thread_switch + + def compute_result_annotation(self, s_callback): + assert s_callback.is_constant() + callback = s_callback.const + bk = self.bookkeeper + translator = bk.annotator.translator + if hasattr(translator, '_rgil_invoke_after_thread_switch'): + assert translator._rgil_invoke_after_thread_switch == callback, ( + "not implemented yet: several invoke_after_thread_switch()") + else: + translator._rgil_invoke_after_thread_switch = callback + bk.emulate_pbc_call("rgil.invoke_after_thread_switch", s_callback, []) + + def specialize_call(self, hop): + # the actual call is not done here + hop.exception_cannot_occur() + +class Entry(ExtRegistryEntry): + _about_ = _after_thread_switch + + def compute_result_annotation(self): + # the call has been emulated already in invoke_after_thread_switch() + pass + + def specialize_call(self, hop): + translator = 
hop.rtyper.annotator.translator + if hasattr(translator, '_rgil_invoke_after_thread_switch'): + func = translator._rgil_invoke_after_thread_switch + graph = translator._graphof(func) + llfn = hop.rtyper.getcallable(graph) + c_callback = hop.inputconst(lltype.typeOf(llfn), llfn) + hop.exception_is_here() + hop.genop("direct_call", [c_callback]) + else: + hop.exception_cannot_occur() + + +def allocate(): + _gil_allocate() + +def release(): + # this function must not raise, in such a way that the exception + # transformer knows that it cannot raise! + _gil_release() +release._gctransformer_hint_cannot_collect_ = True +release._dont_reach_me_in_del_ = True + +def acquire(): + from rpython.rlib import rthread + _gil_acquire() + rthread.gc_thread_run() + _after_thread_switch() +acquire._gctransformer_hint_cannot_collect_ = True +acquire._dont_reach_me_in_del_ = True + +# The _gctransformer_hint_cannot_collect_ hack is needed for +# translations in which the *_external_call() functions are not inlined. +# They tell the gctransformer not to save and restore the local GC +# pointers in the shadow stack. This is necessary because the GIL is +# not held after the call to gil.release() or before the call +# to gil.acquire(). + +def yield_thread(): + # explicitly release the gil, in a way that tries to give more + # priority to other threads (as opposed to continuing to run in + # the same thread). + if _gil_yield_thread(): + from rpython.rlib import rthread + rthread.gc_thread_run() + _after_thread_switch() +yield_thread._gctransformer_hint_close_stack_ = True +yield_thread._dont_reach_me_in_del_ = True +yield_thread._dont_inline_ = True + +# yield_thread() needs a different hint: _gctransformer_hint_close_stack_. +# The *_external_call() functions are themselves called only from the rffi +# module from a helper function that also has this hint. 
diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rmarshal.py pypy-5.0.1+dfsg/rpython/rlib/rmarshal.py --- pypy-4.0.1+dfsg/rpython/rlib/rmarshal.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rmarshal.py 2016-03-19 16:40:12.000000000 +0000 @@ -90,6 +90,8 @@ dumper._annenforceargs_ = [s_list_of_chars, s_obj] def add_loader(s_obj, loader): + # 's_obj' should be the **least general annotation** that we're + # interested in, somehow loaders.append((s_obj, loader)) def get_dumper_annotation(dumper): @@ -187,6 +189,14 @@ r_32bits_mask = r_longlong(0xFFFFFFFF) +def load_longlong_nonneg(loader): + x = load_longlong(loader) + if x < 0: + raise ValueError("expected a non-negative longlong") + return x +add_loader(annmodel.SomeInteger(knowntype=r_longlong, nonneg=True), + load_longlong_nonneg) + def load_longlong(loader): if readchr(loader) != TYPE_INT64: raise ValueError("expected a longlong") diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rpath.py pypy-5.0.1+dfsg/rpython/rlib/rpath.py --- pypy-4.0.1+dfsg/rpython/rlib/rpath.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rpath.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,6 +4,8 @@ import os, stat from rpython.rlib import rposix +from rpython.rlib.signature import signature +from rpython.annotator.model import s_Str0 # ____________________________________________________________ @@ -56,6 +58,7 @@ path = slash*initial_slashes + path return path or dot +@signature(s_Str0, returns=s_Str0) def _posix_rabspath(path): """Return an absolute, **non-normalized** path. 
**This version does not let exceptions propagate.**""" @@ -142,11 +145,12 @@ comps.append(dot) return prefix + backslash.join(comps) +@signature(s_Str0, returns=s_Str0) def _nt_rabspath(path): try: if path == '': path = os.getcwd() - return rposix._getfullpathname(path) + return rposix.getfullpathname(path) except OSError: return path diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rposix_environ.py pypy-5.0.1+dfsg/rpython/rlib/rposix_environ.py --- pypy-4.0.1+dfsg/rpython/rlib/rposix_environ.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rposix_environ.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,224 @@ +import os +import sys +from rpython.annotator import model as annmodel +from rpython.rlib._os_support import _WIN32, StringTraits, UnicodeTraits +from rpython.rlib.objectmodel import enforceargs +# importing rposix here creates a cycle on Windows +from rpython.rtyper.controllerentry import Controller +from rpython.rtyper.extfunc import register_external +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.translator.tool.cbuild import ExternalCompilationInfo + +str0 = annmodel.s_Str0 + +# ____________________________________________________________ +# +# Annotation support to control access to 'os.environ' in the RPython +# program + +class OsEnvironController(Controller): + knowntype = os.environ.__class__ + + def convert(self, obj): + # 'None' is good enough, there is only one os.environ + return None + + def getitem(self, obj, key): + # in the RPython program reads of 'os.environ[key]' are + # redirected here + result = r_getenv(key) + if result is None: + raise KeyError + return result + + @enforceargs(None, None, str0, None) + def setitem(self, obj, key, value): + # in the RPython program, 'os.environ[key] = value' is + # redirected here + r_putenv(key, value) + + def delitem(self, obj, key): + # in the RPython program, 'del os.environ[key]' is redirected + # here + absent = r_getenv(key) is None + # Always call unsetenv(), 
to get eventual OSErrors + r_unsetenv(key) + if absent: + raise KeyError + + def get_keys(self, obj): + # 'os.environ.keys' is redirected here - note that it's the + # getattr that arrives here, not the actual method call! + return r_envkeys + + def get_items(self, obj): + # 'os.environ.items' is redirected here (not the actual method + # call!) + return r_envitems + + def get_get(self, obj): + # 'os.environ.get' is redirected here (not the actual method + # call!) + return r_getenv + +# ____________________________________________________________ +# Access to the 'environ' external variable +prefix = '' +if sys.platform.startswith('darwin'): + CCHARPPP = rffi.CArrayPtr(rffi.CCHARPP) + _os_NSGetEnviron = rffi.llexternal( + '_NSGetEnviron', [], CCHARPPP, + compilation_info=ExternalCompilationInfo(includes=['crt_externs.h']) + ) + def os_get_environ(): + return _os_NSGetEnviron()[0] +elif _WIN32: + eci = ExternalCompilationInfo(includes=['stdlib.h']) + CWCHARPP = lltype.Ptr(lltype.Array(rffi.CWCHARP, hints={'nolength': True})) + + os_get_environ, _os_set_environ = rffi.CExternVariable( + rffi.CCHARPP, '_environ', eci) + get__wenviron, _set__wenviron = rffi.CExternVariable( + CWCHARPP, '_wenviron', eci, c_type='wchar_t **') + prefix = '_' +else: + os_get_environ, _os_set_environ = rffi.CExternVariable( + rffi.CCHARPP, 'environ', ExternalCompilationInfo()) + +# ____________________________________________________________ +# +# Lower-level interface: dummy placeholders and external registations + +def r_envkeys(): + just_a_placeholder + +def envkeys_llimpl(): + environ = os_get_environ() + result = [] + i = 0 + while environ[i]: + name_value = rffi.charp2str(environ[i]) + p = name_value.find('=') + if p >= 0: + result.append(name_value[:p]) + i += 1 + return result + +register_external(r_envkeys, [], [str0], # returns a list of strings + export_name='ll_os.ll_os_envkeys', + llimpl=envkeys_llimpl) + +# ____________________________________________________________ + +def 
r_envitems(): + just_a_placeholder + +def r_getenv(name): + just_a_placeholder # should return None if name not found + +def r_putenv(name, value): + just_a_placeholder + +os_getenv = rffi.llexternal('getenv', [rffi.CCHARP], rffi.CCHARP, + releasegil=False) +os_putenv = rffi.llexternal(prefix + 'putenv', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +if _WIN32: + _wgetenv = rffi.llexternal('_wgetenv', [rffi.CWCHARP], rffi.CWCHARP, + compilation_info=eci, releasegil=False) + _wputenv = rffi.llexternal('_wputenv', [rffi.CWCHARP], rffi.INT, + compilation_info=eci, + save_err=rffi.RFFI_SAVE_LASTERROR) + +class EnvKeepalive: + pass +envkeepalive = EnvKeepalive() +envkeepalive.byname = {} +envkeepalive.bywname = {} + +def make_env_impls(win32=False): + if not win32: + traits = StringTraits() + get_environ, getenv, putenv = os_get_environ, os_getenv, os_putenv + byname, eq = envkeepalive.byname, '=' + def last_error(msg): + from rpython.rlib import rposix + raise OSError(rposix.get_saved_errno(), msg) + else: + traits = UnicodeTraits() + get_environ, getenv, putenv = get__wenviron, _wgetenv, _wputenv + byname, eq = envkeepalive.bywname, u'=' + from rpython.rlib.rwin32 import lastSavedWindowsError as last_error + + def envitems_llimpl(): + environ = get_environ() + result = [] + i = 0 + while environ[i]: + name_value = traits.charp2str(environ[i]) + p = name_value.find(eq) + if p >= 0: + result.append((name_value[:p], name_value[p+1:])) + i += 1 + return result + + def getenv_llimpl(name): + with traits.scoped_str2charp(name) as l_name: + l_result = getenv(l_name) + return traits.charp2str(l_result) if l_result else None + + def putenv_llimpl(name, value): + l_string = traits.str2charp(name + eq + value) + error = rffi.cast(lltype.Signed, putenv(l_string)) + if error: + traits.free_charp(l_string) + last_error("putenv failed") + # keep 'l_string' alive - we know that the C library needs it + # until the next call to putenv() with the same 'name'. 
+ l_oldstring = byname.get(name, lltype.nullptr(traits.CCHARP.TO)) + byname[name] = l_string + if l_oldstring: + traits.free_charp(l_oldstring) + + return envitems_llimpl, getenv_llimpl, putenv_llimpl + +envitems_llimpl, getenv_llimpl, putenv_llimpl = make_env_impls() + +register_external(r_envitems, [], [(str0, str0)], + export_name='ll_os.ll_os_envitems', + llimpl=envitems_llimpl) +register_external(r_getenv, [str0], + annmodel.SomeString(can_be_None=True, no_nul=True), + export_name='ll_os.ll_os_getenv', + llimpl=getenv_llimpl) +register_external(r_putenv, [str0, str0], annmodel.s_None, + export_name='ll_os.ll_os_putenv', + llimpl=putenv_llimpl) + +# ____________________________________________________________ + +def r_unsetenv(name): + # default implementation for platforms without a real unsetenv() + r_putenv(name, '') + +if hasattr(__import__(os.name), 'unsetenv'): + os_unsetenv = rffi.llexternal('unsetenv', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def unsetenv_llimpl(name): + with rffi.scoped_str2charp(name) as l_name: + error = rffi.cast(lltype.Signed, os_unsetenv(l_name)) + if error: + from rpython.rlib import rposix + raise OSError(rposix.get_saved_errno(), "os_unsetenv failed") + try: + l_oldstring = envkeepalive.byname[name] + except KeyError: + pass + else: + del envkeepalive.byname[name] + rffi.free_charp(l_oldstring) + + register_external(r_unsetenv, [str0], annmodel.s_None, + export_name='ll_os.ll_os_unsetenv', + llimpl=unsetenv_llimpl) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rposix.py pypy-5.0.1+dfsg/rpython/rlib/rposix.py --- pypy-4.0.1+dfsg/rpython/rlib/rposix.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rposix.py 2016-03-19 16:40:15.000000000 +0000 @@ -1,14 +1,26 @@ import os +import sys +import errno +from rpython.annotator.model import s_Str0 from rpython.rtyper.lltypesystem.rffi import CConstant, CExternVariable, INT -from rpython.rtyper.lltypesystem import ll2ctypes, rffi -from 
rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rlib.rarithmetic import intmask -from rpython.rlib.objectmodel import specialize -from rpython.rlib import jit +from rpython.rtyper.lltypesystem import lltype, ll2ctypes, rffi +from rpython.rtyper.tool import rffi_platform +from rpython.rlib import debug, jit, rstring, rthread, types +from rpython.rlib._os_support import ( + _CYGWIN, _MACRO_ON_POSIX, UNDERSCORE_ON_WIN32, _WIN32, + _prefer_unicode, _preferred_traits) +from rpython.rlib.objectmodel import ( + specialize, enforceargs, register_replacement_for, NOT_CONSTANT) +from rpython.rlib.rarithmetic import intmask, widen +from rpython.rlib.signature import signature +from rpython.tool.sourcetools import func_renamer from rpython.translator.platform import platform +from rpython.translator.tool.cbuild import ExternalCompilationInfo -WIN32 = os.name == "nt" +if _WIN32: + from rpython.rlib import rwin32 + from rpython.rlib.rwin32file import make_win32_traits class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() @@ -102,7 +114,6 @@ with the flag llexternal(..., save_err=rffi.RFFI_SAVE_ERRNO). Functions without that flag don't change the saved errno. """ - from rpython.rlib import rthread return intmask(rthread.tlfield_rpy_errno.getraw()) def set_saved_errno(errno): @@ -113,7 +124,6 @@ zero; for that case, use llexternal(..., save_err=RFFI_ZERO_ERRNO_BEFORE) and then you don't need set_saved_errno(0). """ - from rpython.rlib import rthread rthread.tlfield_rpy_errno.setraw(rffi.cast(INT, errno)) def get_saved_alterrno(): @@ -122,7 +132,6 @@ with the flag llexternal(..., save_err=rffi.RFFI_SAVE_ERRNO | rffl.RFFI_ALT_ERRNO). Functions without that flag don't change the saved errno. 
""" - from rpython.rlib import rthread return intmask(rthread.tlfield_alt_errno.getraw()) def set_saved_alterrno(errno): @@ -133,7 +142,6 @@ zero; for that case, use llexternal(..., save_err=RFFI_ZERO_ERRNO_BEFORE) and then you don't need set_saved_errno(0). """ - from rpython.rlib import rthread rthread.tlfield_alt_errno.setraw(rffi.cast(INT, errno)) @@ -141,15 +149,13 @@ @specialize.call_location() def _errno_before(save_err): if save_err & rffi.RFFI_READSAVED_ERRNO: - from rpython.rlib import rthread if save_err & rffi.RFFI_ALT_ERRNO: _set_errno(rthread.tlfield_alt_errno.getraw()) else: _set_errno(rthread.tlfield_rpy_errno.getraw()) elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: _set_errno(rffi.cast(rffi.INT, 0)) - if WIN32 and (save_err & rffi.RFFI_READSAVED_LASTERROR): - from rpython.rlib import rthread, rwin32 + if _WIN32 and (save_err & rffi.RFFI_READSAVED_LASTERROR): if save_err & rffi.RFFI_ALT_ERRNO: err = rthread.tlfield_alt_lasterror.getraw() else: @@ -161,9 +167,8 @@ @specialize.call_location() def _errno_after(save_err): - if WIN32: + if _WIN32: if save_err & rffi.RFFI_SAVE_LASTERROR: - from rpython.rlib import rthread, rwin32 err = rwin32._GetLastError() # careful, setraw() overwrites GetLastError. # We must read it first, before the errno handling. 
@@ -172,14 +177,13 @@ else: rthread.tlfield_rpy_lasterror.setraw(err) elif save_err & rffi.RFFI_SAVE_WSALASTERROR: - from rpython.rlib import rthread, _rsocket_rffi + from rpython.rlib import _rsocket_rffi err = _rsocket_rffi._WSAGetLastError() if save_err & rffi.RFFI_ALT_ERRNO: rthread.tlfield_alt_lasterror.setraw(err) else: rthread.tlfield_rpy_lasterror.setraw(err) if save_err & rffi.RFFI_SAVE_ERRNO: - from rpython.rlib import rthread if save_err & rffi.RFFI_ALT_ERRNO: rthread.tlfield_alt_errno.setraw(_get_errno()) else: @@ -191,6 +195,7 @@ "_PyVerify_fd", [rffi.INT], rffi.INT, compilation_info=errno_eci, )) + @enforceargs(int) def validate_fd(fd): if not is_valid_fd(fd): from errno import EBADF @@ -199,6 +204,7 @@ def is_valid_fd(fd): return 1 + @enforceargs(int) def validate_fd(fd): pass @@ -211,6 +217,75 @@ except OSError: pass +if _WIN32: + includes = ['io.h', 'sys/utime.h', 'sys/types.h'] + libraries = [] +else: + if sys.platform.startswith(('darwin', 'netbsd', 'openbsd')): + _ptyh = 'util.h' + elif sys.platform.startswith('freebsd'): + _ptyh = 'libutil.h' + else: + _ptyh = 'pty.h' + includes = ['unistd.h', 'sys/types.h', 'sys/wait.h', + 'utime.h', 'sys/time.h', 'sys/times.h', + 'grp.h', 'dirent.h', 'sys/stat.h', 'fcntl.h', + 'signal.h', 'sys/utsname.h', _ptyh] + libraries = ['util'] +eci = ExternalCompilationInfo( + includes=includes, + libraries=libraries, +) + +class CConfig: + _compilation_info_ = eci + SEEK_SET = rffi_platform.DefinedConstantInteger('SEEK_SET') + SEEK_CUR = rffi_platform.DefinedConstantInteger('SEEK_CUR') + SEEK_END = rffi_platform.DefinedConstantInteger('SEEK_END') + OFF_T_SIZE = rffi_platform.SizeOf('off_t') + + HAVE_UTIMES = rffi_platform.Has('utimes') + UTIMBUF = rffi_platform.Struct('struct %sutimbuf' % UNDERSCORE_ON_WIN32, + [('actime', rffi.INT), + ('modtime', rffi.INT)]) + if not _WIN32: + CLOCK_T = rffi_platform.SimpleType('clock_t', rffi.INT) + + TMS = rffi_platform.Struct( + 'struct tms', [('tms_utime', rffi.INT), + 
('tms_stime', rffi.INT), + ('tms_cutime', rffi.INT), + ('tms_cstime', rffi.INT)]) + + GETPGRP_HAVE_ARG = rffi_platform.Has("getpgrp(0)") + SETPGRP_HAVE_ARG = rffi_platform.Has("setpgrp(0, 0)") + +config = rffi_platform.configure(CConfig) +globals().update(config) + +def external(name, args, result, compilation_info=eci, **kwds): + return rffi.llexternal(name, args, result, + compilation_info=compilation_info, **kwds) + +# For now we require off_t to be the same size as LONGLONG, which is the +# interface required by callers of functions that thake an argument of type +# off_t. +if not _WIN32: + assert OFF_T_SIZE == rffi.sizeof(rffi.LONGLONG) + +c_dup = external(UNDERSCORE_ON_WIN32 + 'dup', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_dup2 = external(UNDERSCORE_ON_WIN32 + 'dup2', [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_open = external(UNDERSCORE_ON_WIN32 + 'open', + [rffi.CCHARP, rffi.INT, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +# Win32 Unicode functions +c_wopen = external(UNDERSCORE_ON_WIN32 + 'wopen', + [rffi.CWCHARP, rffi.INT, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + #___________________________________________________________________ # Wrappers around posix functions, that accept either strings, or # instances with a "as_bytes()" method. @@ -219,95 +294,1418 @@ # - but rpython.rtyper.module.ll_os.py on Windows will replace these functions # with other wrappers that directly handle unicode strings. @specialize.argtype(0) +@signature(types.any(), returns=s_Str0) def _as_bytes(path): assert path is not None if isinstance(path, str): return path + elif isinstance(path, unicode): + # This never happens in PyPy's Python interpreter! + # Only in raw RPython code that uses unicode strings. + # We implement python2 behavior: silently convert to ascii. 
+ return path.encode('ascii') else: return path.as_bytes() @specialize.argtype(0) -def open(path, flags, mode): - return os.open(_as_bytes(path), flags, mode) +def _as_bytes0(path): + """Crashes translation if the path contains NUL characters.""" + res = _as_bytes(path) + rstring.check_str0(res) + return res @specialize.argtype(0) -def stat(path): - return os.stat(_as_bytes(path)) +def _as_unicode(path): + assert path is not None + if isinstance(path, unicode): + return path + else: + return path.as_unicode() @specialize.argtype(0) -def lstat(path): - return os.lstat(_as_bytes(path)) +def _as_unicode0(path): + """Crashes translation if the path contains NUL characters.""" + res = _as_unicode(path) + rstring.check_str0(res) + return res +@specialize.argtype(0, 1) +def putenv(name, value): + os.environ[_as_bytes(name)] = _as_bytes(value) @specialize.argtype(0) -def statvfs(path): - return os.statvfs(_as_bytes(path)) +def unsetenv(name): + del os.environ[_as_bytes(name)] + +#___________________________________________________________________ +# Implementation of many posix functions. +# They usually check the return value and raise an (RPython) OSError +# with errno. 
+ +def replace_os_function(name): + func = getattr(os, name, None) + if func is None: + return lambda f: f + return register_replacement_for( + func, + sandboxed_name='ll_os.ll_os_%s' % name) + +@specialize.arg(0) +def handle_posix_error(name, result): + result = widen(result) + if result < 0: + raise OSError(get_saved_errno(), '%s failed' % name) + return result + +@replace_os_function('dup') +def dup(fd): + validate_fd(fd) + return handle_posix_error('dup', c_dup(fd)) + +@replace_os_function('dup2') +def dup2(fd, newfd): + validate_fd(fd) + handle_posix_error('dup2', c_dup2(fd, newfd)) +#___________________________________________________________________ +@replace_os_function('open') @specialize.argtype(0) -def unlink(path): - return os.unlink(_as_bytes(path)) +@enforceargs(NOT_CONSTANT, int, int, typecheck=False) +def open(path, flags, mode): + if _prefer_unicode(path): + fd = c_wopen(_as_unicode0(path), flags, mode) + else: + fd = c_open(_as_bytes0(path), flags, mode) + return handle_posix_error('open', fd) -@specialize.argtype(0, 1) -def rename(path1, path2): - return os.rename(_as_bytes(path1), _as_bytes(path2)) +c_read = external(UNDERSCORE_ON_WIN32 + 'read', + [rffi.INT, rffi.VOIDP, rffi.SIZE_T], rffi.SSIZE_T, + save_err=rffi.RFFI_SAVE_ERRNO) +c_write = external(UNDERSCORE_ON_WIN32 + 'write', + [rffi.INT, rffi.VOIDP, rffi.SIZE_T], rffi.SSIZE_T, + save_err=rffi.RFFI_SAVE_ERRNO) +c_close = external(UNDERSCORE_ON_WIN32 + 'close', [rffi.INT], rffi.INT, + releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('read') +@enforceargs(int, int) +def read(fd, count): + if count < 0: + raise OSError(errno.EINVAL, None) + validate_fd(fd) + with rffi.scoped_alloc_buffer(count) as buf: + void_buf = rffi.cast(rffi.VOIDP, buf.raw) + got = handle_posix_error('read', c_read(fd, void_buf, count)) + return buf.str(got) + +@replace_os_function('write') +@enforceargs(int, None) +def write(fd, data): + count = len(data) + validate_fd(fd) + with 
rffi.scoped_nonmovingbuffer(data) as buf: + return handle_posix_error('write', c_write(fd, buf, count)) + +@replace_os_function('close') +def close(fd): + validate_fd(fd) + handle_posix_error('close', c_close(fd)) + +c_lseek = external('_lseeki64' if _WIN32 else 'lseek', + [rffi.INT, rffi.LONGLONG, rffi.INT], rffi.LONGLONG, + macro=_MACRO_ON_POSIX, save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('lseek') +def lseek(fd, pos, how): + validate_fd(fd) + if SEEK_SET is not None: + if how == 0: + how = SEEK_SET + elif how == 1: + how = SEEK_CUR + elif how == 2: + how = SEEK_END + return handle_posix_error('lseek', c_lseek(fd, pos, how)) + +c_ftruncate = external('ftruncate', [rffi.INT, rffi.LONGLONG], rffi.INT, + macro=_MACRO_ON_POSIX, save_err=rffi.RFFI_SAVE_ERRNO) +c_fsync = external('fsync' if not _WIN32 else '_commit', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_fdatasync = external('fdatasync', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('ftruncate') +def ftruncate(fd, length): + validate_fd(fd) + handle_posix_error('ftruncate', c_ftruncate(fd, length)) + +@replace_os_function('fsync') +def fsync(fd): + validate_fd(fd) + handle_posix_error('fsync', c_fsync(fd)) + +@replace_os_function('fdatasync') +def fdatasync(fd): + validate_fd(fd) + handle_posix_error('fdatasync', c_fdatasync(fd)) + +#___________________________________________________________________ +c_chdir = external('chdir', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_fchdir = external('fchdir', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_access = external(UNDERSCORE_ON_WIN32 + 'access', + [rffi.CCHARP, rffi.INT], rffi.INT) +c_waccess = external(UNDERSCORE_ON_WIN32 + 'waccess', + [rffi.CWCHARP, rffi.INT], rffi.INT) + +@replace_os_function('chdir') @specialize.argtype(0) -def listdir(dirname): - return os.listdir(_as_bytes(dirname)) +def chdir(path): + if not _WIN32: + handle_posix_error('chdir', c_chdir(_as_bytes0(path))) 
+ else: + traits = _preferred_traits(path) + win32traits = make_win32_traits(traits) + path = traits.as_str0(path) + + # This is a reimplementation of the C library's chdir + # function, but one that produces Win32 errors instead of DOS + # error codes. + # chdir is essentially a wrapper around SetCurrentDirectory; + # however, it also needs to set "magic" environment variables + # indicating the per-drive current directory, which are of the + # form =: + if not win32traits.SetCurrentDirectory(path): + raise rwin32.lastSavedWindowsError() + MAX_PATH = rwin32.MAX_PATH + assert MAX_PATH > 0 + + with traits.scoped_alloc_buffer(MAX_PATH) as path: + res = win32traits.GetCurrentDirectory(MAX_PATH + 1, path.raw) + if not res: + raise rwin32.lastSavedWindowsError() + res = rffi.cast(lltype.Signed, res) + assert res > 0 + if res <= MAX_PATH + 1: + new_path = path.str(res) + else: + with traits.scoped_alloc_buffer(res) as path: + res = win32traits.GetCurrentDirectory(res, path.raw) + if not res: + raise rwin32.lastSavedWindowsError() + res = rffi.cast(lltype.Signed, res) + assert res > 0 + new_path = path.str(res) + if traits.str is unicode: + if new_path[0] == u'\\' or new_path[0] == u'/': # UNC path + return + magic_envvar = u'=' + new_path[0] + u':' + else: + if new_path[0] == '\\' or new_path[0] == '/': # UNC path + return + magic_envvar = '=' + new_path[0] + ':' + if not win32traits.SetEnvironmentVariable(magic_envvar, new_path): + raise rwin32.lastSavedWindowsError() + +@replace_os_function('fchdir') +def fchdir(fd): + validate_fd(fd) + handle_posix_error('fchdir', c_fchdir(fd)) +@replace_os_function('access') @specialize.argtype(0) def access(path, mode): - return os.access(_as_bytes(path), mode) + if _WIN32: + # All files are executable on Windows + mode = mode & ~os.X_OK + if _prefer_unicode(path): + error = c_waccess(_as_unicode0(path), mode) + else: + error = c_access(_as_bytes0(path), mode) + return error == 0 +# This Win32 function is not exposed via os, but 
needed to get a +# correct implementation of os.path.abspath. @specialize.argtype(0) -def chmod(path, mode): - return os.chmod(_as_bytes(path), mode) +def getfullpathname(path): + length = rwin32.MAX_PATH + 1 + traits = _preferred_traits(path) + win32traits = make_win32_traits(traits) + with traits.scoped_alloc_buffer(length) as buf: + res = win32traits.GetFullPathName( + traits.as_str0(path), rffi.cast(rwin32.DWORD, length), + buf.raw, lltype.nullptr(win32traits.LPSTRP.TO)) + if res == 0: + raise rwin32.lastSavedWindowsError("_getfullpathname failed") + result = buf.str(intmask(res)) + assert result is not None + result = rstring.assert_str0(result) + return result + +c_getcwd = external(UNDERSCORE_ON_WIN32 + 'getcwd', + [rffi.CCHARP, rffi.SIZE_T], rffi.CCHARP, + save_err=rffi.RFFI_SAVE_ERRNO) +c_wgetcwd = external(UNDERSCORE_ON_WIN32 + 'wgetcwd', + [rffi.CWCHARP, rffi.SIZE_T], rffi.CWCHARP, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('getcwd') +def getcwd(): + bufsize = 256 + while True: + buf = lltype.malloc(rffi.CCHARP.TO, bufsize, flavor='raw') + res = c_getcwd(buf, bufsize) + if res: + break # ok + error = get_saved_errno() + lltype.free(buf, flavor='raw') + if error != errno.ERANGE: + raise OSError(error, "getcwd failed") + # else try again with a larger buffer, up to some sane limit + bufsize *= 4 + if bufsize > 1024*1024: # xxx hard-coded upper limit + raise OSError(error, "getcwd result too large") + result = rffi.charp2str(res) + lltype.free(buf, flavor='raw') + return result + +@replace_os_function('getcwdu') +def getcwdu(): + bufsize = 256 + while True: + buf = lltype.malloc(rffi.CWCHARP.TO, bufsize, flavor='raw') + res = c_wgetcwd(buf, bufsize) + if res: + break # ok + error = get_saved_errno() + lltype.free(buf, flavor='raw') + if error != errno.ERANGE: + raise OSError(error, "getcwd failed") + # else try again with a larger buffer, up to some sane limit + bufsize *= 4 + if bufsize > 1024*1024: # xxx hard-coded upper limit + raise 
OSError(error, "getcwd result too large") + result = rffi.wcharp2unicode(res) + lltype.free(buf, flavor='raw') + return result + +if not _WIN32: + class CConfig: + _compilation_info_ = eci + DIRENT = rffi_platform.Struct('struct dirent', + [('d_name', lltype.FixedSizeArray(rffi.CHAR, 1))]) + + DIRP = rffi.COpaquePtr('DIR') + config = rffi_platform.configure(CConfig) + DIRENT = config['DIRENT'] + DIRENTP = lltype.Ptr(DIRENT) + c_opendir = external('opendir', [rffi.CCHARP], DIRP, + save_err=rffi.RFFI_SAVE_ERRNO) + # XXX macro=True is hack to make sure we get the correct kind of + # dirent struct (which depends on defines) + c_readdir = external('readdir', [DIRP], DIRENTP, + macro=True, save_err=rffi.RFFI_FULL_ERRNO_ZERO) + c_closedir = external('closedir', [DIRP], rffi.INT) -@specialize.argtype(0, 1) -def utime(path, times): - return os.utime(_as_bytes(path), times) +@replace_os_function('listdir') +@specialize.argtype(0) +def listdir(path): + if not _WIN32: + path = _as_bytes0(path) + dirp = c_opendir(path) + if not dirp: + raise OSError(get_saved_errno(), "opendir failed") + result = [] + while True: + direntp = c_readdir(dirp) + if not direntp: + error = get_saved_errno() + break + namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) + name = rffi.charp2str(namep) + if name != '.' 
and name != '..': + result.append(name) + c_closedir(dirp) + if error: + raise OSError(error, "readdir failed") + return result + else: # _WIN32 case + traits = _preferred_traits(path) + win32traits = make_win32_traits(traits) + path = traits.as_str0(path) + + if traits.str is unicode: + if path and path[-1] not in (u'/', u'\\', u':'): + path += u'/' + mask = path + u'*.*' + else: + if path and path[-1] not in ('/', '\\', ':'): + path += '/' + mask = path + '*.*' + + filedata = lltype.malloc(win32traits.WIN32_FIND_DATA, flavor='raw') + try: + result = [] + hFindFile = win32traits.FindFirstFile(mask, filedata) + if hFindFile == rwin32.INVALID_HANDLE_VALUE: + error = rwin32.GetLastError_saved() + if error == win32traits.ERROR_FILE_NOT_FOUND: + return result + else: + raise WindowsError(error, "FindFirstFile failed") + while True: + name = traits.charp2str(rffi.cast(traits.CCHARP, + filedata.c_cFileName)) + if traits.str is unicode: + if not (name == u"." or name == u".."): + result.append(name) + else: + if not (name == "." 
or name == ".."): + result.append(name) + if not win32traits.FindNextFile(hFindFile, filedata): + break + # FindNextFile sets error to ERROR_NO_MORE_FILES if + # it got to the end of the directory + error = rwin32.GetLastError_saved() + win32traits.FindClose(hFindFile) + if error == win32traits.ERROR_NO_MORE_FILES: + return result + else: + raise WindowsError(error, "FindNextFile failed") + finally: + lltype.free(filedata, flavor='raw') + +#___________________________________________________________________ + +c_execv = external('execv', [rffi.CCHARP, rffi.CCHARPP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_execve = external('execve', + [rffi.CCHARP, rffi.CCHARPP, rffi.CCHARPP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_spawnv = external('spawnv', + [rffi.INT, rffi.CCHARP, rffi.CCHARPP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_spawnve = external('spawnve', + [rffi.INT, rffi.CCHARP, rffi.CCHARPP, rffi.CCHARPP], + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('execv') +def execv(path, args): + rstring.check_str0(path) + # This list conversion already takes care of NUL bytes. + l_args = rffi.ll_liststr2charpp(args) + c_execv(path, l_args) + rffi.free_charpp(l_args) + raise OSError(get_saved_errno(), "execv failed") + +@replace_os_function('execve') +def execve(path, args, env): + envstrs = [] + for item in env.iteritems(): + envstr = "%s=%s" % item + envstrs.append(envstr) + + rstring.check_str0(path) + # This list conversion already takes care of NUL bytes. 
+ l_args = rffi.ll_liststr2charpp(args) + l_env = rffi.ll_liststr2charpp(envstrs) + c_execve(path, l_args, l_env) + + rffi.free_charpp(l_env) + rffi.free_charpp(l_args) + raise OSError(get_saved_errno(), "execve failed") + +@replace_os_function('spawnv') +def spawnv(mode, path, args): + rstring.check_str0(path) + l_args = rffi.ll_liststr2charpp(args) + childpid = c_spawnv(mode, path, l_args) + rffi.free_charpp(l_args) + return handle_posix_error('spawnv', childpid) + +@replace_os_function('spawnve') +def spawnve(mode, path, args, env): + envstrs = [] + for item in env.iteritems(): + envstrs.append("%s=%s" % item) + rstring.check_str0(path) + l_args = rffi.ll_liststr2charpp(args) + l_env = rffi.ll_liststr2charpp(envstrs) + childpid = c_spawnve(mode, path, l_args, l_env) + rffi.free_charpp(l_env) + rffi.free_charpp(l_args) + return handle_posix_error('spawnve', childpid) + +c_fork = external('fork', [], rffi.PID_T, _nowrapper = True) +c_openpty = external('openpty', + [rffi.INTP, rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], + rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_forkpty = external('forkpty', + [rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], + rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('fork') +@jit.dont_look_inside +def fork(): + # NB. 
keep forkpty() up-to-date, too + ofs = debug.debug_offset() + opaqueaddr = rthread.gc_thread_before_fork() + childpid = c_fork() + rthread.gc_thread_after_fork(childpid, opaqueaddr) + childpid = handle_posix_error('fork', childpid) + if childpid == 0: + debug.debug_forked(ofs) + return childpid + +@replace_os_function('openpty') +@jit.dont_look_inside +def openpty(): + master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + slave_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + try: + handle_posix_error( + 'openpty', c_openpty(master_p, slave_p, None, None, None)) + return (widen(master_p[0]), widen(slave_p[0])) + finally: + lltype.free(master_p, flavor='raw') + lltype.free(slave_p, flavor='raw') + +@replace_os_function('forkpty') +@jit.dont_look_inside +def forkpty(): + master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + master_p[0] = rffi.cast(rffi.INT, -1) + try: + ofs = debug.debug_offset() + opaqueaddr = rthread.gc_thread_before_fork() + childpid = c_forkpty(master_p, None, None, None) + rthread.gc_thread_after_fork(childpid, opaqueaddr) + childpid = handle_posix_error('forkpty', childpid) + if childpid == 0: + debug.debug_forked(ofs) + return (childpid, master_p[0]) + finally: + lltype.free(master_p, flavor='raw') + +if _WIN32: + # emulate waitpid() with the _cwait() of Microsoft's compiler + c__cwait = external('_cwait', + [rffi.INTP, rffi.PID_T, rffi.INT], rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) + @jit.dont_look_inside + def c_waitpid(pid, status_p, options): + result = c__cwait(status_p, pid, options) + # shift the status left a byte so this is more + # like the POSIX waitpid + status_p[0] = rffi.cast(rffi.INT, widen(status_p[0]) << 8) + return result +elif _CYGWIN: + c_waitpid = external('cygwin_waitpid', + [rffi.PID_T, rffi.INTP, rffi.INT], rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) +else: + c_waitpid = external('waitpid', + [rffi.PID_T, rffi.INTP, rffi.INT], rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('waitpid') 
+def waitpid(pid, options): + status_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + status_p[0] = rffi.cast(rffi.INT, 0) + try: + result = handle_posix_error('waitpid', + c_waitpid(pid, status_p, options)) + status = widen(status_p[0]) + return (result, status) + finally: + lltype.free(status_p, flavor='raw') + +def _make_waitmacro(name): + c_func = external(name, [lltype.Signed], lltype.Signed, + macro=_MACRO_ON_POSIX) + returning_int = name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG') + + @replace_os_function(name) + @func_renamer(name) + def _waitmacro(status): + if returning_int: + return c_func(status) + else: + return bool(c_func(status)) +WAIT_MACROS = ['WCOREDUMP', 'WIFCONTINUED', 'WIFSTOPPED', + 'WIFSIGNALED', 'WIFEXITED', + 'WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG'] +for name in WAIT_MACROS: + _make_waitmacro(name) + +#___________________________________________________________________ + +c_getlogin = external('getlogin', [], rffi.CCHARP, + releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) +c_getloadavg = external('getloadavg', + [rffi.CArrayPtr(lltype.Float), rffi.INT], rffi.INT) + +@replace_os_function('getlogin') +def getlogin(): + result = c_getlogin() + if not result: + raise OSError(get_saved_errno(), "getlogin failed") + return rffi.charp2str(result) + +@replace_os_function('getloadavg') +def getloadavg(): + load = lltype.malloc(rffi.CArrayPtr(lltype.Float).TO, 3, flavor='raw') + try: + r = c_getloadavg(load, 3) + if r != 3: + raise OSError + return (load[0], load[1], load[2]) + finally: + lltype.free(load, flavor='raw') + +#___________________________________________________________________ + +c_readlink = external('readlink', + [rffi.CCHARP, rffi.CCHARP, rffi.SIZE_T], rffi.SSIZE_T, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('readlink') +def readlink(path): + path = _as_bytes0(path) + bufsize = 1023 + while True: + buf = lltype.malloc(rffi.CCHARP.TO, bufsize, flavor='raw') + res = widen(c_readlink(path, buf, bufsize)) + if res < 0: + 
lltype.free(buf, flavor='raw') + error = get_saved_errno() # failed + raise OSError(error, "readlink failed") + elif res < bufsize: + break # ok + else: + # buf too small, try again with a larger buffer + lltype.free(buf, flavor='raw') + bufsize *= 4 + # convert the result to a string + result = rffi.charp2strn(buf, res) + lltype.free(buf, flavor='raw') + return result + +c_isatty = external(UNDERSCORE_ON_WIN32 + 'isatty', [rffi.INT], rffi.INT) + +@replace_os_function('isatty') +def isatty(fd): + if not is_valid_fd(fd): + return False + return c_isatty(fd) != 0 + +c_ttyname = external('ttyname', [lltype.Signed], rffi.CCHARP, + releasegil=False, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('ttyname') +def ttyname(fd): + l_name = c_ttyname(fd) + if not l_name: + raise OSError(get_saved_errno(), "ttyname raised") + return rffi.charp2str(l_name) + +c_strerror = external('strerror', [rffi.INT], rffi.CCHARP, + releasegil=False) + +@replace_os_function('strerror') +def strerror(errnum): + res = c_strerror(errnum) + if not res: + raise ValueError("os_strerror failed") + return rffi.charp2str(res) + +c_system = external('system', [rffi.CCHARP], rffi.INT) + +@replace_os_function('system') +def system(command): + return widen(c_system(command)) + +c_unlink = external('unlink', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_mkdir = external('mkdir', [rffi.CCHARP, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_rmdir = external(UNDERSCORE_ON_WIN32 + 'rmdir', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_wrmdir = external(UNDERSCORE_ON_WIN32 + 'wrmdir', [rffi.CWCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('unlink') @specialize.argtype(0) -def chdir(path): - return os.chdir(_as_bytes(path)) +def unlink(path): + if not _WIN32: + handle_posix_error('unlink', c_unlink(_as_bytes0(path))) + else: + traits = _preferred_traits(path) + win32traits = make_win32_traits(traits) + if not 
win32traits.DeleteFile(traits.as_str0(path)): + raise rwin32.lastSavedWindowsError() +@replace_os_function('mkdir') @specialize.argtype(0) -def mkdir(path, mode=0777): - return os.mkdir(_as_bytes(path), mode) +def mkdir(path, mode=0o777): + if not _WIN32: + handle_posix_error('mkdir', c_mkdir(_as_bytes0(path), mode)) + else: + traits = _preferred_traits(path) + win32traits = make_win32_traits(traits) + if not win32traits.CreateDirectory(traits.as_str0(path), None): + raise rwin32.lastSavedWindowsError() +@replace_os_function('rmdir') @specialize.argtype(0) +@jit.dont_look_inside def rmdir(path): - return os.rmdir(_as_bytes(path)) + if _prefer_unicode(path): + handle_posix_error('wrmdir', c_wrmdir(_as_unicode0(path))) + else: + handle_posix_error('rmdir', c_rmdir(_as_bytes0(path))) + +c_chmod = external('chmod', [rffi.CCHARP, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_fchmod = external('fchmod', [rffi.INT, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO,) +c_rename = external('rename', [rffi.CCHARP, rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('chmod') +@specialize.argtype(0) +def chmod(path, mode): + if not _WIN32: + handle_posix_error('chmod', c_chmod(_as_bytes0(path), mode)) + else: + traits = _preferred_traits(path) + win32traits = make_win32_traits(traits) + path = traits.as_str0(path) + attr = win32traits.GetFileAttributes(path) + if attr == win32traits.INVALID_FILE_ATTRIBUTES: + raise rwin32.lastSavedWindowsError() + if mode & 0200: # _S_IWRITE + attr &= ~win32traits.FILE_ATTRIBUTE_READONLY + else: + attr |= win32traits.FILE_ATTRIBUTE_READONLY + if not win32traits.SetFileAttributes(path, attr): + raise rwin32.lastSavedWindowsError() + +@replace_os_function('fchmod') +def fchmod(fd, mode): + handle_posix_error('fchmod', c_fchmod(fd, mode)) + +@replace_os_function('rename') +@specialize.argtype(0, 1) +def rename(path1, path2): + if not _WIN32: + handle_posix_error('rename', + 
c_rename(_as_bytes0(path1), _as_bytes0(path2))) + else: + traits = _preferred_traits(path1) + win32traits = make_win32_traits(traits) + path1 = traits.as_str0(path1) + path2 = traits.as_str0(path2) + if not win32traits.MoveFile(path1, path2): + raise rwin32.lastSavedWindowsError() +#___________________________________________________________________ + +c_mkfifo = external('mkfifo', [rffi.CCHARP, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_mknod = external('mknod', [rffi.CCHARP, rffi.MODE_T, rffi.INT], rffi.INT, +# # xxx: actually ^^^ dev_t + macro=_MACRO_ON_POSIX, save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('mkfifo') @specialize.argtype(0) def mkfifo(path, mode): - os.mkfifo(_as_bytes(path), mode) + handle_posix_error('mkfifo', c_mkfifo(_as_bytes0(path), mode)) +@replace_os_function('mknod') @specialize.argtype(0) -def mknod(path, mode, device): - os.mknod(_as_bytes(path), mode, device) +def mknod(path, mode, dev): + handle_posix_error('mknod', c_mknod(_as_bytes0(path), mode, dev)) -@specialize.argtype(0, 1) -def symlink(src, dest): - os.symlink(_as_bytes(src), _as_bytes(dest)) +if _WIN32: + CreatePipe = external('CreatePipe', [rwin32.LPHANDLE, + rwin32.LPHANDLE, + rffi.VOIDP, + rwin32.DWORD], + rwin32.BOOL) + c_open_osfhandle = external('_open_osfhandle', [rffi.INTPTR_T, + rffi.INT], + rffi.INT) +else: + INT_ARRAY_P = rffi.CArrayPtr(rffi.INT) + c_pipe = external('pipe', [INT_ARRAY_P], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('pipe') +def pipe(): + if _WIN32: + pread = lltype.malloc(rwin32.LPHANDLE.TO, 1, flavor='raw') + pwrite = lltype.malloc(rwin32.LPHANDLE.TO, 1, flavor='raw') + try: + if not CreatePipe( + pread, pwrite, lltype.nullptr(rffi.VOIDP.TO), 0): + raise WindowsError(rwin32.GetLastError_saved(), + "CreatePipe failed") + hread = rffi.cast(rffi.INTPTR_T, pread[0]) + hwrite = rffi.cast(rffi.INTPTR_T, pwrite[0]) + finally: + lltype.free(pwrite, flavor='raw') + lltype.free(pread, flavor='raw') + fdread = 
c_open_osfhandle(hread, 0) + fdwrite = c_open_osfhandle(hwrite, 1) + return (fdread, fdwrite) + else: + filedes = lltype.malloc(INT_ARRAY_P.TO, 2, flavor='raw') + try: + handle_posix_error('pipe', c_pipe(filedes)) + return (widen(filedes[0]), widen(filedes[1])) + finally: + lltype.free(filedes, flavor='raw') + +c_link = external('link', [rffi.CCHARP, rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO,) +c_symlink = external('symlink', [rffi.CCHARP, rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) -if os.name == 'nt': - import nt - @specialize.argtype(0) - def _getfullpathname(path): - return nt._getfullpathname(_as_bytes(path)) +#___________________________________________________________________ +@replace_os_function('link') @specialize.argtype(0, 1) -def putenv(name, value): - os.environ[_as_bytes(name)] = _as_bytes(value) +def link(oldpath, newpath): + oldpath = _as_bytes0(oldpath) + newpath = _as_bytes0(newpath) + handle_posix_error('link', c_link(oldpath, newpath)) -@specialize.argtype(0) -def unsetenv(name): - del os.environ[_as_bytes(name)] +@replace_os_function('symlink') +@specialize.argtype(0, 1) +def symlink(oldpath, newpath): + oldpath = _as_bytes0(oldpath) + newpath = _as_bytes0(newpath) + handle_posix_error('symlink', c_symlink(oldpath, newpath)) + +c_umask = external(UNDERSCORE_ON_WIN32 + 'umask', [rffi.MODE_T], rffi.MODE_T) + +@replace_os_function('umask') +def umask(newmask): + return widen(c_umask(newmask)) + +c_chown = external('chown', [rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_lchown = external('lchown', [rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_fchown = external('fchown', [rffi.INT, rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('chown') +def chown(path, uid, gid): + handle_posix_error('chown', c_chown(path, uid, gid)) + +@replace_os_function('lchown') +def lchown(path, uid, gid): + handle_posix_error('lchown', 
c_lchown(path, uid, gid)) + +@replace_os_function('fchown') +def fchown(fd, uid, gid): + handle_posix_error('fchown', c_fchown(fd, uid, gid)) -if os.name == 'nt': +#___________________________________________________________________ + +UTIMBUFP = lltype.Ptr(UTIMBUF) +c_utime = external('utime', [rffi.CCHARP, UTIMBUFP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +if HAVE_UTIMES: + class CConfig: + _compilation_info_ = eci + TIMEVAL = rffi_platform.Struct('struct timeval', [ + ('tv_sec', rffi.LONG), + ('tv_usec', rffi.LONG)]) + config = rffi_platform.configure(CConfig) + TIMEVAL = config['TIMEVAL'] + TIMEVAL2P = rffi.CArrayPtr(TIMEVAL) + c_utimes = external('utimes', [rffi.CCHARP, TIMEVAL2P], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +if _WIN32: from rpython.rlib import rwin32 - os_kill = rwin32.os_kill + GetSystemTime = external( + 'GetSystemTime', + [lltype.Ptr(rwin32.SYSTEMTIME)], + lltype.Void, + calling_conv='win', + save_err=rffi.RFFI_SAVE_LASTERROR) + + SystemTimeToFileTime = external( + 'SystemTimeToFileTime', + [lltype.Ptr(rwin32.SYSTEMTIME), + lltype.Ptr(rwin32.FILETIME)], + rwin32.BOOL, + calling_conv='win', + save_err=rffi.RFFI_SAVE_LASTERROR) + + SetFileTime = external( + 'SetFileTime', + [rwin32.HANDLE, + lltype.Ptr(rwin32.FILETIME), + lltype.Ptr(rwin32.FILETIME), + lltype.Ptr(rwin32.FILETIME)], + rwin32.BOOL, + calling_conv='win') + + +@replace_os_function('utime') +@specialize.argtype(0, 1) +def utime(path, times): + if not _WIN32: + path = _as_bytes0(path) + if times is None: + error = c_utime(path, lltype.nullptr(UTIMBUFP.TO)) + else: + actime, modtime = times + if HAVE_UTIMES: + import math + l_times = lltype.malloc(TIMEVAL2P.TO, 2, flavor='raw') + fracpart, intpart = math.modf(actime) + rffi.setintfield(l_times[0], 'c_tv_sec', int(intpart)) + rffi.setintfield(l_times[0], 'c_tv_usec', int(fracpart * 1e6)) + fracpart, intpart = math.modf(modtime) + rffi.setintfield(l_times[1], 'c_tv_sec', int(intpart)) + rffi.setintfield(l_times[1], 'c_tv_usec', 
int(fracpart * 1e6)) + error = c_utimes(path, l_times) + lltype.free(l_times, flavor='raw') + else: + # we only have utime(), which does not allow + # sub-second resolution + l_utimbuf = lltype.malloc(UTIMBUFP.TO, flavor='raw') + l_utimbuf.c_actime = rffi.r_time_t(actime) + l_utimbuf.c_modtime = rffi.r_time_t(modtime) + error = c_utime(path, l_utimbuf) + lltype.free(l_utimbuf, flavor='raw') + handle_posix_error('utime', error) + else: # _WIN32 case + from rpython.rlib.rwin32file import time_t_to_FILE_TIME + traits = _preferred_traits(path) + win32traits = make_win32_traits(traits) + path = traits.as_str0(path) + hFile = win32traits.CreateFile(path, + win32traits.FILE_WRITE_ATTRIBUTES, 0, + None, win32traits.OPEN_EXISTING, + win32traits.FILE_FLAG_BACKUP_SEMANTICS, + rwin32.NULL_HANDLE) + if hFile == rwin32.INVALID_HANDLE_VALUE: + raise rwin32.lastSavedWindowsError() + ctime = lltype.nullptr(rwin32.FILETIME) + atime = lltype.malloc(rwin32.FILETIME, flavor='raw') + mtime = lltype.malloc(rwin32.FILETIME, flavor='raw') + try: + if times is None: + now = lltype.malloc(rwin32.SYSTEMTIME, flavor='raw') + try: + GetSystemTime(now) + if (not SystemTimeToFileTime(now, atime) or + not SystemTimeToFileTime(now, mtime)): + raise rwin32.lastSavedWindowsError() + finally: + lltype.free(now, flavor='raw') + else: + actime, modtime = times + time_t_to_FILE_TIME(actime, atime) + time_t_to_FILE_TIME(modtime, mtime) + if not SetFileTime(hFile, ctime, atime, mtime): + raise rwin32.lastSavedWindowsError() + finally: + rwin32.CloseHandle(hFile) + lltype.free(atime, flavor='raw') + lltype.free(mtime, flavor='raw') + +if not _WIN32: + TMSP = lltype.Ptr(TMS) + c_times = external('times', [TMSP], CLOCK_T, + save_err=rffi.RFFI_SAVE_ERRNO | + rffi.RFFI_ZERO_ERRNO_BEFORE) + + # Here is a random extra platform parameter which is important. + # Strictly speaking, this should probably be retrieved at runtime, not + # at translation time. 
+ CLOCK_TICKS_PER_SECOND = float(os.sysconf('SC_CLK_TCK')) +else: + GetCurrentProcess = external( + 'GetCurrentProcess', [], + rwin32.HANDLE, calling_conv='win') + GetProcessTimes = external( + 'GetProcessTimes', [ + rwin32.HANDLE, + lltype.Ptr(rwin32.FILETIME), lltype.Ptr(rwin32.FILETIME), + lltype.Ptr(rwin32.FILETIME), lltype.Ptr(rwin32.FILETIME)], + rwin32.BOOL, calling_conv='win') + +@replace_os_function('times') +def times(): + if not _WIN32: + l_tmsbuf = lltype.malloc(TMSP.TO, flavor='raw') + try: + # note: times() can return a negative value (or even -1) + # even if there is no error + result = rffi.cast(lltype.Signed, c_times(l_tmsbuf)) + if result == -1: + errno = get_saved_errno() + if errno != 0: + raise OSError(errno, 'times() failed') + return ( + rffi.cast(lltype.Signed, l_tmsbuf.c_tms_utime) + / CLOCK_TICKS_PER_SECOND, + rffi.cast(lltype.Signed, l_tmsbuf.c_tms_stime) + / CLOCK_TICKS_PER_SECOND, + rffi.cast(lltype.Signed, l_tmsbuf.c_tms_cutime) + / CLOCK_TICKS_PER_SECOND, + rffi.cast(lltype.Signed, l_tmsbuf.c_tms_cstime) + / CLOCK_TICKS_PER_SECOND, + result / CLOCK_TICKS_PER_SECOND) + finally: + lltype.free(l_tmsbuf, flavor='raw') + else: + pcreate = lltype.malloc(rwin32.FILETIME, flavor='raw') + pexit = lltype.malloc(rwin32.FILETIME, flavor='raw') + pkernel = lltype.malloc(rwin32.FILETIME, flavor='raw') + puser = lltype.malloc(rwin32.FILETIME, flavor='raw') + try: + hProc = GetCurrentProcess() + GetProcessTimes(hProc, pcreate, pexit, pkernel, puser) + # The fields of a FILETIME structure are the hi and lo parts + # of a 64-bit value expressed in 100 nanosecond units + # (of course). + return ( + rffi.cast(lltype.Signed, pkernel.c_dwHighDateTime) * 429.4967296 + + rffi.cast(lltype.Signed, pkernel.c_dwLowDateTime) * 1E-7, + rffi.cast(lltype.Signed, puser.c_dwHighDateTime) * 429.4967296 + + rffi.cast(lltype.Signed, puser.c_dwLowDateTime) * 1E-7, + 0., 0., 0.) 
+ finally: + lltype.free(puser, flavor='raw') + lltype.free(pkernel, flavor='raw') + lltype.free(pexit, flavor='raw') + lltype.free(pcreate, flavor='raw') + +c_kill = external('kill', [rffi.PID_T, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_killpg = external('killpg', [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_exit = external('_exit', [rffi.INT], lltype.Void) +c_nice = external('nice', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_FULL_ERRNO_ZERO) + +@replace_os_function('kill') +def kill(pid, sig): + if not _WIN32: + return handle_posix_error('kill', c_kill(pid, sig)) + else: + if sig == rwin32.CTRL_C_EVENT or sig == rwin32.CTRL_BREAK_EVENT: + if rwin32.GenerateConsoleCtrlEvent(sig, pid) == 0: + raise rwin32.lastSavedWindowsError( + 'kill() failed generating event') + return + handle = rwin32.OpenProcess(rwin32.PROCESS_ALL_ACCESS, False, pid) + if not handle: + raise rwin32.lastSavedWindowsError('kill() failed opening process') + try: + if rwin32.TerminateProcess(handle, sig) == 0: + raise rwin32.lastSavedWindowsError( + 'kill() failed to terminate process') + finally: + rwin32.CloseHandle(handle) + +@replace_os_function('killpg') +def killpg(pgrp, sig): + return handle_posix_error('killpg', c_killpg(pgrp, sig)) + +@replace_os_function('_exit') +@jit.dont_look_inside +def exit(status): + debug.debug_flush() + c_exit(status) + +@replace_os_function('nice') +def nice(inc): + # Assume that the system provides a standard-compliant version + # of nice() that returns the new priority. Nowadays, FreeBSD + # might be the last major non-compliant system (xxx check me). 
+ res = widen(c_nice(inc)) + if res == -1: + err = get_saved_errno() + if err != 0: + raise OSError(err, "os_nice failed") + return res + +c_ctermid = external('ctermid', [rffi.CCHARP], rffi.CCHARP) + +@replace_os_function('ctermid') +def ctermid(): + return rffi.charp2str(c_ctermid(lltype.nullptr(rffi.CCHARP.TO))) + +c_tmpnam = external('tmpnam', [rffi.CCHARP], rffi.CCHARP) + +@replace_os_function('tmpnam') +def tmpnam(): + return rffi.charp2str(c_tmpnam(lltype.nullptr(rffi.CCHARP.TO))) + +#___________________________________________________________________ + +c_getpid = external('getpid', [], rffi.PID_T, + releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) +c_getppid = external('getppid', [], rffi.PID_T, + releasegil=False, save_err=rffi.RFFI_SAVE_ERRNO) +c_setsid = external('setsid', [], rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) +c_getsid = external('getsid', [rffi.PID_T], rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('getpid') +def getpid(): + return handle_posix_error('getpid', c_getpid()) + +@replace_os_function('getppid') +def getppid(): + return handle_posix_error('getppid', c_getppid()) + +@replace_os_function('setsid') +def setsid(): + return handle_posix_error('setsid', c_setsid()) + +@replace_os_function('getsid') +def getsid(pid): + return handle_posix_error('getsid', c_getsid(pid)) + +c_getpgid = external('getpgid', [rffi.PID_T], rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) +c_setpgid = external('setpgid', [rffi.PID_T, rffi.PID_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('getpgid') +def getpgid(pid): + return handle_posix_error('getpgid', c_getpgid(pid)) + +@replace_os_function('setpgid') +def setpgid(pid, gid): + handle_posix_error('setpgid', c_setpgid(pid, gid)) + +PID_GROUPS_T = rffi.CArrayPtr(rffi.PID_T) +c_getgroups = external('getgroups', [rffi.INT, PID_GROUPS_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_setgroups = external('setgroups', [rffi.SIZE_T, PID_GROUPS_T], rffi.INT, + 
save_err=rffi.RFFI_SAVE_ERRNO) +c_initgroups = external('initgroups', [rffi.CCHARP, rffi.PID_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('getgroups') +def getgroups(): + n = handle_posix_error('getgroups', + c_getgroups(0, lltype.nullptr(PID_GROUPS_T.TO))) + groups = lltype.malloc(PID_GROUPS_T.TO, n, flavor='raw') + try: + n = handle_posix_error('getgroups', c_getgroups(n, groups)) + return [widen(groups[i]) for i in range(n)] + finally: + lltype.free(groups, flavor='raw') + +@replace_os_function('setgroups') +def setgroups(gids): + n = len(gids) + groups = lltype.malloc(PID_GROUPS_T.TO, n, flavor='raw') + try: + for i in range(n): + groups[i] = rffi.cast(rffi.PID_T, gids[i]) + handle_posix_error('setgroups', c_setgroups(n, groups)) + finally: + lltype.free(groups, flavor='raw') + +@replace_os_function('initgroups') +def initgroups(user, group): + handle_posix_error('initgroups', c_initgroups(user, group)) + +if GETPGRP_HAVE_ARG: + c_getpgrp = external('getpgrp', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) else: - os_kill = os.kill + c_getpgrp = external('getpgrp', [], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +if SETPGRP_HAVE_ARG: + c_setpgrp = external('setpgrp', [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +else: + c_setpgrp = external('setpgrp', [], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('getpgrp') +def getpgrp(): + if GETPGRP_HAVE_ARG: + return handle_posix_error('getpgrp', c_getpgrp(0)) + else: + return handle_posix_error('getpgrp', c_getpgrp()) + +@replace_os_function('setpgrp') +def setpgrp(): + if SETPGRP_HAVE_ARG: + handle_posix_error('setpgrp', c_setpgrp(0, 0)) + else: + handle_posix_error('setpgrp', c_setpgrp()) + +c_tcgetpgrp = external('tcgetpgrp', [rffi.INT], rffi.PID_T, + save_err=rffi.RFFI_SAVE_ERRNO) +c_tcsetpgrp = external('tcsetpgrp', [rffi.INT, rffi.PID_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('tcgetpgrp') +def tcgetpgrp(fd): + 
return handle_posix_error('tcgetpgrp', c_tcgetpgrp(fd)) + +@replace_os_function('tcsetpgrp') +def tcsetpgrp(fd, pgrp): + return handle_posix_error('tcsetpgrp', c_tcsetpgrp(fd, pgrp)) + +#___________________________________________________________________ + +c_getuid = external('getuid', [], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) +c_geteuid = external('geteuid', [], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) +c_setuid = external('setuid', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_seteuid = external('seteuid', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_getgid = external('getgid', [], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) +c_getegid = external('getegid', [], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) +c_setgid = external('setgid', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_setegid = external('setegid', [rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('getuid') +def getuid(): + return handle_posix_error('getuid', c_getuid()) + +@replace_os_function('geteuid') +def geteuid(): + return handle_posix_error('geteuid', c_geteuid()) + +@replace_os_function('setuid') +def setuid(uid): + handle_posix_error('setuid', c_setuid(uid)) + +@replace_os_function('seteuid') +def seteuid(uid): + handle_posix_error('seteuid', c_seteuid(uid)) + +@replace_os_function('getgid') +def getgid(): + return handle_posix_error('getgid', c_getgid()) + +@replace_os_function('getegid') +def getegid(): + return handle_posix_error('getegid', c_getegid()) + +@replace_os_function('setgid') +def setgid(gid): + handle_posix_error('setgid', c_setgid(gid)) + +@replace_os_function('setegid') +def setegid(gid): + handle_posix_error('setegid', c_setegid(gid)) + +c_setreuid = external('setreuid', [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_setregid = external('setregid', [rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('setreuid') +def setreuid(ruid, euid): + 
handle_posix_error('setreuid', c_setreuid(ruid, euid)) + +@replace_os_function('setregid') +def setregid(rgid, egid): + handle_posix_error('setregid', c_setregid(rgid, egid)) + +c_getresuid = external('getresuid', [rffi.INTP] * 3, rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_getresgid = external('getresgid', [rffi.INTP] * 3, rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_setresuid = external('setresuid', [rffi.INT] * 3, rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) +c_setresgid = external('setresgid', [rffi.INT] * 3, rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('getresuid') +def getresuid(): + out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') + try: + handle_posix_error('getresuid', + c_getresuid(rffi.ptradd(out, 0), + rffi.ptradd(out, 1), + rffi.ptradd(out, 2))) + return (widen(out[0]), widen(out[1]), widen(out[2])) + finally: + lltype.free(out, flavor='raw') + +@replace_os_function('getresgid') +def getresgid(): + out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') + try: + handle_posix_error('getresgid', + c_getresgid(rffi.ptradd(out, 0), + rffi.ptradd(out, 1), + rffi.ptradd(out, 2))) + return (widen(out[0]), widen(out[1]), widen(out[2])) + finally: + lltype.free(out, flavor='raw') + +@replace_os_function('setresuid') +def setresuid(ruid, euid, suid): + handle_posix_error('setresuid', c_setresuid(ruid, euid, suid)) + +@replace_os_function('setresgid') +def setresgid(rgid, egid, sgid): + handle_posix_error('setresgid', c_setresgid(rgid, egid, sgid)) + +#___________________________________________________________________ + +c_chroot = external('chroot', [rffi.CCHARP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO, + macro=_MACRO_ON_POSIX) + +@replace_os_function('chroot') +def chroot(path): + handle_posix_error('chroot', c_chroot(_as_bytes0(path))) + +if not _WIN32: + CHARARRAY1 = lltype.FixedSizeArray(lltype.Char, 1) + class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes = ['sys/utsname.h'] + ) + UTSNAME = 
rffi_platform.Struct('struct utsname', [ + ('sysname', CHARARRAY1), + ('nodename', CHARARRAY1), + ('release', CHARARRAY1), + ('version', CHARARRAY1), + ('machine', CHARARRAY1)]) + config = rffi_platform.configure(CConfig) + UTSNAMEP = lltype.Ptr(config['UTSNAME']) + + c_uname = external('uname', [UTSNAMEP], rffi.INT, + compilation_info=CConfig._compilation_info_, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('uname') +def uname(): + l_utsbuf = lltype.malloc(UTSNAMEP.TO, flavor='raw') + try: + handle_posix_error('uname', c_uname(l_utsbuf)) + return ( + rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_sysname)), + rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_nodename)), + rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_release)), + rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_version)), + rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_machine)), + ) + finally: + lltype.free(l_utsbuf, flavor='raw') + +# These are actually macros on some/most systems +c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT) +c_major = external('major', [rffi.INT], rffi.INT) +c_minor = external('minor', [rffi.INT], rffi.INT) + +@replace_os_function('makedev') +@jit.dont_look_inside +def makedev(maj, min): + return c_makedev(maj, min) + +@replace_os_function('major') +@jit.dont_look_inside +def major(dev): + return c_major(dev) + +@replace_os_function('minor') +@jit.dont_look_inside +def minor(dev): + return c_minor(dev) + +#___________________________________________________________________ + +c_sysconf = external('sysconf', [rffi.INT], rffi.LONG, + save_err=rffi.RFFI_FULL_ERRNO_ZERO) +c_fpathconf = external('fpathconf', [rffi.INT, rffi.INT], rffi.LONG, + save_err=rffi.RFFI_FULL_ERRNO_ZERO) +c_pathconf = external('pathconf', [rffi.CCHARP, rffi.INT], rffi.LONG, + save_err=rffi.RFFI_FULL_ERRNO_ZERO) +c_confstr = external('confstr', + [rffi.INT, rffi.CCHARP, rffi.SIZE_T], rffi.SIZE_T, + save_err=rffi.RFFI_FULL_ERRNO_ZERO) + +@replace_os_function('sysconf') +def 
sysconf(value): + res = c_sysconf(value) + if res == -1: + errno = get_saved_errno() + if errno != 0: + raise OSError(errno, "sysconf failed") + return res + +@replace_os_function('fpathconf') +def fpathconf(fd, value): + res = c_fpathconf(fd, value) + if res == -1: + errno = get_saved_errno() + if errno != 0: + raise OSError(errno, "fpathconf failed") + return res + +@replace_os_function('pathconf') +def pathconf(path, value): + res = c_pathconf(_as_bytes0(path), value) + if res == -1: + errno = get_saved_errno() + if errno != 0: + raise OSError(errno, "pathconf failed") + return res + +@replace_os_function('confstr') +def confstr(value): + n = intmask(c_confstr(value, lltype.nullptr(rffi.CCHARP.TO), 0)) + if n > 0: + buf = lltype.malloc(rffi.CCHARP.TO, n, flavor='raw') + try: + c_confstr(value, buf, n) + return rffi.charp2strn(buf, n) + finally: + lltype.free(buf, flavor='raw') + else: + errno = get_saved_errno() + if errno != 0: + raise OSError(errno, "confstr failed") + return None + +# ____________________________________________________________ +# Support for os.environ + +# XXX only for systems where os.environ is an instance of _Environ, +# which should cover Unix and Windows at least +assert type(os.environ) is not dict + +from rpython.rtyper.controllerentry import ControllerEntryForPrebuilt + +class EnvironExtRegistry(ControllerEntryForPrebuilt): + _about_ = os.environ + + def getcontroller(self): + from rpython.rlib.rposix_environ import OsEnvironController + return OsEnvironController() diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rposix_stat.py pypy-5.0.1+dfsg/rpython/rlib/rposix_stat.py --- pypy-4.0.1+dfsg/rpython/rlib/rposix_stat.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rposix_stat.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,622 @@ +"""Annotation and rtyping support for the result of os.stat(), os.lstat() +and os.fstat(). 
In RPython like in plain Python the stat result can be +indexed like a tuple but also exposes the st_xxx attributes. +""" + +import os, sys + +from rpython.flowspace.model import Constant +from rpython.flowspace.operation import op +from rpython.annotator import model as annmodel +from rpython.rtyper import extregistry +from rpython.tool.pairtype import pairtype +from rpython.rtyper.tool import rffi_platform as platform +from rpython.rtyper.llannotation import lltype_to_annotation +from rpython.rtyper.rmodel import Repr +from rpython.rtyper.rint import IntegerRepr +from rpython.rtyper.error import TyperError + +from rpython.rlib._os_support import _preferred_traits, string_traits +from rpython.rlib.objectmodel import specialize +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rposix import ( + replace_os_function, handle_posix_error, _as_bytes0) + +_WIN32 = sys.platform.startswith('win') +_LINUX = sys.platform.startswith('linux') + +if _WIN32: + from rpython.rlib import rwin32 + from rpython.rlib.rwin32file import make_win32_traits + +# Support for float times is here. +# - ALL_STAT_FIELDS contains Float fields if the system can retrieve +# sub-second timestamps. +# - TIMESPEC is defined when the "struct stat" contains st_atim field. 
+ +if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'): + TIMESPEC = platform.Struct('struct timespec', + [('tv_sec', rffi.TIME_T), + ('tv_nsec', rffi.LONG)]) +else: + TIMESPEC = None + +# all possible fields - some of them are not available on all platforms +ALL_STAT_FIELDS = [ + ("st_mode", lltype.Signed), + ("st_ino", lltype.SignedLongLong), + ("st_dev", lltype.SignedLongLong), + ("st_nlink", lltype.Signed), + ("st_uid", lltype.Signed), + ("st_gid", lltype.Signed), + ("st_size", lltype.SignedLongLong), + ("st_atime", lltype.Float), + ("st_mtime", lltype.Float), + ("st_ctime", lltype.Float), + ("st_blksize", lltype.Signed), + ("st_blocks", lltype.Signed), + ("st_rdev", lltype.Signed), + ("st_flags", lltype.Signed), + #("st_gen", lltype.Signed), -- new in CPy 2.5, not implemented + #("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented +] +N_INDEXABLE_FIELDS = 10 + +# For OO backends, expose only the portable fields (the first 10). +PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS] + +STATVFS_FIELDS = [ + ("f_bsize", lltype.Signed), + ("f_frsize", lltype.Signed), + ("f_blocks", lltype.Signed), + ("f_bfree", lltype.Signed), + ("f_bavail", lltype.Signed), + ("f_files", lltype.Signed), + ("f_ffree", lltype.Signed), + ("f_favail", lltype.Signed), + ("f_flag", lltype.Signed), + ("f_namemax", lltype.Signed), +] + + +# ____________________________________________________________ +# +# Annotation support + +class SomeStatResult(annmodel.SomeObject): + knowntype = os.stat_result + + def rtyper_makerepr(self, rtyper): + return StatResultRepr(rtyper) + + def rtyper_makekey(self): + return self.__class__, + + def getattr(self, s_attr): + assert s_attr.is_constant(), "non-constant attr name in getattr()" + attrname = s_attr.const + TYPE = STAT_FIELD_TYPES[attrname] + return lltype_to_annotation(TYPE) + + def _get_rmarshall_support_(self): # for rlib.rmarshal + # reduce and recreate stat_result objects from 10-tuples + # (we ignore 
the extra values here for simplicity and portability) + def stat_result_reduce(st): + return (st[0], st[1], st[2], st[3], st[4], + st[5], st[6], st[7], st[8], st[9]) + + def stat_result_recreate(tup): + return make_stat_result(tup + extra_zeroes) + s_reduced = annmodel.SomeTuple([lltype_to_annotation(TYPE) + for name, TYPE in PORTABLE_STAT_FIELDS]) + extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS)) + return s_reduced, stat_result_reduce, stat_result_recreate + + +class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)): + def getitem((s_sta, s_int)): + assert s_int.is_constant(), "os.stat()[index]: index must be constant" + index = s_int.const + assert 0 <= index < N_INDEXABLE_FIELDS, "os.stat()[index] out of range" + name, TYPE = STAT_FIELDS[index] + return lltype_to_annotation(TYPE) + + +class StatResultRepr(Repr): + + def __init__(self, rtyper): + self.rtyper = rtyper + self.stat_field_indexes = {} + for i, (name, TYPE) in enumerate(STAT_FIELDS): + self.stat_field_indexes[name] = i + + self.s_tuple = annmodel.SomeTuple( + [lltype_to_annotation(TYPE) for name, TYPE in STAT_FIELDS]) + self.r_tuple = rtyper.getrepr(self.s_tuple) + self.lowleveltype = self.r_tuple.lowleveltype + + def redispatch_getfield(self, hop, index): + rtyper = self.rtyper + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() + spaceop = op.getitem(hop.args_v[0], Constant(index)) + spaceop.result = hop.spaceop.result + hop2.spaceop = spaceop + hop2.args_v = spaceop.args + hop2.args_s = [self.s_tuple, s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + return hop2.dispatch() + + def rtype_getattr(self, hop): + s_attr = hop.args_s[1] + attr = s_attr.const + try: + index = self.stat_field_indexes[attr] + except KeyError: + raise TyperError("os.stat().%s: field not available" % (attr,)) + return self.redispatch_getfield(hop, index) + + +class __extend__(pairtype(StatResultRepr, IntegerRepr)): + def rtype_getitem((r_sta, r_int), 
hop): + s_int = hop.args_s[1] + index = s_int.const + return r_sta.redispatch_getfield(hop, index) + +s_StatResult = SomeStatResult() + +def make_stat_result(tup): + """Turn a tuple into an os.stat_result object.""" + positional = tuple( + lltype.cast_primitive(TYPE, value) for value, (name, TYPE) in + zip(tup, STAT_FIELDS)[:N_INDEXABLE_FIELDS]) + kwds = {} + for value, (name, TYPE) in zip(tup, STAT_FIELDS)[N_INDEXABLE_FIELDS:]: + kwds[name] = lltype.cast_primitive(TYPE, value) + return os.stat_result(positional, kwds) + + +class MakeStatResultEntry(extregistry.ExtRegistryEntry): + _about_ = make_stat_result + + def compute_result_annotation(self, s_tup): + return s_StatResult + + def specialize_call(self, hop): + r_StatResult = hop.rtyper.getrepr(s_StatResult) + [v_result] = hop.inputargs(r_StatResult.r_tuple) + # no-op conversion from r_StatResult.r_tuple to r_StatResult + hop.exception_cannot_occur() + return v_result + + +class SomeStatvfsResult(annmodel.SomeObject): + if hasattr(os, 'statvfs_result'): + knowntype = os.statvfs_result + else: + knowntype = None # will not be used + + def rtyper_makerepr(self, rtyper): + return StatvfsResultRepr(rtyper) + + def rtyper_makekey(self): + return self.__class__, + + def getattr(self, s_attr): + assert s_attr.is_constant() + TYPE = STATVFS_FIELD_TYPES[s_attr.const] + return lltype_to_annotation(TYPE) + + +class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)): + def getitem((s_stat, s_int)): + assert s_int.is_constant() + name, TYPE = STATVFS_FIELDS[s_int.const] + return lltype_to_annotation(TYPE) + + +s_StatvfsResult = SomeStatvfsResult() + + +class StatvfsResultRepr(Repr): + def __init__(self, rtyper): + self.rtyper = rtyper + self.statvfs_field_indexes = {} + for i, (name, TYPE) in enumerate(STATVFS_FIELDS): + self.statvfs_field_indexes[name] = i + + self.s_tuple = annmodel.SomeTuple( + [lltype_to_annotation(TYPE) for name, TYPE in STATVFS_FIELDS]) + self.r_tuple = rtyper.getrepr(self.s_tuple) + 
self.lowleveltype = self.r_tuple.lowleveltype + + def redispatch_getfield(self, hop, index): + rtyper = self.rtyper + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() + spaceop = op.getitem(hop.args_v[0], Constant(index)) + spaceop.result = hop.spaceop.result + hop2.spaceop = spaceop + hop2.args_v = spaceop.args + hop2.args_s = [self.s_tuple, s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + return hop2.dispatch() + + def rtype_getattr(self, hop): + s_attr = hop.args_s[1] + attr = s_attr.const + try: + index = self.statvfs_field_indexes[attr] + except KeyError: + raise TyperError("os.statvfs().%s: field not available" % (attr,)) + return self.redispatch_getfield(hop, index) + + +class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)): + def rtype_getitem((r_sta, r_int), hop): + s_int = hop.args_s[1] + index = s_int.const + return r_sta.redispatch_getfield(hop, index) + + +def make_statvfs_result(tup): + args = tuple( + lltype.cast_primitive(TYPE, value) for value, (name, TYPE) in + zip(tup, STATVFS_FIELDS)) + return os.statvfs_result(args) + +class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry): + _about_ = make_statvfs_result + + def compute_result_annotation(self, s_tup): + return s_StatvfsResult + + def specialize_call(self, hop): + r_StatvfsResult = hop.rtyper.getrepr(s_StatvfsResult) + [v_result] = hop.inputargs(r_StatvfsResult.r_tuple) + hop.exception_cannot_occur() + return v_result + +# ____________________________________________________________ +# +# RFFI support + +if sys.platform.startswith('win'): + _name_struct_stat = '_stati64' + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h'] +else: + if _LINUX: + _name_struct_stat = 'stat64' + else: + _name_struct_stat = 'stat' + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h'] + +compilation_info = ExternalCompilationInfo( + # This must be set to 64 on some systems to enable large file support. 
+ #pre_include_bits = ['#define _FILE_OFFSET_BITS 64'], + # ^^^ nowadays it's always set in all C files we produce. + includes=INCLUDES +) + +if TIMESPEC is not None: + class CConfig_for_timespec: + _compilation_info_ = compilation_info + TIMESPEC = TIMESPEC + TIMESPEC = lltype.Ptr( + platform.configure(CConfig_for_timespec)['TIMESPEC']) + + +def posix_declaration(try_to_add=None): + global STAT_STRUCT, STATVFS_STRUCT + + LL_STAT_FIELDS = STAT_FIELDS[:] + if try_to_add: + LL_STAT_FIELDS.append(try_to_add) + + if TIMESPEC is not None: + + def _expand(lst, originalname, timespecname): + for i, (_name, _TYPE) in enumerate(lst): + if _name == originalname: + # replace the 'st_atime' field of type rffi.DOUBLE + # with a field 'st_atim' of type 'struct timespec' + lst[i] = (timespecname, TIMESPEC.TO) + break + + _expand(LL_STAT_FIELDS, 'st_atime', 'st_atim') + _expand(LL_STAT_FIELDS, 'st_mtime', 'st_mtim') + _expand(LL_STAT_FIELDS, 'st_ctime', 'st_ctim') + + del _expand + else: + # Replace float fields with integers + for name in ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime'): + for i, (_name, _TYPE) in enumerate(LL_STAT_FIELDS): + if _name == name: + LL_STAT_FIELDS[i] = (_name, lltype.Signed) + break + + class CConfig: + _compilation_info_ = compilation_info + STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS) + STATVFS_STRUCT = platform.Struct('struct statvfs', STATVFS_FIELDS) + + try: + config = platform.configure(CConfig, ignore_errors=try_to_add is not None) + except platform.CompilationError: + if try_to_add: + return # failed to add this field, give up + raise + + STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT']) + STATVFS_STRUCT = lltype.Ptr(config['STATVFS_STRUCT']) + if try_to_add: + STAT_FIELDS.append(try_to_add) + + +# This lists only the fields that have been found on the underlying platform. +# Initially only the PORTABLE_STAT_FIELDS, but more may be added by the +# following loop. 
+STAT_FIELDS = PORTABLE_STAT_FIELDS[:] + +if sys.platform != 'win32': + posix_declaration() + for _i in range(len(PORTABLE_STAT_FIELDS), len(ALL_STAT_FIELDS)): + posix_declaration(ALL_STAT_FIELDS[_i]) + del _i + +# these two global vars only list the fields defined in the underlying platform +STAT_FIELD_TYPES = dict(STAT_FIELDS) # {'st_xxx': TYPE} +STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS] +del _name, _TYPE + +STATVFS_FIELD_TYPES = dict(STATVFS_FIELDS) +STATVFS_FIELD_NAMES = [name for name, tp in STATVFS_FIELDS] + +def build_stat_result(st): + # only for LL backends + if TIMESPEC is not None: + atim = st.c_st_atim; atime = int(atim.c_tv_sec) + 1E-9 * int(atim.c_tv_nsec) + mtim = st.c_st_mtim; mtime = int(mtim.c_tv_sec) + 1E-9 * int(mtim.c_tv_nsec) + ctim = st.c_st_ctim; ctime = int(ctim.c_tv_sec) + 1E-9 * int(ctim.c_tv_nsec) + else: + atime = st.c_st_atime + mtime = st.c_st_mtime + ctime = st.c_st_ctime + + result = (st.c_st_mode, + st.c_st_ino, + st.c_st_dev, + st.c_st_nlink, + st.c_st_uid, + st.c_st_gid, + st.c_st_size, + atime, + mtime, + ctime) + + if "st_blksize" in STAT_FIELD_TYPES: result += (st.c_st_blksize,) + if "st_blocks" in STAT_FIELD_TYPES: result += (st.c_st_blocks,) + if "st_rdev" in STAT_FIELD_TYPES: result += (st.c_st_rdev,) + if "st_flags" in STAT_FIELD_TYPES: result += (st.c_st_flags,) + + return make_stat_result(result) + + +def build_statvfs_result(st): + return make_statvfs_result(( + st.c_f_bsize, + st.c_f_frsize, + st.c_f_blocks, + st.c_f_bfree, + st.c_f_bavail, + st.c_f_files, + st.c_f_ffree, + st.c_f_favail, + st.c_f_flag, + st.c_f_namemax + )) + + +# Implement and register os.stat() & variants + +if not _WIN32: + c_fstat = rffi.llexternal('fstat64' if _LINUX else 'fstat', + [rffi.INT, STAT_STRUCT], rffi.INT, + compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO, + macro=True) + c_stat = rffi.llexternal('stat64' if _LINUX else 'stat', + [rffi.CCHARP, STAT_STRUCT], rffi.INT, + 
compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO, + macro=True) + c_lstat = rffi.llexternal('lstat64' if _LINUX else 'lstat', + [rffi.CCHARP, STAT_STRUCT], rffi.INT, + compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO, + macro=True) + + c_fstatvfs = rffi.llexternal('fstatvfs', + [rffi.INT, STATVFS_STRUCT], rffi.INT, + compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO) + c_statvfs = rffi.llexternal('statvfs', + [rffi.CCHARP, STATVFS_STRUCT], rffi.INT, + compilation_info=compilation_info, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_os_function('fstat') +def fstat(fd): + if not _WIN32: + with lltype.scoped_alloc(STAT_STRUCT.TO) as stresult: + handle_posix_error('fstat', c_fstat(fd, stresult)) + return build_stat_result(stresult) + else: + handle = rwin32.get_osfhandle(fd) + win32traits = make_win32_traits(string_traits) + filetype = win32traits.GetFileType(handle) + if filetype == win32traits.FILE_TYPE_CHAR: + # console or LPT device + return make_stat_result((win32traits._S_IFCHR, + 0, 0, 0, 0, 0, + 0, 0, 0, 0)) + elif filetype == win32traits.FILE_TYPE_PIPE: + # socket or named pipe + return make_stat_result((win32traits._S_IFIFO, + 0, 0, 0, 0, 0, + 0, 0, 0, 0)) + elif filetype == win32traits.FILE_TYPE_UNKNOWN: + error = rwin32.GetLastError_saved() + if error != 0: + raise WindowsError(error, "os_fstat failed") + # else: unknown but valid file + + # normal disk file (FILE_TYPE_DISK) + info = lltype.malloc(win32traits.BY_HANDLE_FILE_INFORMATION, + flavor='raw', zero=True) + try: + res = win32traits.GetFileInformationByHandle(handle, info) + if res == 0: + raise WindowsError(rwin32.GetLastError_saved(), + "os_fstat failed") + return win32_by_handle_info_to_stat(win32traits, info) + finally: + lltype.free(info, flavor='raw') + +@replace_os_function('stat') +@specialize.argtype(0) +def stat(path): + if not _WIN32: + with lltype.scoped_alloc(STAT_STRUCT.TO) as stresult: + arg = _as_bytes0(path) + 
handle_posix_error('stat', c_stat(arg, stresult)) + return build_stat_result(stresult) + else: + traits = _preferred_traits(path) + path = traits.as_str0(path) + return win32_xstat(traits, path, traverse=True) + +@replace_os_function('lstat') +@specialize.argtype(0) +def lstat(path): + if not _WIN32: + with lltype.scoped_alloc(STAT_STRUCT.TO) as stresult: + arg = _as_bytes0(path) + handle_posix_error('lstat', c_lstat(arg, stresult)) + return build_stat_result(stresult) + else: + traits = _preferred_traits(path) + path = traits.as_str0(path) + return win32_xstat(traits, path, traverse=False) + +@replace_os_function('fstatvfs') +def fstatvfs(fd): + with lltype.scoped_alloc(STATVFS_STRUCT.TO) as stresult: + handle_posix_error('fstatvfs', c_fstatvfs(fd, stresult)) + return build_statvfs_result(stresult) + +@replace_os_function('statvfs') +@specialize.argtype(0) +def statvfs(path): + with lltype.scoped_alloc(STATVFS_STRUCT.TO) as stresult: + arg = _as_bytes0(path) + handle_posix_error('statvfs', c_statvfs(arg, stresult)) + return build_statvfs_result(stresult) + +#__________________________________________________ +# Helper functions for win32 +if _WIN32: + from rpython.rlib.rwin32file import FILE_TIME_to_time_t_float + + def make_longlong(high, low): + return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low) + + # Seconds between 1.1.1601 and 1.1.1970 + secs_between_epochs = rffi.r_longlong(11644473600) + + @specialize.arg(0) + def win32_xstat(traits, path, traverse=False): + win32traits = make_win32_traits(traits) + with lltype.scoped_alloc( + win32traits.WIN32_FILE_ATTRIBUTE_DATA) as data: + res = win32traits.GetFileAttributesEx( + path, win32traits.GetFileExInfoStandard, data) + if res == 0: + errcode = rwin32.GetLastError_saved() + if errcode == win32traits.ERROR_SHARING_VIOLATION: + res = win32_attributes_from_dir( + win32traits, path, data) + if res == 0: + errcode = rwin32.GetLastError_saved() + raise WindowsError(errcode, "os_stat failed") + return 
win32_attribute_data_to_stat(win32traits, data) + + @specialize.arg(0) + def win32_attributes_to_mode(win32traits, attributes): + m = 0 + attributes = intmask(attributes) + if attributes & win32traits.FILE_ATTRIBUTE_DIRECTORY: + m |= win32traits._S_IFDIR | 0111 # IFEXEC for user,group,other + else: + m |= win32traits._S_IFREG + if attributes & win32traits.FILE_ATTRIBUTE_READONLY: + m |= 0444 + else: + m |= 0666 + return m + + @specialize.arg(0) + def win32_attribute_data_to_stat(win32traits, info): + st_mode = win32_attributes_to_mode(win32traits, info.c_dwFileAttributes) + st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow) + ctime = FILE_TIME_to_time_t_float(info.c_ftCreationTime) + mtime = FILE_TIME_to_time_t_float(info.c_ftLastWriteTime) + atime = FILE_TIME_to_time_t_float(info.c_ftLastAccessTime) + + result = (st_mode, + 0, 0, 0, 0, 0, + st_size, + atime, mtime, ctime) + + return make_stat_result(result) + + def win32_by_handle_info_to_stat(win32traits, info): + # similar to the one above + st_mode = win32_attributes_to_mode(win32traits, info.c_dwFileAttributes) + st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow) + ctime = FILE_TIME_to_time_t_float(info.c_ftCreationTime) + mtime = FILE_TIME_to_time_t_float(info.c_ftLastWriteTime) + atime = FILE_TIME_to_time_t_float(info.c_ftLastAccessTime) + + # specific to fstat() + st_ino = make_longlong(info.c_nFileIndexHigh, info.c_nFileIndexLow) + st_nlink = info.c_nNumberOfLinks + + result = (st_mode, + st_ino, 0, st_nlink, 0, 0, + st_size, + atime, mtime, ctime) + + return make_stat_result(result) + + @specialize.arg(0) + def win32_attributes_from_dir(win32traits, path, data): + filedata = lltype.malloc(win32traits.WIN32_FIND_DATA, flavor='raw') + try: + hFindFile = win32traits.FindFirstFile(path, filedata) + if hFindFile == rwin32.INVALID_HANDLE_VALUE: + return 0 + win32traits.FindClose(hFindFile) + data.c_dwFileAttributes = filedata.c_dwFileAttributes + 
rffi.structcopy(data.c_ftCreationTime, filedata.c_ftCreationTime) + rffi.structcopy(data.c_ftLastAccessTime, filedata.c_ftLastAccessTime) + rffi.structcopy(data.c_ftLastWriteTime, filedata.c_ftLastWriteTime) + data.c_nFileSizeHigh = filedata.c_nFileSizeHigh + data.c_nFileSizeLow = filedata.c_nFileSizeLow + return 1 + finally: + lltype.free(filedata, flavor='raw') + diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rshrinklist.py pypy-5.0.1+dfsg/rpython/rlib/rshrinklist.py --- pypy-4.0.1+dfsg/rpython/rlib/rshrinklist.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rshrinklist.py 2016-03-19 16:40:12.000000000 +0000 @@ -6,6 +6,8 @@ The twist is that occasionally append() will throw away the items for which must_keep() returns False. (It does so without changing the order.) + + See also rpython.rlib.rweaklist. """ _mixin_ = True diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rsocket.py pypy-5.0.1+dfsg/rpython/rlib/rsocket.py --- pypy-4.0.1+dfsg/rpython/rlib/rsocket.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rsocket.py 2016-03-19 16:40:12.000000000 +0000 @@ -516,6 +516,10 @@ """RPython-level socket object. 
""" fd = _c.INVALID_SOCKET + family = 0 + type = 0 + proto = 0 + timeout = -1.0 def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fd=_c.INVALID_SOCKET): @@ -531,6 +535,11 @@ self.proto = proto self.timeout = defaults.timeout + @staticmethod + def empty_rsocket(): + rsocket = instantiate(RSocket) + return rsocket + @rgc.must_be_light_finalizer def __del__(self): fd = self.fd diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rstacklet.py pypy-5.0.1+dfsg/rpython/rlib/rstacklet.py --- pypy-4.0.1+dfsg/rpython/rlib/rstacklet.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rstacklet.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,7 @@ import sys from rpython.rlib import _rffi_stacklet as _c from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import fetch_translated_config from rpython.rtyper.lltypesystem import lltype, llmemory DEBUG = False @@ -10,8 +10,8 @@ class StackletThread(object): @jit.dont_look_inside - def __init__(self, config): - self._gcrootfinder = _getgcrootfinder(config, we_are_translated()) + def __init__(self, _argument_ignored_for_backward_compatibility=None): + self._gcrootfinder = _getgcrootfinder(fetch_translated_config()) self._thrd = _c.newthread() if not self._thrd: raise MemoryError @@ -67,11 +67,8 @@ # ____________________________________________________________ -def _getgcrootfinder(config, translated): - if translated: - assert config is not None, ("you have to pass a valid config, " - "e.g. 
from 'driver.config'") - elif '__pypy__' in sys.builtin_module_names: +def _getgcrootfinder(config): + if config is None and '__pypy__' in sys.builtin_module_names: import py py.test.skip("cannot run the stacklet tests on top of pypy: " "calling directly the C function stacklet_switch() " diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rstring.py pypy-5.0.1+dfsg/rpython/rlib/rstring.py --- pypy-4.0.1+dfsg/rpython/rlib/rstring.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rstring.py 2016-03-19 16:40:12.000000000 +0000 @@ -485,6 +485,24 @@ else: return -1 + def prev_digit(self): + # After exhausting all n digits in next_digit(), you can walk them + # again in reverse order by calling prev_digit() exactly n times + i = self.i - 1 + assert i >= 0 + self.i = i + c = self.s[i] + digit = ord(c) + if '0' <= c <= '9': + digit -= ord('0') + elif 'A' <= c <= 'Z': + digit = (digit - ord('A')) + 10 + elif 'a' <= c <= 'z': + digit = (digit - ord('a')) + 10 + else: + raise AssertionError + return digit + # -------------- public API --------------------------------- INIT_SIZE = 100 # XXX tweak diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rstruct/nativefmttable.py pypy-5.0.1+dfsg/rpython/rlib/rstruct/nativefmttable.py --- pypy-4.0.1+dfsg/rpython/rlib/rstruct/nativefmttable.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rstruct/nativefmttable.py 2016-03-19 16:40:12.000000000 +0000 @@ -8,6 +8,7 @@ from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_singlefloat, widen from rpython.rlib.rstruct import standardfmttable as std +from rpython.rlib.rstruct.standardfmttable import native_is_bigendian from rpython.rlib.rstruct.error import StructError from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import lltype, rffi @@ -15,8 +16,6 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo -native_is_bigendian = struct.pack("=i", 1) == struct.pack(">i", 1) - native_fmttable = 
{ 'x': std.standard_fmttable['x'], 'c': std.standard_fmttable['c'], @@ -27,9 +26,6 @@ # ____________________________________________________________ -double_buf = lltype.malloc(rffi.DOUBLEP.TO, 1, flavor='raw', immortal=True) -float_buf = lltype.malloc(rffi.FLOATP.TO, 1, flavor='raw', immortal=True) - range_8_unroll = unrolling_iterable(list(reversed(range(8)))) range_4_unroll = unrolling_iterable(list(reversed(range(4)))) @@ -45,14 +41,6 @@ fmtiter.result.append(chr(value & 0xff)) value >>= 8 -@specialize.argtype(0) -def unpack_double(fmtiter): - input = fmtiter.read(sizeof_double) - p = rffi.cast(rffi.CCHARP, double_buf) - for i in range(sizeof_double): - p[i] = input[i] - doubleval = double_buf[0] - fmtiter.appendobj(doubleval) def pack_float(fmtiter): doubleval = fmtiter.accept_float_arg() @@ -68,16 +56,6 @@ fmtiter.result.append(chr(value & 0xff)) value >>= 8 -@specialize.argtype(0) -def unpack_float(fmtiter): - input = fmtiter.read(sizeof_float) - p = rffi.cast(rffi.CCHARP, float_buf) - for i in range(sizeof_float): - p[i] = input[i] - floatval = float_buf[0] - doubleval = float(floatval) - fmtiter.appendobj(doubleval) - # ____________________________________________________________ # # Use rffi_platform to get the native sizes and alignments from the C compiler @@ -134,10 +112,10 @@ if fmtchar == 'f': pack = pack_float - unpack = unpack_float + unpack = std.unpack_float elif fmtchar == 'd': pack = pack_double - unpack = unpack_double + unpack = std.unpack_double elif fmtchar == '?': pack = std.pack_bool unpack = std.unpack_bool diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rstruct/runpack.py pypy-5.0.1+dfsg/rpython/rlib/rstruct/runpack.py --- pypy-4.0.1+dfsg/rpython/rlib/rstruct/runpack.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rstruct/runpack.py 2016-03-19 16:40:12.000000000 +0000 @@ -38,6 +38,12 @@ def appendobj(self, value): self.value = value + + def get_buffer_as_string_maybe(self): + return self.mr.input, self.mr.inputpos + + def 
skip(self, size): + self.read(size) # XXX, could avoid taking the slice ReaderForPos.__name__ = 'ReaderForPos%d' % pos return ReaderForPos @@ -88,13 +94,6 @@ exec source.compile() in miniglobals self.unpack = miniglobals['unpack'] # override not-rpython version - def unpack(self, s): - # NOT_RPYTHON - res = unpack(self.fmt, s) - if len(res) == 1: - return res[0] - return res - def _freeze_(self): assert self.formats self._create_unpacking_func() @@ -103,6 +102,7 @@ def create_unpacker(unpack_str): fmtiter = FrozenUnpackIterator(unpack_str) fmtiter.interpret(unpack_str) + assert fmtiter._freeze_() return fmtiter create_unpacker._annspecialcase_ = 'specialize:memo' diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rstruct/standardfmttable.py pypy-5.0.1+dfsg/rpython/rlib/rstruct/standardfmttable.py --- pypy-4.0.1+dfsg/rpython/rlib/rstruct/standardfmttable.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rstruct/standardfmttable.py 2016-03-19 16:40:12.000000000 +0000 @@ -12,7 +12,12 @@ from rpython.rlib.rstruct import ieee from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.unroll import unrolling_iterable +from rpython.rlib.strstorage import str_storage_getitem +from rpython.rlib import rarithmetic +from rpython.rtyper.lltypesystem import rffi +native_is_bigendian = struct.pack("=i", 1) == struct.pack(">i", 1) +native_is_ieee754 = float.__getformat__('double').startswith('IEEE') def pack_pad(fmtiter, count): fmtiter.result.append_multiple_char('\x00', count) @@ -126,6 +131,24 @@ # ____________________________________________________________ +USE_FASTPATH = True # set to False by some tests +ALLOW_SLOWPATH = True # set to False by some tests + +class CannotUnpack(Exception): + pass + +@specialize.memo() +def unpack_fastpath(TYPE): + @specialize.argtype(0) + def do_unpack_fastpath(fmtiter): + size = rffi.sizeof(TYPE) + strbuf, pos = fmtiter.get_buffer_as_string_maybe() + if strbuf is None or pos % size != 0 or not 
USE_FASTPATH: + raise CannotUnpack + fmtiter.skip(size) + return str_storage_getitem(TYPE, strbuf, pos) + return do_unpack_fastpath + @specialize.argtype(0) def unpack_pad(fmtiter, count): fmtiter.read(count) @@ -153,15 +176,55 @@ end = count fmtiter.appendobj(data[1:end]) -def make_float_unpacker(size): +def make_ieee_unpacker(TYPE): @specialize.argtype(0) - def unpacker(fmtiter): - data = fmtiter.read(size) - fmtiter.appendobj(ieee.unpack_float(data, fmtiter.bigendian)) - return unpacker + def unpack_ieee(fmtiter): + size = rffi.sizeof(TYPE) + if fmtiter.bigendian != native_is_bigendian or not native_is_ieee754: + # fallback to the very slow unpacking code in ieee.py + data = fmtiter.read(size) + fmtiter.appendobj(ieee.unpack_float(data, fmtiter.bigendian)) + return + ## XXX check if the following code is still needed + ## if not str_storage_supported(TYPE): + ## # this happens e.g. on win32 and ARM32: we cannot read the string + ## # content as an array of doubles because it's not properly + ## # aligned. 
But we can read a longlong and convert to float + ## assert TYPE == rffi.DOUBLE + ## assert rffi.sizeof(TYPE) == 8 + ## return unpack_longlong2float(fmtiter) + try: + # fast path + val = unpack_fastpath(TYPE)(fmtiter) + except CannotUnpack: + # slow path, take the slice + input = fmtiter.read(size) + val = str_storage_getitem(TYPE, input, 0) + fmtiter.appendobj(float(val)) + return unpack_ieee + +@specialize.argtype(0) +def unpack_longlong2float(fmtiter): + from rpython.rlib.rstruct.runpack import runpack + from rpython.rlib.longlong2float import longlong2float + s = fmtiter.read(8) + llval = runpack('q', s) # this is a bit recursive, I know + doubleval = longlong2float(llval) + fmtiter.appendobj(doubleval) + + +unpack_double = make_ieee_unpacker(rffi.DOUBLE) +unpack_float = make_ieee_unpacker(rffi.FLOAT) # ____________________________________________________________ +def get_rffi_int_type(size, signed): + for TYPE in rffi.platform.numbertype_to_rclass: + if (rffi.sizeof(TYPE) == size and + rarithmetic.is_signed_integer_type(TYPE) == signed): + return TYPE + raise KeyError("Cannot find an int type size=%d, signed=%d" % (size, signed)) + def make_int_unpacker(size, signed, _memo={}): try: return _memo[size, signed] @@ -180,9 +243,30 @@ else: inttype = r_ulonglong unroll_range_size = unrolling_iterable(range(size)) + TYPE = get_rffi_int_type(size, signed) + + @specialize.argtype(0) + def unpack_int_fastpath_maybe(fmtiter): + if fmtiter.bigendian != native_is_bigendian or not native_is_ieee754: ## or not str_storage_supported(TYPE): + return False + try: + intvalue = unpack_fastpath(TYPE)(fmtiter) + except CannotUnpack: + return False + if not signed and size < native_int_size: + intvalue = rarithmetic.intmask(intvalue) + intvalue = inttype(intvalue) + fmtiter.appendobj(intvalue) + return True @specialize.argtype(0) def unpack_int(fmtiter): + if unpack_int_fastpath_maybe(fmtiter): + return + # slow path + if not ALLOW_SLOWPATH: + # we enter here only on some tests + 
raise ValueError("fastpath not taken :(") intvalue = inttype(0) s = fmtiter.read(size) idx = 0 @@ -217,9 +301,9 @@ 'p':{ 'size' : 1, 'pack' : pack_pascal, 'unpack' : unpack_pascal, 'needcount' : True }, 'f':{ 'size' : 4, 'pack' : make_float_packer(4), - 'unpack' : make_float_unpacker(4)}, + 'unpack' : unpack_float}, 'd':{ 'size' : 8, 'pack' : make_float_packer(8), - 'unpack' : make_float_unpacker(8)}, + 'unpack' : unpack_double}, '?':{ 'size' : 1, 'pack' : pack_bool, 'unpack' : unpack_bool}, } diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rstruct/test/test_runpack.py pypy-5.0.1+dfsg/rpython/rlib/rstruct/test/test_runpack.py --- pypy-4.0.1+dfsg/rpython/rlib/rstruct/test/test_runpack.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rstruct/test/test_runpack.py 2016-03-19 16:40:15.000000000 +0000 @@ -1,5 +1,6 @@ from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rlib.rstruct.runpack import runpack +from rpython.rlib.rstruct import standardfmttable from rpython.rlib.rarithmetic import LONG_BIT import struct @@ -37,3 +38,63 @@ return runpack(">d", "testtest") assert fn() == struct.unpack(">d", "testtest")[0] assert self.interpret(fn, []) == struct.unpack(">d", "testtest")[0] + + def test_native_floats(self): + """ + Check the 'd' and 'f' format characters on native packing. 
+ """ + d_data = struct.pack("df", 12.34, 12.34) + def fn(): + d, f = runpack("@df", d_data) + return d, f + # + # direct test + d, f = fn() + assert d == 12.34 # no precision lost + assert f != 12.34 # precision lost + assert abs(f - 12.34) < 1E-6 + # + # translated test + res = self.interpret(fn, []) + d = res.item0 + f = res.item1 # convert from r_singlefloat + assert d == 12.34 # no precision lost + assert f != 12.34 # precision lost + assert abs(f - 12.34) < 1E-6 + + def test_unpack_standard_little(self): + def unpack(fmt, data): + def fn(): + return runpack(fmt, data) + return self.interpret(fn, []) + # + assert unpack("i", 'ABCD') == 0x41424344 + assert unpack(">i", '\xff\xff\xff\xfd') == -3 + assert unpack(">i", '\x80\x00\x00\x00') == -2147483648 + assert unpack(">I", '\x81BCD') == 0x81424344 + assert unpack(">q", 'ABCDEFGH') == 0x4142434445464748 + assert unpack(">q", '\xbeMLKJIHH') == -0x41B2B3B4B5B6B7B8 + assert unpack(">Q", '\x81BCDEFGH') == 0x8142434445464748 + + +class TestNoFastPath(TestRStruct): + + def setup_method(self, meth): + standardfmttable.USE_FASTPATH = False + + def teardown_method(self, meth): + standardfmttable.USE_FASTPATH = True diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rthread.py pypy-5.0.1+dfsg/rpython/rlib/rthread.py --- pypy-4.0.1+dfsg/rpython/rlib/rthread.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rthread.py 2016-03-19 16:40:12.000000000 +0000 @@ -79,7 +79,12 @@ @specialize.arg(0) def ll_start_new_thread(func): + from rpython.rlib import rgil _check_thread_enabled() + rgil.allocate() + # ^^^ convenience: any RPython program which uses explicitly + # rthread.start_new_thread() will initialize the GIL at that + # point. ident = c_thread_start(func) if ident == -1: raise error("can't start new thread") @@ -286,8 +291,6 @@ # ____________________________________________________________ # # Thread-locals. -# KEEP THE REFERENCE ALIVE, THE GC DOES NOT FOLLOW THEM SO FAR! 
-# We use _make_sure_does_not_move() to make sure the pointer will not move. class ThreadLocalField(object): @@ -305,7 +308,7 @@ offset = CDefinedIntSymbolic('RPY_TLOFS_%s' % self.fieldname, default='?') offset.loop_invariant = loop_invariant - self.offset = offset + self._offset = offset def getraw(): if we_are_translated(): @@ -346,6 +349,11 @@ class ThreadLocalReference(ThreadLocalField): + # A thread-local that points to an object. The object stored in such + # a thread-local is kept alive as long as the thread is not finished + # (but only with our own GCs! it seems not to work with Boehm...) + # (also, on Windows, if you're not making a DLL but an EXE, it will + # leak the objects when a thread finishes; see threadlocal.c.) _COUNT = 1 def __init__(self, Cls, loop_invariant=False): @@ -356,7 +364,7 @@ ThreadLocalField.__init__(self, lltype.Signed, 'tlref%d' % unique_id, loop_invariant=loop_invariant) setraw = self.setraw - offset = self.offset + offset = self._offset def get(): if we_are_translated(): @@ -373,20 +381,41 @@ assert isinstance(value, Cls) or value is None if we_are_translated(): from rpython.rtyper.annlowlevel import cast_instance_to_gcref - from rpython.rlib.rgc import _make_sure_does_not_move - from rpython.rlib.objectmodel import running_on_llinterp gcref = cast_instance_to_gcref(value) - if not running_on_llinterp: - if gcref: - _make_sure_does_not_move(gcref) value = lltype.cast_ptr_to_int(gcref) setraw(value) + rgc.register_custom_trace_hook(TRACETLREF, _lambda_trace_tlref) + rgc.ll_writebarrier(_tracetlref_obj) else: self.local.value = value self.get = get self.set = set + def _trace_tlref(gc, obj, callback, arg): + p = llmemory.NULL + llop.threadlocalref_acquire(lltype.Void) + while True: + p = llop.threadlocalref_enum(llmemory.Address, p) + if not p: + break + gc._trace_callback(callback, arg, p + offset) + llop.threadlocalref_release(lltype.Void) + _lambda_trace_tlref = lambda: _trace_tlref + TRACETLREF = lltype.GcStruct('TRACETLREF') + 
_tracetlref_obj = lltype.malloc(TRACETLREF, immortal=True) + + @staticmethod + def automatic_keepalive(config): + """Returns True if translated with a GC that keeps alive + the set() value until the end of the thread. Returns False + if you need to keep it alive yourself (but in that case, you + should also reset it to None before the thread finishes). + """ + return (config.translation.gctransformer == "framework" and + # see translator/c/src/threadlocal.c for the following line + (not _win32 or config.translation.shared)) + tlfield_thread_ident = ThreadLocalField(lltype.Signed, "thread_ident", loop_invariant=True) @@ -394,7 +423,8 @@ loop_invariant=True) tlfield_rpy_errno = ThreadLocalField(rffi.INT, "rpy_errno") tlfield_alt_errno = ThreadLocalField(rffi.INT, "alt_errno") -if sys.platform == "win32": +_win32 = (sys.platform == "win32") +if _win32: from rpython.rlib import rwin32 tlfield_rpy_lasterror = ThreadLocalField(rwin32.DWORD, "rpy_lasterror") tlfield_alt_lasterror = ThreadLocalField(rwin32.DWORD, "alt_lasterror") diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rtime.py pypy-5.0.1+dfsg/rpython/rlib/rtime.py --- pypy-4.0.1+dfsg/rpython/rlib/rtime.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rtime.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,237 @@ +""" +RPython implementations of time.time(), time.clock(), time.select(). 
+""" + +import sys +import math +import time as pytime +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.tool import rffi_platform +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.objectmodel import register_replacement_for +from rpython.rlib import jit +from rpython.rlib.rarithmetic import intmask, UINT_MAX +from rpython.rlib import rposix + +_WIN32 = sys.platform.startswith('win') + +if _WIN32: + TIME_H = 'time.h' + FTIME = '_ftime64' + STRUCT_TIMEB = 'struct __timeb64' + includes = ['winsock2.h', 'windows.h', + TIME_H, 'sys/types.h', 'sys/timeb.h'] + need_rusage = False +else: + TIME_H = 'sys/time.h' + FTIME = 'ftime' + STRUCT_TIMEB = 'struct timeb' + includes = [TIME_H, 'time.h', 'errno.h', 'sys/select.h', + 'sys/types.h', 'unistd.h', + 'sys/time.h', 'sys/resource.h'] + + if not sys.platform.startswith("openbsd"): + includes.append('sys/timeb.h') + + need_rusage = True + + +eci = ExternalCompilationInfo(includes=includes) + +class CConfig: + _compilation_info_ = eci + TIMEVAL = rffi_platform.Struct('struct timeval', [('tv_sec', rffi.INT), + ('tv_usec', rffi.INT)]) + HAVE_GETTIMEOFDAY = rffi_platform.Has('gettimeofday') + HAVE_FTIME = rffi_platform.Has(FTIME) + if need_rusage: + RUSAGE = rffi_platform.Struct('struct rusage', [('ru_utime', TIMEVAL), + ('ru_stime', TIMEVAL)]) + +if sys.platform.startswith('freebsd') or sys.platform.startswith('netbsd'): + libraries = ['compat'] +elif sys.platform == 'linux2': + libraries = ['rt'] +else: + libraries = [] + +class CConfigForFTime: + _compilation_info_ = ExternalCompilationInfo( + includes=[TIME_H, 'sys/timeb.h'], + libraries=libraries + ) + TIMEB = rffi_platform.Struct(STRUCT_TIMEB, [('time', rffi.INT), + ('millitm', rffi.INT)]) + +class CConfigForClockGetTime: + _compilation_info_ = ExternalCompilationInfo( + includes=['time.h'], + libraries=libraries + ) + TIMESPEC = rffi_platform.Struct('struct timespec', [('tv_sec', rffi.LONG), + ('tv_nsec', 
rffi.LONG)]) + +constant_names = ['RUSAGE_SELF', 'EINTR', 'CLOCK_PROCESS_CPUTIME_ID'] +for const in constant_names: + setattr(CConfig, const, rffi_platform.DefinedConstantInteger(const)) +defs_names = ['GETTIMEOFDAY_NO_TZ'] +for const in defs_names: + setattr(CConfig, const, rffi_platform.Defined(const)) + +def decode_timeval(t): + return (float(rffi.getintfield(t, 'c_tv_sec')) + + float(rffi.getintfield(t, 'c_tv_usec')) * 0.000001) + + +def external(name, args, result, compilation_info=eci, **kwds): + return rffi.llexternal(name, args, result, + compilation_info=compilation_info, **kwds) + +def replace_time_function(name): + func = getattr(pytime, name, None) + if func is None: + return lambda f: f + return register_replacement_for( + func, + sandboxed_name='ll_time.ll_time_%s' % name) + +config = rffi_platform.configure(CConfig) +globals().update(config) + +# Note: time.time() is used by the framework GC during collect(), +# which means that we have to be very careful about not allocating +# GC memory here. This is the reason for the _nowrapper=True. 
+if HAVE_GETTIMEOFDAY: + if GETTIMEOFDAY_NO_TZ: + c_gettimeofday = external('gettimeofday', + [lltype.Ptr(TIMEVAL)], rffi.INT, + _nowrapper=True, releasegil=False) + else: + c_gettimeofday = external('gettimeofday', + [lltype.Ptr(TIMEVAL), rffi.VOIDP], rffi.INT, + _nowrapper=True, releasegil=False) +if HAVE_FTIME: + globals().update(rffi_platform.configure(CConfigForFTime)) + c_ftime = external(FTIME, [lltype.Ptr(TIMEB)], + lltype.Void, + _nowrapper=True, releasegil=False) +c_time = external('time', [rffi.VOIDP], rffi.TIME_T, + _nowrapper=True, releasegil=False) + + +@replace_time_function('time') +def time(): + void = lltype.nullptr(rffi.VOIDP.TO) + result = -1.0 + if HAVE_GETTIMEOFDAY: + with lltype.scoped_alloc(TIMEVAL) as t: + errcode = -1 + if GETTIMEOFDAY_NO_TZ: + errcode = c_gettimeofday(t) + else: + errcode = c_gettimeofday(t, void) + + if rffi.cast(rffi.LONG, errcode) == 0: + result = decode_timeval(t) + if result != -1: + return result + else: # assume using ftime(3) + with lltype.scoped_alloc(TIMEB) as t: + c_ftime(t) + result = (float(intmask(t.c_time)) + + float(intmask(t.c_millitm)) * 0.001) + return result + return float(c_time(void)) + + +# _______________________________________________________________ +# time.clock() + +if _WIN32: + # hacking to avoid LARGE_INTEGER which is a union... 
+ A = lltype.FixedSizeArray(lltype.SignedLongLong, 1) + QueryPerformanceCounter = external( + 'QueryPerformanceCounter', [lltype.Ptr(A)], lltype.Void, + releasegil=False) + QueryPerformanceFrequency = external( + 'QueryPerformanceFrequency', [lltype.Ptr(A)], rffi.INT, + releasegil=False) + class State(object): + divisor = 0.0 + counter_start = 0 + state = State() +elif CLOCK_PROCESS_CPUTIME_ID is not None: + # Linux and other POSIX systems with clock_gettime() + globals().update(rffi_platform.configure(CConfigForClockGetTime)) + TIMESPEC = TIMESPEC + CLOCK_PROCESS_CPUTIME_ID = CLOCK_PROCESS_CPUTIME_ID + eci_with_lrt = eci.merge(ExternalCompilationInfo(libraries=['rt'])) + c_clock_gettime = external('clock_gettime', + [lltype.Signed, lltype.Ptr(TIMESPEC)], + rffi.INT, releasegil=False, + compilation_info=eci_with_lrt) +else: + RUSAGE = RUSAGE + RUSAGE_SELF = RUSAGE_SELF or 0 + c_getrusage = external('getrusage', + [rffi.INT, lltype.Ptr(RUSAGE)], + lltype.Void, + releasegil=False) + +@replace_time_function('clock') +@jit.dont_look_inside # the JIT doesn't like FixedSizeArray +def clock(): + if _WIN32: + a = lltype.malloc(A, flavor='raw') + if state.divisor == 0.0: + QueryPerformanceCounter(a) + state.counter_start = a[0] + QueryPerformanceFrequency(a) + state.divisor = float(a[0]) + QueryPerformanceCounter(a) + diff = a[0] - state.counter_start + lltype.free(a, flavor='raw') + return float(diff) / state.divisor + elif CLOCK_PROCESS_CPUTIME_ID is not None: + with lltype.scoped_alloc(TIMESPEC) as a: + c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a) + result = (float(rffi.getintfield(a, 'c_tv_sec')) + + float(rffi.getintfield(a, 'c_tv_nsec')) * 0.000000001) + return result + else: + with lltype.scoped_alloc(RUSAGE) as a: + c_getrusage(RUSAGE_SELF, a) + result = (decode_timeval(a.c_ru_utime) + + decode_timeval(a.c_ru_stime)) + return result + +# _______________________________________________________________ +# time.sleep() + +if _WIN32: + Sleep = external('Sleep', 
[rffi.ULONG], lltype.Void) +else: + c_select = external('select', [rffi.INT, rffi.VOIDP, + rffi.VOIDP, rffi.VOIDP, + lltype.Ptr(TIMEVAL)], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + +@replace_time_function('sleep') +def sleep(secs): + if _WIN32: + millisecs = secs * 1000.0 + while millisecs > UINT_MAX: + Sleep(UINT_MAX) + millisecs -= UINT_MAX + Sleep(rffi.cast(rffi.ULONG, int(millisecs))) + else: + void = lltype.nullptr(rffi.VOIDP.TO) + with lltype.scoped_alloc(TIMEVAL) as t: + frac = math.fmod(secs, 1.0) + rffi.setintfield(t, 'c_tv_sec', int(secs)) + rffi.setintfield(t, 'c_tv_usec', int(frac*1000000.0)) + + if rffi.cast(rffi.LONG, c_select(0, void, void, void, t)) != 0: + errno = rposix.get_saved_errno() + if errno != EINTR: + raise OSError(errno, "Select failed") diff -Nru pypy-4.0.1+dfsg/rpython/rlib/runicode.py pypy-5.0.1+dfsg/rpython/rlib/runicode.py --- pypy-4.0.1+dfsg/rpython/rlib/runicode.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/runicode.py 2016-03-19 16:40:12.000000000 +0000 @@ -1709,7 +1709,9 @@ rffi.cast(lltype.Bool, used_default_p[0])): errorhandler('strict', 'mbcs', "invalid character", s, 0, 0) - return buf.str(mbcssize) + result = buf.str(mbcssize) + assert result is not None + return result finally: if used_default_p: lltype.free(used_default_p, flavor='raw') diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rurandom.py pypy-5.0.1+dfsg/rpython/rlib/rurandom.py --- pypy-4.0.1+dfsg/rpython/rlib/rurandom.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rurandom.py 2016-03-19 16:40:12.000000000 +0000 @@ -86,27 +86,29 @@ else: # Posix implementation def init_urandom(): """NOT_RPYTHON - Return an array of one int, initialized to 0. - It is filled automatically the first time urandom() is called. """ - return lltype.malloc(rffi.CArray(lltype.Signed), 1, - immortal=True, zero=True) + return None def urandom(context, n): "Read n bytes from /dev/urandom." 
result = '' if n == 0: return result - if not context[0]: - context[0] = os.open("/dev/urandom", os.O_RDONLY, 0777) - while n > 0: - try: - data = os.read(context[0], n) - except OSError, e: - if e.errno != errno.EINTR: - raise - data = '' - result += data - n -= len(data) + # XXX should somehow cache the file descriptor. It's a mess. + # CPython has a 99% solution and hopes for the remaining 1% + # not to occur. For now, we just don't cache the file + # descriptor (any more... 6810f401d08e). + fd = os.open("/dev/urandom", os.O_RDONLY, 0777) + try: + while n > 0: + try: + data = os.read(fd, n) + except OSError, e: + if e.errno != errno.EINTR: + raise + data = '' + result += data + n -= len(data) + finally: + os.close(fd) return result - diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/cintf.py pypy-5.0.1+dfsg/rpython/rlib/rvmprof/cintf.py --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/cintf.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/cintf.py 2016-03-19 16:40:15.000000000 +0000 @@ -5,41 +5,35 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.tool import rffi_platform as platform - -from rpython.jit.backend import detect_cpu +from rpython.rlib import rthread class VMProfPlatformUnsupported(Exception): pass -def setup(): - if not detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64): - raise VMProfPlatformUnsupported("rvmprof only supports" - " x86-64 CPUs for now") - - - ROOT = py.path.local(rpythonroot).join('rpython', 'rlib', 'rvmprof') - SRC = ROOT.join('src') - - - if sys.platform.startswith('linux'): - libs = ['dl'] - else: - libs = [] - - eci_kwds = dict( - include_dirs = [SRC], - includes = ['rvmprof.h'], - libraries = libs, - separate_module_files = [SRC.join('rvmprof.c')], - post_include_bits=['#define RPYTHON_VMPROF\n'], - ) - eci = ExternalCompilationInfo(**eci_kwds) +ROOT = py.path.local(rpythonroot).join('rpython', 
'rlib', 'rvmprof') +SRC = ROOT.join('src') + +if sys.platform.startswith('linux'): + _libs = ['dl'] +else: + _libs = [] +eci_kwds = dict( + include_dirs = [SRC], + includes = ['rvmprof.h', 'vmprof_stack.h'], + libraries = _libs, + separate_module_files = [SRC.join('rvmprof.c')], + post_include_bits=['#define RPYTHON_VMPROF\n'], + ) +global_eci = ExternalCompilationInfo(**eci_kwds) + +def setup(): + compile_extra = ['-DRPYTHON_LL2CTYPES'] platform.verify_eci(ExternalCompilationInfo( - compile_extra=['-DRPYTHON_LL2CTYPES'], + compile_extra=compile_extra, **eci_kwds)) - + eci = global_eci vmprof_init = rffi.llexternal("vmprof_init", [rffi.INT, rffi.DOUBLE, rffi.CCHARP], rffi.CCHARP, compilation_info=eci) @@ -55,7 +49,8 @@ rffi.INT, compilation_info=eci) vmprof_ignore_signals = rffi.llexternal("vmprof_ignore_signals", [rffi.INT], lltype.Void, - compilation_info=eci) + compilation_info=eci, + _nowrapper=True) return CInterface(locals()) @@ -67,112 +62,34 @@ def _freeze_(self): return True -def token2lltype(tok): - if tok == 'i': - return lltype.Signed - if tok == 'r': - return llmemory.GCREF - raise NotImplementedError(repr(tok)) - -def make_trampoline_function(name, func, token, restok): - from rpython.jit.backend import detect_cpu - - cont_name = 'rpyvmprof_f_%s_%s' % (name, token) - tramp_name = 'rpyvmprof_t_%s_%s' % (name, token) - orig_tramp_name = tramp_name - - func.c_name = cont_name - func._dont_inline_ = True - - if sys.platform == 'darwin': - # according to internet "At the time UNIX was written in 1974...." - # "... 
all C functions are prefixed with _" - cont_name = '_' + cont_name - tramp_name = '_' + tramp_name - PLT = "" - size_decl = "" - type_decl = "" - extra_align = "" - else: - PLT = "@PLT" - type_decl = "\t.type\t%s, @function" % (tramp_name,) - size_decl = "\t.size\t%s, .-%s" % ( - tramp_name, tramp_name) - extra_align = "\t.cfi_def_cfa_offset 8" - - assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( - "rvmprof only supports x86-64 CPUs for now") - - # mapping of argument count (not counting the final uid argument) to - # the register that holds this uid argument - reg = {0: '%rdi', - 1: '%rsi', - 2: '%rdx', - 3: '%rcx', - 4: '%r8', - 5: '%r9', - } - try: - reg = reg[len(token)] - except KeyError: - raise NotImplementedError( - "not supported: %r takes more than 5 arguments" % (func,)) - - target = udir.join('module_cache') - target.ensure(dir=1) - target = target.join('trampoline_%s_%s.vmprof.s' % (name, token)) - # NOTE! the tabs in this file are absolutely essential, things - # that don't start with \t are silently ignored (: WAT!?) 
- target.write("""\ -\t.text -\t.globl\t%(tramp_name)s -%(type_decl)s -%(tramp_name)s: -\t.cfi_startproc -\tpushq\t%(reg)s -\t.cfi_def_cfa_offset 16 -\tcall %(cont_name)s%(PLT)s -\taddq\t$8, %%rsp -%(extra_align)s -\tret -\t.cfi_endproc -%(size_decl)s -""" % locals()) - - def tok2cname(tok): - if tok == 'i': - return 'long' - if tok == 'r': - return 'void *' - raise NotImplementedError(repr(tok)) - - header = 'RPY_EXTERN %s %s(%s);\n' % ( - tok2cname(restok), - orig_tramp_name, - ', '.join([tok2cname(tok) for tok in token] + ['long'])) - - header += """\ -static int cmp_%s(void *addr) { - if (addr == %s) return 1; -#ifdef VMPROF_ADDR_OF_TRAMPOLINE - return VMPROF_ADDR_OF_TRAMPOLINE(addr); -#undef VMPROF_ADDR_OF_TRAMPOLINE -#else - return 0; -#endif -#define VMPROF_ADDR_OF_TRAMPOLINE cmp_%s -} -""" % (tramp_name, orig_tramp_name, tramp_name) - - eci = ExternalCompilationInfo( - post_include_bits = [header], - separate_module_files = [str(target)], - ) - return rffi.llexternal( - orig_tramp_name, - [token2lltype(tok) for tok in token] + [lltype.Signed], - token2lltype(restok), - compilation_info=eci, - _nowrapper=True, sandboxsafe=True, - random_effects_on_gcobjs=True) +# --- copy a few declarations from src/vmprof_stack.h --- + +VMPROF_CODE_TAG = 1 + +VMPROFSTACK = lltype.ForwardReference() +PVMPROFSTACK = lltype.Ptr(VMPROFSTACK) +VMPROFSTACK.become(rffi.CStruct("vmprof_stack_s", + ('next', PVMPROFSTACK), + ('value', lltype.Signed), + ('kind', lltype.Signed))) +# ---------- + + +vmprof_tl_stack = rthread.ThreadLocalField(PVMPROFSTACK, "vmprof_tl_stack") +do_use_eci = rffi.llexternal_use_eci( + ExternalCompilationInfo(includes=['vmprof_stack.h'], + include_dirs = [SRC])) + +def enter_code(unique_id): + do_use_eci() + s = lltype.malloc(VMPROFSTACK, flavor='raw') + s.c_next = vmprof_tl_stack.get_or_make_raw() + s.c_value = unique_id + s.c_kind = VMPROF_CODE_TAG + vmprof_tl_stack.setraw(s) + return s + +def leave_code(s): + vmprof_tl_stack.setraw(s.c_next) + 
lltype.free(s, flavor='raw') diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/rvmprof.py pypy-5.0.1+dfsg/rpython/rlib/rvmprof/rvmprof.py --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/rvmprof.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/rvmprof.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,12 +4,19 @@ from rpython.rlib.rvmprof import cintf from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import rffi, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop MAX_FUNC_NAME = 1023 # ____________________________________________________________ +# keep in sync with vmprof_stack.h +VMPROF_CODE_TAG = 1 +VMPROF_BLACKHOLE_TAG = 2 +VMPROF_JITTED_TAG = 3 +VMPROF_JITTING_TAG = 4 +VMPROF_GC_TAG = 5 class VMProfError(Exception): def __init__(self, msg): @@ -19,17 +26,16 @@ class VMProf(object): + _immutable_fields_ = ['is_enabled?'] + def __init__(self): "NOT_RPYTHON: use _get_vmprof()" self._code_classes = set() self._gather_all_code_objs = lambda: None self._cleanup_() - if sys.maxint == 2147483647: - self._code_unique_id = 0 # XXX this is wrong, it won't work on 32bit - else: - self._code_unique_id = 0x7000000000000000 + self._code_unique_id = 4 self.cintf = cintf.setup() - + def _cleanup_(self): self.is_enabled = False @@ -127,7 +133,6 @@ if self.cintf.vmprof_register_virtual_function(name, uid, 500000) < 0: raise VMProfError("vmprof buffers full! disk full or too slow") - def vmprof_execute_code(name, get_code_fn, result_class=None): """Decorator to be used on the function that interprets a code object. @@ -136,12 +141,7 @@ 'get_code_fn(*args)' is called to extract the code object from the arguments given to the decorated function. - The original function can return None, an integer, or an instance. - In the latter case (only), 'result_class' must be set. 
- - NOTE: for now, this assumes that the decorated functions only takes - instances or plain integer arguments, and at most 5 of them - (including 'self' if applicable). + 'result_class' is ignored (backward compatibility). """ def decorate(func): try: @@ -149,52 +149,19 @@ except cintf.VMProfPlatformUnsupported: return func - if hasattr(func, 'im_self'): - assert func.im_self is None - func = func.im_func - - def lower(*args): - if len(args) == 0: - return (), "" - ll_args, token = lower(*args[1:]) - ll_arg = args[0] - if isinstance(ll_arg, int): - tok = "i" - else: - tok = "r" - ll_arg = cast_instance_to_gcref(ll_arg) - return (ll_arg,) + ll_args, tok + token - - @specialize.memo() - def get_ll_trampoline(token): - if result_class is None: - restok = "i" - else: - restok = "r" - return cintf.make_trampoline_function(name, func, token, restok) - def decorated_function(*args): - # go through the asm trampoline ONLY if we are translated but not - # being JITted. - # - # If we are not translated, we obviously don't want to go through - # the trampoline because there is no C function it can call. - # # If we are being JITted, we want to skip the trampoline, else the # JIT cannot see through it. 
- # - if we_are_translated() and not jit.we_are_jitted(): - # if we are translated, call the trampoline + if not jit.we_are_jitted(): unique_id = get_code_fn(*args)._vmprof_unique_id - ll_args, token = lower(*args) - ll_trampoline = get_ll_trampoline(token) - ll_result = ll_trampoline(*ll_args + (unique_id,)) - if result_class is not None: - return cast_base_ptr_to_instance(result_class, ll_result) - else: - return ll_result + x = cintf.enter_code(unique_id) + try: + return func(*args) + finally: + cintf.leave_code(x) else: return func(*args) + decorated_function.__name__ = func.__name__ + '_rvmprof' return decorated_function diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/rvmprof.c pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/rvmprof.c --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/rvmprof.c 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/rvmprof.c 2016-03-19 16:40:12.000000000 +0000 @@ -1,23 +1,27 @@ #define _GNU_SOURCE 1 - #ifdef RPYTHON_LL2CTYPES /* only for testing: ll2ctypes sets RPY_EXTERN from the command-line */ -# ifndef RPY_EXTERN -# define RPY_EXTERN RPY_EXPORTED -# endif -# define RPY_EXPORTED extern __attribute__((visibility("default"))) -# define VMPROF_ADDR_OF_TRAMPOLINE(addr) 0 - +#ifndef RPY_EXTERN +#define RPY_EXTERN RPY_EXPORTED +#endif +#ifdef _WIN32 +#define RPY_EXPORTED __declspec(dllexport) #else +#define RPY_EXPORTED extern __attribute__((visibility("default"))) +#endif +#else # include "common_header.h" +# include "structdef.h" +# include "src/threadlocal.h" # include "rvmprof.h" -# ifndef VMPROF_ADDR_OF_TRAMPOLINE -# error "RPython program using rvmprof, but not calling vmprof_execute_code()" -# endif #endif +#if defined(__unix__) || defined(__APPLE__) #include "vmprof_main.h" +#else +#include "vmprof_main_win32.h" +#endif diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/rvmprof.h pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/rvmprof.h --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/rvmprof.h 2015-11-19 
19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/rvmprof.h 2016-03-19 16:40:12.000000000 +0000 @@ -4,3 +4,7 @@ RPY_EXTERN int vmprof_enable(void); RPY_EXTERN int vmprof_disable(void); RPY_EXTERN int vmprof_register_virtual_function(char *, long, int); +RPY_EXTERN void* vmprof_stack_new(void); +RPY_EXTERN int vmprof_stack_append(void*, long); +RPY_EXTERN long vmprof_stack_pop(void*); +RPY_EXTERN void vmprof_stack_free(void*); diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_common.h pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_common.h --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_common.h 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_common.h 2016-03-19 16:40:15.000000000 +0000 @@ -0,0 +1,121 @@ +#include + +#define MAX_FUNC_NAME 1024 + +static int profile_file = -1; +static long prepare_interval_usec = 0; +static long profile_interval_usec = 0; +static int opened_profile(char *interp_name); + +#define MARKER_STACKTRACE '\x01' +#define MARKER_VIRTUAL_IP '\x02' +#define MARKER_TRAILER '\x03' +#define MARKER_INTERP_NAME '\x04' /* deprecated */ +#define MARKER_HEADER '\x05' + +#define VERSION_BASE '\x00' +#define VERSION_THREAD_ID '\x01' +#define VERSION_TAG '\x02' + +#define MAX_STACK_DEPTH \ + ((SINGLE_BUF_SIZE - sizeof(struct prof_stacktrace_s)) / sizeof(void *)) + +typedef struct prof_stacktrace_s { + char padding[sizeof(long) - 1]; + char marker; + long count, depth; + void *stack[]; +} prof_stacktrace_s; + + +RPY_EXTERN +char *vmprof_init(int fd, double interval, char *interp_name) +{ + if (interval < 1e-6 || interval >= 1.0) + return "bad value for 'interval'"; + prepare_interval_usec = (int)(interval * 1000000.0); + + if (prepare_concurrent_bufs() < 0) + return "out of memory"; + + assert(fd >= 0); + profile_file = fd; + if (opened_profile(interp_name) < 0) { + profile_file = -1; + return strerror(errno); + } + return NULL; +} + +static int _write_all(const char *buf, size_t 
bufsize); + +static int opened_profile(char *interp_name) +{ + struct { + long hdr[5]; + char interp_name[259]; + } header; + + size_t namelen = strnlen(interp_name, 255); + + header.hdr[0] = 0; + header.hdr[1] = 3; + header.hdr[2] = 0; + header.hdr[3] = prepare_interval_usec; + header.hdr[4] = 0; + header.interp_name[0] = MARKER_HEADER; + header.interp_name[1] = '\x00'; + header.interp_name[2] = VERSION_TAG; + header.interp_name[3] = namelen; + memcpy(&header.interp_name[4], interp_name, namelen); + return _write_all((char*)&header, 5 * sizeof(long) + 4 + namelen); +} + +/* ************************************************************* + * functions to dump the stack trace + * ************************************************************* + */ + + +static int get_stack_trace(vmprof_stack_t* stack, intptr_t *result, int max_depth, intptr_t pc) +{ + int n = 0; + intptr_t addr = 0; + int bottom_jitted = 0; + // check if the pc is in JIT +#ifdef PYPY_JIT_CODEMAP + if (pypy_find_codemap_at_addr((intptr_t)pc, &addr)) { + // the bottom part is jitted, means we can fill up the first part + // from the JIT + n = vmprof_write_header_for_jit_addr(result, n, pc, max_depth); + stack = stack->next; // skip the first item as it contains garbage + } +#endif + while (n < max_depth - 1 && stack) { + if (stack->kind == VMPROF_CODE_TAG) { + result[n] = stack->kind; + result[n + 1] = stack->value; + n += 2; + } +#ifdef PYPY_JIT_CODEMAP + else if (stack->kind == VMPROF_JITTED_TAG) { + pc = ((intptr_t*)(stack->value - sizeof(intptr_t)))[0]; + n = vmprof_write_header_for_jit_addr(result, n, pc, max_depth); + } +#endif + stack = stack->next; + } + return n; +} + +#ifndef RPYTHON_LL2CTYPES +static vmprof_stack_t *get_vmprof_stack(void) +{ + return RPY_THREADLOCALREF_GET(vmprof_tl_stack); +} +#else +static vmprof_stack_t *get_vmprof_stack(void) +{ + return 0; +} +#endif diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h 
pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h 2016-03-19 16:40:12.000000000 +0000 @@ -1,119 +1,49 @@ -#ifdef PYPY_JIT_CODEMAP void *pypy_find_codemap_at_addr(long addr, long *start_addr); long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, long *current_pos_addr); long pypy_jit_stack_depth_at_loc(long loc); -#endif - - -#ifdef CPYTHON_GET_CUSTOM_OFFSET -static void *tramp_start, *tramp_end; -#endif - - -static ptrdiff_t vmprof_unw_get_custom_offset(void* ip, void *cp) { - -#if defined(PYPY_JIT_CODEMAP) - - intptr_t ip_l = (intptr_t)ip; - return pypy_jit_stack_depth_at_loc(ip_l); -#elif defined(CPYTHON_GET_CUSTOM_OFFSET) - if (ip >= tramp_start && ip <= tramp_end) { - // XXX the return value is wrong for all the places before push and - // after pop, fix - void *bp; - void *sp; - - /* This is a stage2 trampoline created by hotpatch: - - push %rbx - push %rbp - mov %rsp,%rbp - and $0xfffffffffffffff0,%rsp // make sure the stack is aligned - movabs $0x7ffff687bb10,%rbx - callq *%rbx - leaveq - pop %rbx - retq - - the stack layout is like this: - - +-----------+ high addresses - | ret addr | - +-----------+ - | saved rbx | start of the function frame - +-----------+ - | saved rbp | - +-----------+ - | ........ | <-- rbp - +-----------+ low addresses - - So, the trampoline frame starts at rbp+16, and the return address, - is at rbp+24. The vmprof API requires us to return the offset of - the frame relative to sp, hence we have this weird computation. 
- - XXX (antocuni): I think we could change the API to return directly - the frame address instead of the offset; however, this require a - change in the PyPy code too - */ - - unw_get_reg (cp, UNW_REG_SP, (unw_word_t*)&sp); - unw_get_reg (cp, UNW_X86_64_RBP, (unw_word_t*)&bp); - return bp+16+8-sp; - } - return -1; - -#else - - return -1; - -#endif -} - -static long vmprof_write_header_for_jit_addr(void **result, long n, - void *ip, int max_depth) +static long vmprof_write_header_for_jit_addr(intptr_t *result, long n, + intptr_t ip, int max_depth) { #ifdef PYPY_JIT_CODEMAP void *codemap; long current_pos = 0; - intptr_t id; + intptr_t ident; long start_addr = 0; intptr_t addr = (intptr_t)ip; int start, k; - void *tmp; + intptr_t tmp; codemap = pypy_find_codemap_at_addr(addr, &start_addr); - if (codemap == NULL) - // not a jit code at all + if (codemap == NULL || n >= max_depth - 2) + // not a jit code at all or almost max depth return n; // modify the last entry to point to start address and not the random one // in the middle - result[n - 1] = (void*)start_addr; - result[n] = (void*)2; - n++; + result[n++] = VMPROF_ASSEMBLER_TAG; + result[n++] = start_addr; start = n; while (n < max_depth) { - id = pypy_yield_codemap_at_addr(codemap, addr, ¤t_pos); - if (id == -1) + ident = pypy_yield_codemap_at_addr(codemap, addr, ¤t_pos); + if (ident == -1) // finish break; - if (id == 0) + if (ident == 0) continue; // not main codemap - result[n++] = (void *)id; + result[n++] = VMPROF_JITTED_TAG; + result[n++] = ident; } - k = 0; + k = 1; + while (k < (n - start) / 2) { tmp = result[start + k]; - result[start + k] = result[n - k - 1]; - result[n - k - 1] = tmp; - k++; - } - if (n < max_depth) { - result[n++] = (void*)3; + result[start + k] = result[n - k]; + result[n - k] = tmp; + k += 2; } #endif return n; diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_getpc.h pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_getpc.h --- 
pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_getpc.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_getpc.h 2016-03-19 16:40:15.000000000 +0000 @@ -54,6 +54,7 @@ // It will cause problems for FreeBSD though!, because it turns off // the needed __BSD_VISIBLE. #ifdef __APPLE__ +#include #define _XOPEN_SOURCE 500 #endif @@ -111,47 +112,9 @@ // PC_FROM_UCONTEXT in config.h. The only thing we need to do here, // then, is to do the magic call-unrolling for systems that support it. -// -- Special case 1: linux x86, for which we have CallUnrollInfo #if defined(__linux) && defined(__i386) && defined(__GNUC__) -static const CallUnrollInfo callunrollinfo[] = { - // Entry to a function: push %ebp; mov %esp,%ebp - // Top-of-stack contains the caller IP. - { 0, - {0x55, 0x89, 0xe5}, 3, - 0 - }, - // Entry to a function, second instruction: push %ebp; mov %esp,%ebp - // Top-of-stack contains the old frame, caller IP is +4. - { -1, - {0x55, 0x89, 0xe5}, 3, - 4 - }, - // Return from a function: RET. - // Top-of-stack contains the caller IP. - { 0, - {0xc3}, 1, - 0 - } -}; - -void* GetPC(ucontext_t *signal_ucontext) { - // See comment above struct CallUnrollInfo. Only try instruction - // flow matching if both eip and esp looks reasonable. - const int eip = signal_ucontext->uc_mcontext.gregs[REG_EIP]; - const int esp = signal_ucontext->uc_mcontext.gregs[REG_ESP]; - if ((eip & 0xffff0000) != 0 && (~eip & 0xffff0000) != 0 && - (esp & 0xffff0000) != 0) { - char* eip_char = reinterpret_cast(eip); - for (int i = 0; i < sizeof(callunrollinfo)/sizeof(*callunrollinfo); ++i) { - if (!memcmp(eip_char + callunrollinfo[i].pc_offset, - callunrollinfo[i].ins, callunrollinfo[i].ins_size)) { - // We have a match. 
- void **retaddr = (void**)(esp + callunrollinfo[i].return_sp_offset); - return *retaddr; - } - } - } - return (void*)eip; +intptr_t GetPC(ucontext_t *signal_ucontext) { + return signal_ucontext->uc_mcontext.gregs[REG_EIP]; } // Special case #2: Windows, which has to do something totally different. @@ -170,7 +133,7 @@ typedef int ucontext_t; #endif -void* GetPC(ucontext_t *signal_ucontext) { +intptr_t GetPC(ucontext_t *signal_ucontext) { RAW_LOG(ERROR, "GetPC is not yet implemented on Windows\n"); return NULL; } @@ -180,11 +143,15 @@ // the right value for your system, and add it to the list in // configure.ac (or set it manually in your config.h). #else -void* GetPC(ucontext_t *signal_ucontext) { +intptr_t GetPC(ucontext_t *signal_ucontext) { #ifdef __APPLE__ - return (void*)(signal_ucontext->uc_mcontext->__ss.__rip); +#if ((ULONG_MAX) == (UINT_MAX)) + return (signal_ucontext->uc_mcontext->__ss.__eip); +#else + return (signal_ucontext->uc_mcontext->__ss.__rip); +#endif #else - return (void*)signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h + return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h #endif } diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_main.h pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_main.h --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_main.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_main.h 2016-03-19 16:40:12.000000000 +0000 @@ -25,84 +25,29 @@ #include #include #include +#include #include #include #include #include +#include #include +#include "vmprof_stack.h" #include "vmprof_getpc.h" -#ifdef __APPLE__ -#include "libunwind.h" -#else -#include "vmprof_unwind.h" -#endif #include "vmprof_mt.h" - +#include "vmprof_get_custom_offset.h" +#include "vmprof_common.h" /************************************************************/ -// functions copied from libunwind using dlopen - -#ifndef __APPLE__ // should be linux only probably -static int 
(*unw_get_reg)(unw_cursor_t*, int, unw_word_t*) = NULL; -static int (*unw_step)(unw_cursor_t*) = NULL; -static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; -static int (*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; -#endif - -static int profile_file = -1; static long prepare_interval_usec; +static long saved_profile_file; static struct profbuf_s *volatile current_codes; static void *(*mainloop_get_virtual_ip)(char *) = 0; static int opened_profile(char *interp_name); static void flush_codes(void); -#ifdef __APPLE__ -#define UNWIND_NAME "/usr/lib/system/libunwind.dylib" -#define UNW_PREFIX "unw" -#else -#define UNWIND_NAME "libunwind.so" -#define UNW_PREFIX "_ULx86_64" -#endif - -RPY_EXTERN -char *vmprof_init(int fd, double interval, char *interp_name) -{ - if (interval < 1e-6 || interval >= 1.0) - return "bad value for 'interval'"; - prepare_interval_usec = (int)(interval * 1000000.0); - -#ifndef __APPLE__ - if (!unw_get_reg) { - void *libhandle; - - if (!(libhandle = dlopen(UNWIND_NAME, RTLD_LAZY | RTLD_LOCAL))) - goto error; - if (!(unw_get_reg = dlsym(libhandle, UNW_PREFIX "_get_reg"))) - goto error; - if (!(unw_get_proc_info = dlsym(libhandle, UNW_PREFIX "_get_proc_info"))) - goto error; - if (!(unw_init_local = dlsym(libhandle, UNW_PREFIX "_init_local"))) - goto error; - if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) - goto error; - } -#endif - if (prepare_concurrent_bufs() < 0) - return "out of memory"; - - assert(fd >= 0); - profile_file = fd; - if (opened_profile(interp_name) < 0) { - profile_file = -1; - return strerror(errno); - } - return NULL; - - error: - return dlerror(); -} /************************************************************/ @@ -131,131 +76,10 @@ * ************************************************************* */ -#define MAX_FUNC_NAME 128 -#define MAX_STACK_DEPTH \ - ((SINGLE_BUF_SIZE - sizeof(struct prof_stacktrace_s)) / sizeof(void *)) - -#define MARKER_STACKTRACE '\x01' -#define 
MARKER_VIRTUAL_IP '\x02' -#define MARKER_TRAILER '\x03' -#define MARKER_INTERP_NAME '\x04' /* deprecated */ -#define MARKER_HEADER '\x05' - -#define VERSION_BASE '\x00' -#define VERSION_THREAD_ID '\x01' - -struct prof_stacktrace_s { - char padding[sizeof(long) - 1]; - char marker; - long count, depth; - void *stack[]; -}; - -static long profile_interval_usec = 0; static char atfork_hook_installed = 0; -/* ****************************************************** - * libunwind workaround for process JIT frames correctly - * ****************************************************** - */ - -#include "vmprof_get_custom_offset.h" - -typedef struct { - void* _unused1; - void* _unused2; - void* sp; - void* ip; - void* _unused3[sizeof(unw_cursor_t)/sizeof(void*) - 4]; -} vmprof_hacked_unw_cursor_t; - -static int vmprof_unw_step(unw_cursor_t *cp, int first_run) -{ - void* ip; - void* sp; - ptrdiff_t sp_offset; - unw_get_reg (cp, UNW_REG_IP, (unw_word_t*)&ip); - unw_get_reg (cp, UNW_REG_SP, (unw_word_t*)&sp); - if (!first_run) { - // make sure we're pointing to the CALL and not to the first - // instruction after. 
If the callee adjusts the stack for us - // it's not safe to be at the instruction after - ip -= 1; - } - sp_offset = vmprof_unw_get_custom_offset(ip, cp); - - if (sp_offset == -1) { - // it means that the ip is NOT in JITted code, so we can use the - // stardard unw_step - return unw_step(cp); - } - else { - // this is a horrible hack to manually walk the stack frame, by - // setting the IP and SP in the cursor - vmprof_hacked_unw_cursor_t *cp2 = (vmprof_hacked_unw_cursor_t*)cp; - void* bp = (void*)sp + sp_offset; - cp2->sp = bp; - bp -= sizeof(void*); - cp2->ip = ((void**)bp)[0]; - // the ret is on the top of the stack minus WORD - return 1; - } -} - - -/* ************************************************************* - * functions to dump the stack trace - * ************************************************************* - */ - -static int get_stack_trace(void** result, int max_depth, ucontext_t *ucontext) -{ - void *ip; - int n = 0; - unw_cursor_t cursor; -#ifdef __APPLE__ - unw_context_t uc; - unw_getcontext(&uc); -#else - unw_context_t uc = *ucontext; -#endif - - int ret = unw_init_local(&cursor, &uc); - assert(ret >= 0); - (void)ret; - - while (n < max_depth) { - if (unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *) &ip) < 0) { - break; - } - - unw_proc_info_t pip; - unw_get_proc_info(&cursor, &pip); - - /* if n==0, it means that the signal handler interrupted us while we - were in the trampoline, so we are not executing (yet) the real main - loop function; just skip it */ - if (VMPROF_ADDR_OF_TRAMPOLINE((void*)pip.start_ip) && n > 0) { - // found main loop stack frame - void* sp; - unw_get_reg(&cursor, UNW_REG_SP, (unw_word_t *) &sp); - if (mainloop_get_virtual_ip) - ip = mainloop_get_virtual_ip((char *)sp); - else - ip = *(void **)sp; - } - - int first_run = (n == 0); - result[n++] = ip; - n = vmprof_write_header_for_jit_addr(result, n, ip, max_depth); - if (vmprof_unw_step(&cursor, first_run) <= 0) - break; - } - return n; -} - -static void 
*get_current_thread_id(void) +static intptr_t get_current_thread_id(void) { /* xxx This function is a hack on two fronts: @@ -269,7 +93,7 @@ An alternative would be to try to look if the information is available in the ucontext_t in the caller. */ - return (void *)pthread_self(); + return (intptr_t)pthread_self(); } @@ -278,8 +102,43 @@ * ************************************************************* */ +#include + +volatile int spinlock; +jmp_buf restore_point; + +static void segfault_handler(int arg) +{ + longjmp(restore_point, SIGSEGV); +} + static void sigprof_handler(int sig_nr, siginfo_t* info, void *ucontext) { +#ifdef __APPLE__ + // TERRIBLE HACK AHEAD + // on OS X, the thread local storage is sometimes uninitialized + // when the signal handler runs - it means it's impossible to read errno + // or call any syscall or read PyThread_Current or pthread_self. Additionally, + // it seems impossible to read the register gs. + // here we register segfault handler (all guarded by a spinlock) and call + // longjmp in case segfault happens while reading a thread local + while (__sync_lock_test_and_set(&spinlock, 1)) { + } + signal(SIGSEGV, &segfault_handler); + int fault_code = setjmp(restore_point); + if (fault_code == 0) { + pthread_self(); + get_current_thread_id(); + } else { + signal(SIGSEGV, SIG_DFL); + __sync_synchronize(); + spinlock = 0; + return; + } + signal(SIGSEGV, SIG_DFL); + __sync_synchronize(); + spinlock = 0; +#endif long val = __sync_fetch_and_add(&signal_handler_value, 2L); if ((val & 1) == 0) { @@ -296,9 +155,8 @@ struct prof_stacktrace_s *st = (struct prof_stacktrace_s *)p->data; st->marker = MARKER_STACKTRACE; st->count = 1; - st->stack[0] = GetPC((ucontext_t*)ucontext); - depth = get_stack_trace(st->stack+1, MAX_STACK_DEPTH-2, ucontext); - depth++; // To account for pc value in stack[0]; + depth = get_stack_trace(get_vmprof_stack(), st->stack, + MAX_STACK_DEPTH-2, GetPC((ucontext_t*)ucontext)); st->depth = depth; st->stack[depth++] = 
get_current_thread_id(); p->data_offset = offsetof(struct prof_stacktrace_s, marker); @@ -363,12 +221,15 @@ static void atfork_disable_timer(void) { if (profile_interval_usec > 0) { + saved_profile_file = profile_file; + profile_file = -1; remove_sigprof_timer(); } } static void atfork_enable_timer(void) { if (profile_interval_usec > 0) { + profile_file = saved_profile_file; install_sigprof_timer(); } } @@ -415,7 +276,7 @@ return -1; } -static int _write_all(const void *buf, size_t bufsize) +static int _write_all(const char *buf, size_t bufsize) { while (bufsize > 0) { ssize_t count = write(profile_file, buf, bufsize); @@ -427,71 +288,13 @@ return 0; } -static int opened_profile(char *interp_name) -{ - struct { - long hdr[5]; - char interp_name[259]; - } header; - - size_t namelen = strnlen(interp_name, 255); - current_codes = NULL; - - header.hdr[0] = 0; - header.hdr[1] = 3; - header.hdr[2] = 0; - header.hdr[3] = prepare_interval_usec; - header.hdr[4] = 0; - header.interp_name[0] = MARKER_HEADER; - header.interp_name[1] = '\x00'; - header.interp_name[2] = VERSION_THREAD_ID; - header.interp_name[3] = namelen; - memcpy(&header.interp_name[4], interp_name, namelen); - return _write_all(&header, 5 * sizeof(long) + 4 + namelen); -} - static int close_profile(void) { - char buf[4096]; - ssize_t size; unsigned char marker = MARKER_TRAILER; if (_write_all(&marker, 1) < 0) return -1; -#ifdef __linux__ - // copy /proc/self/maps to the end of the profile file - int srcfd = open("/proc/self/maps", O_RDONLY); - if (srcfd < 0) - return -1; - - while ((size = read(srcfd, buf, sizeof buf)) > 0) { - if (_write_all(buf, size) < 0) { - close(srcfd); - return -1; - } - } - close(srcfd); -#else - // freebsd and mac -#if defined(__APPLE__) - sprintf(buf, "vmmap %d", getpid()); -#else - sprintf(buf, "procstat -v %d", getpid()); -#endif - FILE *srcf = popen(buf, "r"); - if (!srcf) - return -1; - - while ((size = fread(buf, 1, sizeof buf, srcf))) { - if (_write_all(buf, size) < 0) { - 
pclose(srcf); - return -1; - } - } - pclose(srcf); -#endif - /* don't close() the file descriptor from here */ profile_file = -1; return 0; @@ -522,6 +325,9 @@ struct profbuf_s *p; char *t; + if (profile_file == -1) + return 0; // silently don't write it + retry: p = current_codes; if (p != NULL) { @@ -529,7 +335,7 @@ /* grabbed 'current_codes': we will append the current block to it if it contains enough room */ size_t freesize = SINGLE_BUF_SIZE - p->data_size; - if (freesize < blocklen) { + if (freesize < (size_t)blocklen) { /* full: flush it */ commit_buffer(profile_file, p); p = NULL; diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_main_win32.h pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_main_win32.h --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_main_win32.h 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_main_win32.h 2016-03-19 16:40:15.000000000 +0000 @@ -0,0 +1,174 @@ + +#include "windows.h" + +HANDLE write_mutex; + +int prepare_concurrent_bufs(void) +{ + if (!(write_mutex = CreateMutex(NULL, FALSE, NULL))) + return -1; + return 0; +} + +#if defined(_MSC_VER) +#include +typedef SSIZE_T ssize_t; +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "vmprof_stack.h" +#include "vmprof_get_custom_offset.h" +#include "vmprof_common.h" +#include + +// This file has been inspired (but not copied from since the LICENSE +// would not allow it) from verysleepy profiler + +#define SINGLE_BUF_SIZE 8192 + +volatile int thread_started = 0; +volatile int enabled = 0; + +static int _write_all(const char *buf, size_t bufsize) +{ + int res; + res = WaitForSingleObject(write_mutex, INFINITE); + if (profile_file == -1) { + ReleaseMutex(write_mutex); + return -1; + } + while (bufsize > 0) { + ssize_t count = write(profile_file, buf, bufsize); + if (count <= 0) { + ReleaseMutex(write_mutex); + return -1; /* failed */ + } + buf += count; + bufsize -= count; + } + 
ReleaseMutex(write_mutex); + return 0; +} + +RPY_EXTERN +int vmprof_register_virtual_function(char *code_name, long code_uid, + int auto_retry) +{ + char buf[2048]; + int namelen = strnlen(code_name, 1023); + buf[0] = MARKER_VIRTUAL_IP; + *(long*)(buf + 1) = code_uid; + *(long*)(buf + 1 + sizeof(long)) = namelen; + memcpy(buf + 1 + 2 * sizeof(long), code_name, namelen); + _write_all(buf, namelen + 2 * sizeof(long) + 1); + return 0; +} + +int vmprof_snapshot_thread(struct pypy_threadlocal_s *p, prof_stacktrace_s *stack) +{ + void *addr; + vmprof_stack_t *cur; + long tid; + HANDLE hThread; + long depth; + DWORD result; + CONTEXT ctx; + +#ifdef RPYTHON_LL2CTYPES + return 0; // not much we can do +#else +#ifndef RPY_TLOFS_thread_ident + return 0; // we can't freeze threads, unsafe +#else + hThread = OpenThread(THREAD_ALL_ACCESS, FALSE, p->thread_ident); + if (!hThread) { + return -1; + } + result = SuspendThread(hThread); + if(result == 0xffffffff) + return -1; // possible, e.g. attached debugger or thread alread suspended + ctx.ContextFlags = CONTEXT_FULL; + if (!GetThreadContext(hThread, &ctx)) + return -1; + depth = get_stack_trace(p->vmprof_tl_stack, + stack->stack, MAX_STACK_DEPTH-2, ctx.Eip); + stack->depth = depth; + stack->stack[depth++] = (void*)p->thread_ident; + stack->count = 1; + stack->marker = MARKER_STACKTRACE; + ResumeThread(hThread); + return depth; +#endif +#endif +} + +long __stdcall vmprof_mainloop(void *arg) +{ +#ifndef RPYTHON_LL2CTYPES + struct pypy_threadlocal_s *p; + prof_stacktrace_s *stack = (prof_stacktrace_s*)malloc(SINGLE_BUF_SIZE); + int depth; + + while (1) { + //Sleep(profile_interval_usec * 1000); + Sleep(10); + if (!enabled) { + continue; + } + _RPython_ThreadLocals_Acquire(); + p = _RPython_ThreadLocals_Head(); // the first one is one behind head + p = _RPython_ThreadLocals_Enum(p); + while (p) { + if (p->ready == 42) { + depth = vmprof_snapshot_thread(p, stack); + if (depth > 0) { + _write_all((char*)stack + 
offsetof(prof_stacktrace_s, marker), + depth * sizeof(void *) + + sizeof(struct prof_stacktrace_s) - + offsetof(struct prof_stacktrace_s, marker)); + } + } + p = _RPython_ThreadLocals_Enum(p); + } + _RPython_ThreadLocals_Release(); + } +#endif +} + +RPY_EXTERN +int vmprof_enable(void) +{ + if (!thread_started) { + if (!CreateThread(NULL, 0, vmprof_mainloop, NULL, 0, NULL)) { + return -1; + } + thread_started = 1; + } + enabled = 1; + return 0; +} + +RPY_EXTERN +int vmprof_disable(void) +{ + char marker = MARKER_TRAILER; + + enabled = 0; + if (_write_all(&marker, 1) < 0) + return -1; + profile_file = -1; + return 0; +} + +RPY_EXTERN +void vmprof_ignore_signals(int ignored) +{ +} diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_stack.h pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_stack.h --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_stack.h 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/src/vmprof_stack.h 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,29 @@ +#ifndef _VMPROF_STACK_H_ +#define _VMPROF_STACK_H_ + +#ifdef _WIN32 +#define intptr_t long // XXX windows VC++ 2008 lacks stdint.h +#else +#include +#endif + +#define VMPROF_CODE_TAG 1 /* <- also in cintf.py */ +#define VMPROF_BLACKHOLE_TAG 2 +#define VMPROF_JITTED_TAG 3 +#define VMPROF_JITTING_TAG 4 +#define VMPROF_GC_TAG 5 +#define VMPROF_ASSEMBLER_TAG 6 +// whatever we want here + +typedef struct vmprof_stack_s { + struct vmprof_stack_s* next; + intptr_t value; + intptr_t kind; +} vmprof_stack_t; + +// the kind is WORD so we consume exactly 3 WORDs and we don't have +// to worry too much. 
There is a potential for squeezing it with bit +// patterns into one WORD, but I don't want to care RIGHT NOW, potential +// for future optimization potential + +#endif diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/test/test_rvmprof.py pypy-5.0.1+dfsg/rpython/rlib/rvmprof/test/test_rvmprof.py --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/test/test_rvmprof.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/test/test_rvmprof.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,6 +3,7 @@ from rpython.rlib import rvmprof from rpython.translator.c.test.test_genc import compile from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.nonconst import NonConstant def test_vmprof_execute_code_1(): @@ -100,13 +101,16 @@ s = 0 for i in range(num): s += (i << 1) - if s % 32423423423 == 0: + if s % 2123423423 == 0: print s return s tmpfilename = str(udir.join('test_rvmprof')) def f(): + if NonConstant(False): + # Hack to give os.open() the correct annotation + os.open('foo', 1, 1) code = MyCode() rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) @@ -143,4 +147,3 @@ finally: assert os.path.exists(tmpfilename) os.unlink(tmpfilename) - \ No newline at end of file diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rvmprof/test/test_ztranslation.py pypy-5.0.1+dfsg/rpython/rlib/rvmprof/test/test_ztranslation.py --- pypy-4.0.1+dfsg/rpython/rlib/rvmprof/test/test_ztranslation.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rvmprof/test/test_ztranslation.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,9 +3,9 @@ sys.path += ['../../../..'] # for subprocess in test_interpreted import py from rpython.tool.udir import udir -from rpython.rlib import rvmprof +from rpython.rlib import rvmprof, rthread from rpython.translator.c.test.test_genc import compile - +from rpython.rlib.nonconst import NonConstant class MyCode: def __init__(self, count): @@ -38,6 +38,10 @@ PROF_FILE = 
str(udir.join('test_ztranslation.prof')) def main(argv=[]): + rthread.get_ident() # force TLOFS_thread_ident + if NonConstant(False): + # Hack to give os.open() the correct annotation + os.open('foo', 1, 1) code1 = MyCode(6500) fd = os.open(PROF_FILE, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0666) rvmprof.enable(fd, 0.01) @@ -60,8 +64,14 @@ def test_interpreted(): # takes forever if the Python process is already big... import subprocess - subprocess.check_call([sys.executable, os.path.basename(__file__)], - cwd=(os.path.dirname(__file__) or '.')) + me = os.path.basename(__file__) + if me.endswith('pyc') or me.endswith('pyo'): + me = me[:-1] + env = os.environ.copy() + env['PYTHONPATH'] = '' + subprocess.check_call([sys.executable, me], + cwd=(os.path.dirname(__file__) or '.'), + env=env) def test_compiled(): fn = compile(main, [], gcpolicy="minimark") diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rweaklist.py pypy-5.0.1+dfsg/rpython/rlib/rweaklist.py --- pypy-4.0.1+dfsg/rpython/rlib/rweaklist.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rweaklist.py 2016-03-19 16:40:12.000000000 +0000 @@ -5,6 +5,13 @@ class RWeakListMixin(object): + """A mixin base class. A collection that weakly maps indexes to objects. + After an object goes away, its index is marked free and will be reused + by some following add_handle() call. So add_handle() might not append + the object at the end of the list, but can put it anywhere. + + See also rpython.rlib.rshrinklist. + """ _mixin_ = True def initialize(self): diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rwin32file.py pypy-5.0.1+dfsg/rpython/rlib/rwin32file.py --- pypy-4.0.1+dfsg/rpython/rlib/rwin32file.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rwin32file.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,225 @@ +""" +Win32 API functions around files. 
+""" + +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.tool import rffi_platform as platform +from rpython.rlib.objectmodel import specialize + +@specialize.memo() +def make_win32_traits(traits): + from rpython.rlib import rwin32 + + if traits.str is unicode: + suffix = 'W' + else: + suffix = 'A' + + class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes = ['windows.h', 'winbase.h', 'sys/stat.h'], + ) + WIN32_FIND_DATA = platform.Struct( + 'struct _WIN32_FIND_DATA' + suffix, + # Only interesting fields + [('dwFileAttributes', rwin32.DWORD), + ('nFileSizeHigh', rwin32.DWORD), + ('nFileSizeLow', rwin32.DWORD), + ('ftCreationTime', rwin32.FILETIME), + ('ftLastAccessTime', rwin32.FILETIME), + ('ftLastWriteTime', rwin32.FILETIME), + ('cFileName', lltype.FixedSizeArray(traits.CHAR, 250))]) + ERROR_FILE_NOT_FOUND = platform.ConstantInteger( + 'ERROR_FILE_NOT_FOUND') + ERROR_NO_MORE_FILES = platform.ConstantInteger( + 'ERROR_NO_MORE_FILES') + + GetFileExInfoStandard = platform.ConstantInteger( + 'GetFileExInfoStandard') + FILE_ATTRIBUTE_DIRECTORY = platform.ConstantInteger( + 'FILE_ATTRIBUTE_DIRECTORY') + FILE_ATTRIBUTE_READONLY = platform.ConstantInteger( + 'FILE_ATTRIBUTE_READONLY') + INVALID_FILE_ATTRIBUTES = platform.ConstantInteger( + 'INVALID_FILE_ATTRIBUTES') + ERROR_SHARING_VIOLATION = platform.ConstantInteger( + 'ERROR_SHARING_VIOLATION') + _S_IFDIR = platform.ConstantInteger('_S_IFDIR') + _S_IFREG = platform.ConstantInteger('_S_IFREG') + _S_IFCHR = platform.ConstantInteger('_S_IFCHR') + _S_IFIFO = platform.ConstantInteger('_S_IFIFO') + FILE_TYPE_UNKNOWN = platform.ConstantInteger('FILE_TYPE_UNKNOWN') + FILE_TYPE_CHAR = platform.ConstantInteger('FILE_TYPE_CHAR') + FILE_TYPE_PIPE = platform.ConstantInteger('FILE_TYPE_PIPE') + + FILE_WRITE_ATTRIBUTES = platform.ConstantInteger( + 'FILE_WRITE_ATTRIBUTES') + OPEN_EXISTING = platform.ConstantInteger( + 
'OPEN_EXISTING') + FILE_FLAG_BACKUP_SEMANTICS = platform.ConstantInteger( + 'FILE_FLAG_BACKUP_SEMANTICS') + VOLUME_NAME_DOS = platform.ConstantInteger('VOLUME_NAME_DOS') + VOLUME_NAME_NT = platform.ConstantInteger('VOLUME_NAME_NT') + + WIN32_FILE_ATTRIBUTE_DATA = platform.Struct( + 'WIN32_FILE_ATTRIBUTE_DATA', + [('dwFileAttributes', rwin32.DWORD), + ('nFileSizeHigh', rwin32.DWORD), + ('nFileSizeLow', rwin32.DWORD), + ('ftCreationTime', rwin32.FILETIME), + ('ftLastAccessTime', rwin32.FILETIME), + ('ftLastWriteTime', rwin32.FILETIME)]) + + BY_HANDLE_FILE_INFORMATION = platform.Struct( + 'BY_HANDLE_FILE_INFORMATION', + [('dwFileAttributes', rwin32.DWORD), + ('ftCreationTime', rwin32.FILETIME), + ('ftLastAccessTime', rwin32.FILETIME), + ('ftLastWriteTime', rwin32.FILETIME), + ('dwVolumeSerialNumber', rwin32.DWORD), + ('nFileSizeHigh', rwin32.DWORD), + ('nFileSizeLow', rwin32.DWORD), + ('nNumberOfLinks', rwin32.DWORD), + ('nFileIndexHigh', rwin32.DWORD), + ('nFileIndexLow', rwin32.DWORD)]) + + config = platform.configure(CConfig) + + def external(*args, **kwargs): + kwargs['compilation_info'] = CConfig._compilation_info_ + llfunc = rffi.llexternal(calling_conv='win', *args, **kwargs) + return staticmethod(llfunc) + + class Win32Traits: + apisuffix = suffix + + for name in '''WIN32_FIND_DATA WIN32_FILE_ATTRIBUTE_DATA BY_HANDLE_FILE_INFORMATION + GetFileExInfoStandard + FILE_ATTRIBUTE_DIRECTORY FILE_ATTRIBUTE_READONLY + INVALID_FILE_ATTRIBUTES + _S_IFDIR _S_IFREG _S_IFCHR _S_IFIFO + FILE_TYPE_UNKNOWN FILE_TYPE_CHAR FILE_TYPE_PIPE + FILE_WRITE_ATTRIBUTES OPEN_EXISTING FILE_FLAG_BACKUP_SEMANTICS + VOLUME_NAME_DOS VOLUME_NAME_NT + ERROR_FILE_NOT_FOUND ERROR_NO_MORE_FILES + ERROR_SHARING_VIOLATION + '''.split(): + locals()[name] = config[name] + LPWIN32_FIND_DATA = lltype.Ptr(WIN32_FIND_DATA) + GET_FILEEX_INFO_LEVELS = rffi.ULONG # an enumeration + + FindFirstFile = external('FindFirstFile' + suffix, + [traits.CCHARP, LPWIN32_FIND_DATA], + rwin32.HANDLE, + 
save_err=rffi.RFFI_SAVE_LASTERROR) + FindNextFile = external('FindNextFile' + suffix, + [rwin32.HANDLE, LPWIN32_FIND_DATA], + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) + FindClose = external('FindClose', + [rwin32.HANDLE], + rwin32.BOOL) + + GetFileAttributes = external( + 'GetFileAttributes' + suffix, + [traits.CCHARP], + rwin32.DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) + + SetFileAttributes = external( + 'SetFileAttributes' + suffix, + [traits.CCHARP, rwin32.DWORD], + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) + + GetFileAttributesEx = external( + 'GetFileAttributesEx' + suffix, + [traits.CCHARP, GET_FILEEX_INFO_LEVELS, + lltype.Ptr(WIN32_FILE_ATTRIBUTE_DATA)], + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) + + GetFileInformationByHandle = external( + 'GetFileInformationByHandle', + [rwin32.HANDLE, lltype.Ptr(BY_HANDLE_FILE_INFORMATION)], + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) + + GetFileType = external( + 'GetFileType', + [rwin32.HANDLE], + rwin32.DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) + + LPSTRP = rffi.CArrayPtr(traits.CCHARP) + + GetFullPathName = external( + 'GetFullPathName' + suffix, + [traits.CCHARP, rwin32.DWORD, + traits.CCHARP, LPSTRP], + rwin32.DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) + + GetCurrentDirectory = external( + 'GetCurrentDirectory' + suffix, + [rwin32.DWORD, traits.CCHARP], + rwin32.DWORD, + save_err=rffi.RFFI_SAVE_LASTERROR) + + SetCurrentDirectory = external( + 'SetCurrentDirectory' + suffix, + [traits.CCHARP], + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) + + CreateDirectory = external( + 'CreateDirectory' + suffix, + [traits.CCHARP, rffi.VOIDP], + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) + + SetEnvironmentVariable = external( + 'SetEnvironmentVariable' + suffix, + [traits.CCHARP, traits.CCHARP], + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) + + CreateFile = external( + 'CreateFile' + apisuffix, + [traits.CCHARP, rwin32.DWORD, rwin32.DWORD, + rwin32.LPSECURITY_ATTRIBUTES, 
rwin32.DWORD, rwin32.DWORD, + rwin32.HANDLE], + rwin32.HANDLE, + save_err=rffi.RFFI_SAVE_LASTERROR) + + DeleteFile = external( + 'DeleteFile' + suffix, + [traits.CCHARP], + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) + + MoveFile = external( + 'MoveFile' + suffix, + [traits.CCHARP, traits.CCHARP], + rwin32.BOOL, + save_err=rffi.RFFI_SAVE_LASTERROR) + + return Win32Traits + +def make_longlong(high, low): + return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low) + +# Seconds between 1.1.1601 and 1.1.1970 +secs_between_epochs = rffi.r_longlong(11644473600) + +def FILE_TIME_to_time_t_float(filetime): + ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) + # FILETIME is in units of 100 nsec + return float(ft) * (1.0 / 10000000.0) - secs_between_epochs + +def time_t_to_FILE_TIME(time, filetime): + ft = rffi.r_longlong((time + secs_between_epochs) * 10000000) + filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32) + filetime.c_dwLowDateTime = rffi.r_uint(ft) # masking off high bits + diff -Nru pypy-4.0.1+dfsg/rpython/rlib/rwin32.py pypy-5.0.1+dfsg/rpython/rlib/rwin32.py --- pypy-4.0.1+dfsg/rpython/rlib/rwin32.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/rwin32.py 2016-03-19 16:40:12.000000000 +0000 @@ -5,7 +5,7 @@ import os import errno -from rpython.rtyper.module.ll_os_environ import make_env_impls +from rpython.rlib.rposix_environ import make_env_impls from rpython.rtyper.tool import rffi_platform from rpython.tool.udir import udir from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -289,7 +289,7 @@ buflen -= 1 if buflen <= 0: - result = fake_FormatError(code) + result = 'Windows Error %d' % (code,) else: result = rffi.charpsize2str(s_buf, buflen) finally: @@ -298,9 +298,6 @@ return result - def fake_FormatError(code): - return 'Windows Error %d' % (code,) - def lastSavedWindowsError(context="Windows Error"): code = GetLastError_saved() return WindowsError(code, context) @@ -445,19 +442,4 @@ def 
GetConsoleOutputCP(): return rffi.cast(lltype.Signed, _GetConsoleOutputCP()) - def os_kill(pid, sig): - if sig == CTRL_C_EVENT or sig == CTRL_BREAK_EVENT: - if GenerateConsoleCtrlEvent(sig, pid) == 0: - raise lastSavedWindowsError('os_kill failed generating event') - return - handle = OpenProcess(PROCESS_ALL_ACCESS, False, pid) - if handle == NULL_HANDLE: - raise lastSavedWindowsError('os_kill failed opening process') - try: - if TerminateProcess(handle, sig) == 0: - raise lastSavedWindowsError( - 'os_kill failed to terminate process') - finally: - CloseHandle(handle) - _wenviron_items, _wgetenv, _wputenv = make_env_impls(win32=True) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/_stacklet_shadowstack.py pypy-5.0.1+dfsg/rpython/rlib/_stacklet_shadowstack.py --- pypy-4.0.1+dfsg/rpython/rlib/_stacklet_shadowstack.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/_stacklet_shadowstack.py 2016-03-19 16:40:12.000000000 +0000 @@ -30,6 +30,11 @@ mixlevelannotator.finish() lltype.attachRuntimeTypeInfo(STACKLET, destrptr=destrptr) +# Note: it's important that this is a light finalizer, otherwise +# the GC will call it but still expect the object to stay around for +# a while---and it can't stay around, because s_sscopy points to +# freed nonsense and customtrace() will crash +@rgc.must_be_light_finalizer def stacklet_destructor(stacklet): sscopy = stacklet.s_sscopy if sscopy: diff -Nru pypy-4.0.1+dfsg/rpython/rlib/strstorage.py pypy-5.0.1+dfsg/rpython/rlib/strstorage.py --- pypy-4.0.1+dfsg/rpython/rlib/strstorage.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/strstorage.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,39 @@ +# Support for str_storage: i.e., reading primitive types out of RPython string +# +# There are various possible ways to implement it, however not all of them are +# easily supported by the JIT: +# +# 1. 
use _get_raw_str_buf and cast the chars buffer to RAW_STORAGE_PTR: this +# works well without the JIT, but the cast to RAW_STORAGE_PTR needs to +# happen inside a short "no GC" section (like the one in +# rstr.py:copy_string_contents), which has no chance to work during +# tracing +# +# 2. cast rpy_string to a GcStruct which has the very +# same layout, with the only difference that its 'chars' field is no +# longer an Array(Char) but e.e. an Array(Signed). Then, we just need to +# read the appropriate index into the array. To support this solution, +# the JIT's optimizer needed a few workarounds. This was removed. +# +# 3. use the newly introduced 'llop.gc_load_indexed'. +# + + +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem.rstr import STR +from rpython.rtyper.annlowlevel import llstr +from rpython.rlib.objectmodel import specialize + + +@specialize.ll() +def str_storage_getitem(TP, s, byte_offset): + # WARNING: the 'byte_offset' is, as its name says, measured in bytes; + # however, it should be aligned for TP, otherwise on some platforms this + # code will crash! 
+ lls = llstr(s) + base_ofs = (llmemory.offsetof(STR, 'chars') + + llmemory.itemoffsetof(STR.chars, 0)) + scale_factor = llmemory.sizeof(lltype.Char) + return llop.gc_load_indexed(TP, lls, byte_offset, + scale_factor, base_ofs) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_buffer.py pypy-5.0.1+dfsg/rpython/rlib/test/test_buffer.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_buffer.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_buffer.py 2016-03-19 16:40:12.000000000 +0000 @@ -32,3 +32,35 @@ a = RPythonAnnotator() s = a.build_types(func, [int]) assert s == SomeInteger(nonneg=True) + + +def test_as_str_and_offset_maybe(): + buf = StringBuffer('hello world') + assert buf.as_str_and_offset_maybe() == ('hello world', 0) + # + sbuf = SubBuffer(buf, 6, 5) + assert sbuf.getslice(0, 5, 1, 5) == 'world' + assert sbuf.as_str_and_offset_maybe() == ('hello world', 6) + # + ssbuf = SubBuffer(sbuf, 3, 2) + assert ssbuf.getslice(0, 2, 1, 2) == 'ld' + assert ssbuf.as_str_and_offset_maybe() == ('hello world', 9) + # + ss2buf = SubBuffer(sbuf, 1, -1) + assert ss2buf.as_str() == 'orld' + assert ss2buf.getlength() == 4 + ss3buf = SubBuffer(ss2buf, 1, -1) + assert ss3buf.as_str() == 'rld' + assert ss3buf.getlength() == 3 + # + ss4buf = SubBuffer(buf, 3, 4) + assert ss4buf.as_str() == 'lo w' + ss5buf = SubBuffer(ss4buf, 1, -1) + assert ss5buf.as_str() == 'o w' + assert ss5buf.getlength() == 3 + +def test_repeated_subbuffer(): + buf = StringBuffer('x' * 10000) + for i in range(9999, 9, -1): + buf = SubBuffer(buf, 1, i) + assert buf.getlength() == 10 diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_jit.py pypy-5.0.1+dfsg/rpython/rlib/test/test_jit.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_jit.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_jit.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,7 +4,8 @@ from rpython.annotator.model import UnionError from rpython.rlib.jit import (hint, we_are_jitted, JitDriver, 
elidable_promote, JitHintError, oopspec, isconstant, conditional_call, - elidable, unroll_safe, dont_look_inside) + elidable, unroll_safe, dont_look_inside, + enter_portal_frame, leave_portal_frame) from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.lltypesystem import lltype @@ -300,3 +301,11 @@ mix = MixLevelHelperAnnotator(t.rtyper) mix.getgraph(later, [annmodel.s_Bool], annmodel.s_None) mix.finish() + + def test_enter_leave_portal_frame(self): + from rpython.translator.interactive import Translation + def g(): + enter_portal_frame(1) + leave_portal_frame() + t = Translation(g, []) + t.compile_c() # does not crash diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_objectmodel.py pypy-5.0.1+dfsg/rpython/rlib/test/test_objectmodel.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_objectmodel.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_objectmodel.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,8 +1,13 @@ from collections import OrderedDict import py -from rpython.rlib.objectmodel import * -from rpython.rlib import types -from rpython.annotator import model +from rpython.rlib.objectmodel import ( + r_dict, UnboxedValue, Symbolic, compute_hash, compute_identity_hash, + compute_unique_id, current_object_addr_as_int, we_are_translated, + prepare_dict_update, reversed_dict, specialize, enforceargs, newlist_hint, + resizelist_hint, is_annotation_constant, always_inline, NOT_CONSTANT, + iterkeys_with_hash, iteritems_with_hash, contains_with_hash, + setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin, + fetch_translated_config) from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.test.test_llinterp import interpret @@ -72,9 +77,9 @@ rdic = r_dict(operator.eq, hash) rdic['x'] = rdic assert str(rdic) == "r_dict({'x': r_dict({...})})" - assert repr(rdic)== "r_dict({'x': 
r_dict({...})})" + assert repr(rdic) == "r_dict({'x': r_dict({...})})" -def test_r_dict(): +def func_r_dict(): # NB. this test function is also annotated/rtyped by the next tests d = r_dict(strange_key_eq, strange_key_hash) return play_with_r_dict(d) @@ -82,10 +87,11 @@ class Strange: def key_eq(strange, key1, key2): return key1[0] == key2[0] # only the 1st character is relevant + def key_hash(strange, key): return ord(key[0]) -def test_r_dict_bm(): +def func_r_dict_bm(): # NB. this test function is also annotated by the next tests strange = Strange() d = r_dict(strange.key_eq, strange.key_hash) @@ -94,7 +100,7 @@ def test_annotate_r_dict(): t = TranslationContext() a = t.buildannotator() - a.build_types(test_r_dict, []) + a.build_types(func_r_dict, []) #t.view() graph = graphof(t, strange_key_eq) assert a.binding(graph.getargs()[0]).knowntype == str @@ -105,7 +111,7 @@ def test_annotate_r_dict_bm(): t = TranslationContext() a = t.buildannotator() - a.build_types(test_r_dict_bm, []) + a.build_types(func_r_dict_bm, []) #t.view() strange_key_eq = Strange.key_eq.im_func strange_key_hash = Strange.key_hash.im_func @@ -113,17 +119,18 @@ Strange_def = a.bookkeeper.getuniqueclassdef(Strange) graph = graphof(t, strange_key_eq) - assert a.binding(graph.getargs()[0]).knowntype == Strange_def + assert a.binding(graph.getargs()[0]).classdef == Strange_def assert a.binding(graph.getargs()[1]).knowntype == str assert a.binding(graph.getargs()[2]).knowntype == str graph = graphof(t, strange_key_hash) - assert a.binding(graph.getargs()[0]).knowntype == Strange_def + assert a.binding(graph.getargs()[0]).classdef == Strange_def assert a.binding(graph.getargs()[1]).knowntype == str def test_unboxed_value(): class Base(object): __slots__ = () + class C(Base, UnboxedValue): __slots__ = 'smallint' @@ -196,7 +203,7 @@ class TestObjectModel(BaseRtypingTest): def test_we_are_translated(self): - assert we_are_translated() == False + assert we_are_translated() is False def fn(): return 
we_are_translated() @@ -204,11 +211,11 @@ assert res is True def test_rtype_r_dict(self): - res = self.interpret(test_r_dict, []) + res = self.interpret(func_r_dict, []) assert res is True def test_rtype_r_dict_bm(self): - res = self.interpret(test_r_dict_bm, []) + res = self.interpret(func_r_dict_bm, []) assert res is True def test_rtype_constant_r_dicts(self): @@ -290,24 +297,27 @@ def test_access_in_try(self): h = lambda x: 1 - eq = lambda x,y: x==y + eq = lambda x, y: x == y + def f(d): try: return d[2] except ZeroDivisionError: return 42 return -1 + def g(n): d = r_dict(eq, h) d[1] = n - d[2] = 2*n + d[2] = 2 * n return f(d) + res = self.interpret(g, [3]) assert res == 6 def test_access_in_try_set(self): h = lambda x: 1 - eq = lambda x,y: x==y + eq = lambda x, y: x == y def f(d): try: d[2] = 77 @@ -339,7 +349,7 @@ assert res == 42 # "did not crash" def test_reversed_dict(self): - d1 = {2:3, 4:5, 6:7} + d1 = {2: 3, 4: 5, 6: 7} def g(): n1 = 0 for key in d1: @@ -358,7 +368,7 @@ pass def f(i): assert compute_hash(i) == compute_hash(42) - assert compute_hash(i+1.0) == compute_hash(43.0) + assert compute_hash(i + 1.0) == compute_hash(43.0) assert compute_hash("Hello" + str(i)) == compute_hash("Hello42") if i == 42: p = None @@ -373,13 +383,11 @@ assert compute_hash(INFINITY) == 314159 assert compute_hash(-INFINITY) == -271828 assert compute_hash(NAN) == 0 - return i*2 + return i * 2 res = self.interpret(f, [42]) assert res == 84 def test_isconstant(self): - from rpython.rlib.objectmodel import is_annotation_constant, specialize - @specialize.arg_or_var(0) def f(arg): if is_annotation_constant(arg): @@ -396,7 +404,7 @@ def f(): x = [1] y = ['b'] - objectmodel.keepalive_until_here(x,y) + objectmodel.keepalive_until_here(x, y) return 1 res = self.interpret(f, []) @@ -410,8 +418,8 @@ def f(i): assert compute_hash(None) == 0 assert compute_hash(i) == h_42 - assert compute_hash(i+1.0) == h_43_dot_0 - assert compute_hash((i+3)/6.0) == h_7_dot_5 + assert compute_hash(i + 
1.0) == h_43_dot_0 + assert compute_hash((i + 3) / 6.0) == h_7_dot_5 assert compute_hash("Hello" + str(i)) == h_Hello42 if i == 42: p = None @@ -420,18 +428,25 @@ assert compute_hash(p) == h_None assert compute_hash(("world", None, i, 7.5)) == h_tuple assert compute_hash(q) == h_q - return i*2 - h_42 = compute_hash(42) + return i * 2 + h_42 = compute_hash(42) h_43_dot_0 = compute_hash(43.0) - h_7_dot_5 = compute_hash(7.5) - h_Hello42 = compute_hash("Hello42") - h_None = compute_hash(None) - h_tuple = compute_hash(("world", None, 42, 7.5)) - h_q = compute_hash(q) + h_7_dot_5 = compute_hash(7.5) + h_Hello42 = compute_hash("Hello42") + h_None = compute_hash(None) + h_tuple = compute_hash(("world", None, 42, 7.5)) + h_q = compute_hash(q) res = self.interpret(f, [42]) assert res == 84 + def test_fetch_translated_config(self): + assert fetch_translated_config() is None + def f(): + return fetch_translated_config().translation.continuation + res = self.interpret(f, []) + assert res is False + def test_specialize_decorator(): def f(): @@ -466,18 +481,18 @@ @always_inline def f(a, b, c): return a, b, c - assert f._always_inline_ == True + assert f._always_inline_ is True def test_enforceargs_defaults(): @enforceargs(int, int) def f(a, b=40): - return a+b + return a + b assert f(2) == 42 def test_enforceargs_keywords(): @enforceargs(b=int) def f(a, b, c): - return a+b + return a + b assert f._annenforceargs_ == (None, int, None) def test_enforceargs_int_float_promotion(): @@ -511,7 +526,7 @@ def f(a, b, c): return a, b, c assert f._annenforceargs_ == (int, str, None) - assert f(1, 2, 3) == (1, 2, 3) # no typecheck + assert f(1, 2, 3) == (1, 2, 3) # no typecheck def test_enforceargs_translates(): from rpython.rtyper.lltypesystem import lltype @@ -522,6 +537,18 @@ TYPES = [v.concretetype for v in graph.getargs()] assert TYPES == [lltype.Signed, lltype.Float] +def test_enforceargs_not_constant(): + from rpython.translator.translator import TranslationContext, graphof + 
@enforceargs(NOT_CONSTANT) + def f(a): + return a + def f42(): + return f(42) + t = TranslationContext() + a = t.buildannotator() + s = a.build_types(f42, []) + assert not hasattr(s, 'const') + def getgraph(f, argtypes): from rpython.translator.translator import TranslationContext, graphof @@ -539,39 +566,36 @@ def test_newlist(): - from rpython.annotator.model import SomeInteger def f(z): x = newlist_hint(sizehint=38) if z < 0: x.append(1) return len(x) - graph = getgraph(f, [SomeInteger()]) + graph = getgraph(f, [int]) for llop in graph.startblock.operations: if llop.opname == 'malloc_varsize': break assert llop.args[2].value == 38 def test_newlist_nonconst(): - from rpython.annotator.model import SomeInteger def f(z): x = newlist_hint(sizehint=z) return len(x) - graph = getgraph(f, [SomeInteger()]) + graph = getgraph(f, [int]) for llop in graph.startblock.operations: if llop.opname == 'malloc_varsize': break assert llop.args[2] is graph.startblock.inputargs[0] def test_resizelist_hint(): - from rpython.annotator.model import SomeInteger def f(z): x = [] resizelist_hint(x, 39) return len(x) - graph = getgraph(f, [SomeInteger()]) + graph = getgraph(f, [int]) for _, op in graph.iterblockops(): if op.opname == 'direct_call': break @@ -591,7 +615,7 @@ def test_iterkeys_with_hash(): def f(i): - d = {i+.0: 5, i+.5: 6} + d = {i + .0: 5, i + .5: 6} total = 0 for k, h in iterkeys_with_hash(d): total += k * h @@ -605,7 +629,7 @@ def test_iteritems_with_hash(): def f(i): - d = {i+.0: 5, i+.5: 6} + d = {i + .0: 5, i + .5: 6} total = 0 for k, v, h in iteritems_with_hash(d): total += k * h * v @@ -619,9 +643,9 @@ def test_contains_with_hash(): def f(i): - d = {i+.5: 5} - assert contains_with_hash(d, i+.5, compute_hash(i+.5)) - assert not contains_with_hash(d, i+.3, compute_hash(i+.3)) + d = {i + .5: 5} + assert contains_with_hash(d, i + .5, compute_hash(i + .5)) + assert not contains_with_hash(d, i + .3, compute_hash(i + .3)) return 0 f(29) @@ -630,9 +654,9 @@ def 
test_setitem_with_hash(): def f(i): d = {} - setitem_with_hash(d, i+.5, compute_hash(i+.5), 42) - setitem_with_hash(d, i+.6, compute_hash(i+.6), -612) - return d[i+.5] + setitem_with_hash(d, i + .5, compute_hash(i + .5), 42) + setitem_with_hash(d, i + .6, compute_hash(i + .6), -612) + return d[i + .5] assert f(29) == 42 res = interpret(f, [27]) @@ -640,8 +664,8 @@ def test_getitem_with_hash(): def f(i): - d = {i+.5: 42, i+.6: -612} - return getitem_with_hash(d, i+.5, compute_hash(i+.5)) + d = {i + .5: 42, i + .6: -612} + return getitem_with_hash(d, i + .5, compute_hash(i + .5)) assert f(29) == 42 res = interpret(f, [27]) @@ -649,10 +673,10 @@ def test_delitem_with_hash(): def f(i): - d = {i+.5: 42, i+.6: -612} - delitem_with_hash(d, i+.5, compute_hash(i+.5)) + d = {i + .5: 42, i + .6: -612} + delitem_with_hash(d, i + .5, compute_hash(i + .5)) try: - delitem_with_hash(d, i+.5, compute_hash(i+.5)) + delitem_with_hash(d, i + .5, compute_hash(i + .5)) except KeyError: pass else: @@ -682,39 +706,51 @@ def test_import_from_mixin(): class M: # old-style - def f(self): pass + def f(self): + pass class A: # old-style import_from_mixin(M) assert A.f.im_func is not M.f.im_func class M(object): - def f(self): pass + def f(self): + pass class A: # old-style import_from_mixin(M) assert A.f.im_func is not M.f.im_func class M: # old-style - def f(self): pass + def f(self): + pass class A(object): import_from_mixin(M) assert A.f.im_func is not M.f.im_func class M(object): - def f(self): pass + def f(self): + pass class A(object): import_from_mixin(M) assert A.f.im_func is not M.f.im_func class MBase(object): - a = 42; b = 43; c = 1000 - def f(self): return "hi" - def g(self): return self.c - 1 + a = 42 + b = 43 + c = 1000 + def f(self): + return "hi" + def g(self): + return self.c - 1 + class M(MBase): a = 84 - def f(self): return "there" + def f(self): + return "there" + class A(object): import_from_mixin(M) c = 88 + assert A.f.im_func is not M.f.im_func assert A.f.im_func is not 
MBase.f.im_func assert A.g.im_func is not MBase.g.im_func @@ -776,7 +812,6 @@ assert B._immutable_fields_ == ['b', 'a'] assert A._immutable_fields_ == ['a'] - class B(object): import_from_mixin(A) @@ -785,7 +820,6 @@ class C(A): _immutable_fields_ = ['c'] - class B(object): import_from_mixin(C) @@ -797,9 +831,9 @@ assert B._immutable_fields_ == ['b', 'c', 'a'] - class B(object): _immutable_fields_ = ['b'] + class BA(B): import_from_mixin(C) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_posix.py pypy-5.0.1+dfsg/rpython/rlib/test/test_posix.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_posix.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_posix.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,304 @@ +import py.test +from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.annlowlevel import hlstr +from rpython.tool.udir import udir +from rpython.rlib.rarithmetic import is_valid_int + +import os +exec 'import %s as posix' % os.name + +def setup_module(module): + testf = udir.join('test.txt') + module.path = testf.strpath + +class TestPosix(BaseRtypingTest): + + def setup_method(self, meth): + # prepare/restore the file before each test + testfile = open(path, 'wb') + testfile.write('This is a test') + testfile.close() + + def test_open(self): + def f(): + ff = posix.open(path, posix.O_RDONLY, 0777) + return ff + func = self.interpret(f, []) + assert is_valid_int(func) + + def test_fstat(self): + def fo(fi): + g = posix.fstat(fi) + return g + fi = os.open(path,os.O_RDONLY,0777) + func = self.interpret(fo,[fi]) + stat = os.fstat(fi) + for i in range(len(stat)): + assert long(getattr(func, 'item%d' % i)) == stat[i] + + + def test_stat(self): + def fo(): + g = posix.stat(path) + return g + func = self.interpret(fo,[]) + stat = os.stat(path) + for i in range(len(stat)): + assert long(getattr(func, 'item%d' % i)) == stat[i] + + def test_stat_exception(self): + def fo(): + try: + posix.stat('I/do/not/exist') + except 
OSError: + return True + else: + return False + res = self.interpret(fo,[]) + assert res + + def test_times(self): + py.test.skip("llinterp does not like tuple returns") + from rpython.rtyper.test.test_llinterp import interpret + times = interpret(lambda: posix.times(), ()) + assert isinstance(times, tuple) + assert len(times) == 5 + for value in times: + assert is_valid_int(value) + + + def test_lseek(self): + def f(fi, pos): + posix.lseek(fi, pos, 0) + fi = os.open(path, os.O_RDONLY, 0777) + func = self.interpret(f, [fi, 5]) + res = os.read(fi, 2) + assert res =='is' + + def test_isatty(self): + def f(fi): + posix.isatty(fi) + fi = os.open(path, os.O_RDONLY, 0777) + func = self.interpret(f, [fi]) + assert not func + os.close(fi) + func = self.interpret(f, [fi]) + assert not func + + def test_getcwd(self): + def f(): + return posix.getcwd() + res = self.interpret(f,[]) + cwd = os.getcwd() + #print res.chars,cwd + assert self.ll_to_string(res) == cwd + + def test_write(self): + def f(fi): + if fi > 0: + text = 'This is a test' + else: + text = '333' + return posix.write(fi,text) + fi = os.open(path,os.O_WRONLY,0777) + text = 'This is a test' + func = self.interpret(f,[fi]) + os.close(fi) + fi = os.open(path,os.O_RDONLY,0777) + res = os.read(fi,20) + assert res == text + + def test_read(self): + def f(fi,len): + return posix.read(fi,len) + fi = os.open(path,os.O_WRONLY,0777) + text = 'This is a test' + os.write(fi,text) + os.close(fi) + fi = os.open(path,os.O_RDONLY,0777) + res = self.interpret(f,[fi,20]) + assert self.ll_to_string(res) == text + + @py.test.mark.skipif("not hasattr(os, 'chown')") + def test_chown(self): + f = open(path, "w") + f.write("xyz") + f.close() + def f(): + try: + posix.chown(path, os.getuid(), os.getgid()) + return 1 + except OSError: + return 2 + + assert self.interpret(f, []) == 1 + os.unlink(path) + assert self.interpret(f, []) == 2 + + def test_close(self): + def f(fi): + return posix.close(fi) + fi = os.open(path,os.O_WRONLY,0777) + 
text = 'This is a test' + os.write(fi,text) + res = self.interpret(f,[fi]) + py.test.raises( OSError, os.fstat, fi) + + @py.test.mark.skipif("not hasattr(os, 'ftruncate')") + def test_ftruncate(self): + def f(fi,len): + os.ftruncate(fi,len) + fi = os.open(path,os.O_RDWR,0777) + func = self.interpret(f,[fi,6]) + assert os.fstat(fi).st_size == 6 + + @py.test.mark.skipif("not hasattr(os, 'getuid')") + def test_getuid(self): + def f(): + return os.getuid() + assert self.interpret(f, []) == f() + + @py.test.mark.skipif("not hasattr(os, 'getgid')") + def test_getgid(self): + def f(): + return os.getgid() + assert self.interpret(f, []) == f() + + @py.test.mark.skipif("not hasattr(os, 'setuid')") + def test_os_setuid(self): + def f(): + os.setuid(os.getuid()) + return os.getuid() + assert self.interpret(f, []) == f() + + @py.test.mark.skipif("not hasattr(os, 'sysconf')") + def test_os_sysconf(self): + def f(i): + return os.sysconf(i) + assert self.interpret(f, [13]) == f(13) + + @py.test.mark.skipif("not hasattr(os, 'confstr')") + def test_os_confstr(self): + def f(i): + try: + return os.confstr(i) + except OSError: + return "oooops!!" + some_value = os.confstr_names.values()[-1] + res = self.interpret(f, [some_value]) + assert hlstr(res) == f(some_value) + res = self.interpret(f, [94781413]) + assert hlstr(res) == "oooops!!" 
+ + @py.test.mark.skipif("not hasattr(os, 'pathconf')") + def test_os_pathconf(self): + def f(i): + return os.pathconf("/tmp", i) + i = os.pathconf_names["PC_NAME_MAX"] + some_value = self.interpret(f, [i]) + assert some_value >= 31 + + @py.test.mark.skipif("not hasattr(os, 'chroot')") + def test_os_chroot(self): + def f(): + try: + os.chroot('!@$#!#%$#^#@!#!$$#^') + except OSError: + return 1 + return 0 + + assert self.interpret(f, []) == 1 + + def test_os_wstar(self): + from rpython.rlib import rposix + for name in rposix.WAIT_MACROS: + if not hasattr(os, name): + continue + def fun(s): + return getattr(os, name)(s) + + for value in [0, 1, 127, 128, 255]: + res = self.interpret(fun, [value]) + assert res == fun(value) + + @py.test.mark.skipif("not hasattr(os, 'getgroups')") + def test_getgroups(self): + def f(): + return os.getgroups() + ll_a = self.interpret(f, []) + assert self.ll_to_list(ll_a) == f() + + @py.test.mark.skipif("not hasattr(os, 'setgroups')") + def test_setgroups(self): + def f(): + try: + os.setgroups(os.getgroups()) + except OSError: + pass + self.interpret(f, []) + + @py.test.mark.skipif("not hasattr(os, 'initgroups')") + def test_initgroups(self): + def f(): + try: + os.initgroups('sUJJeumz', 4321) + except OSError: + return 1 + return 0 + res = self.interpret(f, []) + assert res == 1 + + @py.test.mark.skipif("not hasattr(os, 'tcgetpgrp')") + def test_tcgetpgrp(self): + def f(fd): + try: + return os.tcgetpgrp(fd) + except OSError: + return 42 + res = self.interpret(f, [9999]) + assert res == 42 + + @py.test.mark.skipif("not hasattr(os, 'tcsetpgrp')") + def test_tcsetpgrp(self): + def f(fd, pgrp): + try: + os.tcsetpgrp(fd, pgrp) + except OSError: + return 1 + return 0 + res = self.interpret(f, [9999, 1]) + assert res == 1 + + @py.test.mark.skipif("not hasattr(os, 'getresuid')") + def test_getresuid(self): + def f(): + a, b, c = os.getresuid() + return a + b * 37 + c * 1291 + res = self.interpret(f, []) + a, b, c = os.getresuid() + assert res 
== a + b * 37 + c * 1291 + + @py.test.mark.skipif("not hasattr(os, 'getresgid')") + def test_getresgid(self): + def f(): + a, b, c = os.getresgid() + return a + b * 37 + c * 1291 + res = self.interpret(f, []) + a, b, c = os.getresgid() + assert res == a + b * 37 + c * 1291 + + @py.test.mark.skipif("not hasattr(os, 'setresuid')") + def test_setresuid(self): + def f(): + a, b, c = os.getresuid() + a = (a + 1) - 1 + os.setresuid(a, b, c) + self.interpret(f, []) + + @py.test.mark.skipif("not hasattr(os, 'setresgid')") + def test_setresgid(self): + def f(): + a, b, c = os.getresgid() + a = (a + 1) - 1 + os.setresgid(a, b, c) + self.interpret(f, []) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rawrefcount.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rawrefcount.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rawrefcount.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rawrefcount.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,268 @@ +import weakref +from rpython.rlib import rawrefcount, objectmodel, rgc +from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY, REFCNT_FROM_PYPY_LIGHT +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.annlowlevel import llhelper +from rpython.translator.c.test.test_standalone import StandaloneTests +from rpython.config.translationoption import get_combined_translation_config + + +class W_Root(object): + def __init__(self, intval=0): + self.intval = intval + def __nonzero__(self): + raise Exception("you cannot do that, you must use space.is_true()") + +PyObjectS = lltype.Struct('PyObjectS', + ('c_ob_refcnt', lltype.Signed), + ('c_ob_pypy_link', lltype.Signed)) +PyObject = lltype.Ptr(PyObjectS) + + +class TestRawRefCount: + + def setup_method(self, meth): + rawrefcount.init() + + def test_create_link_pypy(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) + assert 
rawrefcount.to_obj(W_Root, ob) == None + rawrefcount.create_link_pypy(p, ob) + assert ob.c_ob_refcnt == 0 + ob.c_ob_refcnt += REFCNT_FROM_PYPY_LIGHT + assert rawrefcount.from_obj(PyObject, p) == ob + assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') + + def test_create_link_pyobj(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == None + rawrefcount.create_link_pyobj(p, ob) + assert ob.c_ob_refcnt == 0 + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') + + def test_collect_p_dies(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_LIGHT + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + del ob, p + rawrefcount._collect() + assert rawrefcount._p_list == [] + assert wr_ob() is None + assert wr_p() is None + + def test_collect_p_keepalive_pyobject(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_LIGHT + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + ob.c_ob_refcnt += 1 # <= + del ob, p + rawrefcount._collect() + ob = wr_ob() + p = wr_p() + assert ob is not None and p is not None + assert rawrefcount._p_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + assert rawrefcount.from_obj(PyObject, p) == ob + lltype.free(ob, flavor='raw') + + def test_collect_p_keepalive_w_root(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_LIGHT + assert rawrefcount._p_list == [ob] + wr_ob = 
weakref.ref(ob) + del ob # p remains + rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert rawrefcount._p_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + assert rawrefcount.from_obj(PyObject, p) == ob + lltype.free(ob, flavor='raw') + + def test_collect_o_dies(self): + trigger = []; rawrefcount.init(lambda: trigger.append(1)) + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pyobj(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._o_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + del ob, p + rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert trigger == [1] + assert rawrefcount.next_dead(PyObject) == ob + assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS) + assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS) + assert rawrefcount._o_list == [] + assert wr_p() is None + assert ob.c_ob_refcnt == 1 # from the pending list + assert ob.c_ob_pypy_link == 0 + lltype.free(ob, flavor='raw') + + def test_collect_o_keepalive_pyobject(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + p.pyobj = ob + rawrefcount.create_link_pyobj(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._o_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + ob.c_ob_refcnt += 1 # <= + del p + rawrefcount._collect() + p = wr_p() + assert p is None # was unlinked + assert ob.c_ob_refcnt == 1 # != REFCNT_FROM_PYPY_OBJECT + 1 + assert rawrefcount._o_list == [] + assert rawrefcount.to_obj(W_Root, ob) == None + lltype.free(ob, flavor='raw') + + def test_collect_o_keepalive_w_root(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + p.pyobj = ob + rawrefcount.create_link_pyobj(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._o_list == [ob] + wr_ob = weakref.ref(ob) + del ob # p remains + rawrefcount._collect() + ob = wr_ob() + assert ob is not 
None + assert rawrefcount._o_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + assert p.pyobj == ob + lltype.free(ob, flavor='raw') + + def test_collect_s_dies(self): + trigger = []; rawrefcount.init(lambda: trigger.append(1)) + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + del ob, p + rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert trigger == [1] + assert rawrefcount._d_list == [ob] + assert rawrefcount._p_list == [] + assert wr_p() is None + assert ob.c_ob_refcnt == 1 # from _d_list + assert ob.c_ob_pypy_link == 0 + lltype.free(ob, flavor='raw') + + def test_collect_s_keepalive_pyobject(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + p.pyobj = ob + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + ob.c_ob_refcnt += 1 # <= + del ob, p + rawrefcount._collect() + ob = wr_ob() + p = wr_p() + assert ob is not None and p is not None + assert rawrefcount._p_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') + + def test_collect_s_keepalive_w_root(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + p.pyobj = ob + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + del ob # p remains + rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert rawrefcount._p_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') + + +class TestTranslated(StandaloneTests): + + def test_full_translation(self): + class State: + pass + state = State() + state.seen = [] + def dealloc_trigger(): + state.seen.append(1) + + def 
make_p(): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount.from_obj(PyObject, p) == ob + assert rawrefcount.to_obj(W_Root, ob) == p + return ob, p + + FTYPE = rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER + + def entry_point(argv): + ll_dealloc_trigger_callback = llhelper(FTYPE, dealloc_trigger) + rawrefcount.init(ll_dealloc_trigger_callback) + ob, p = make_p() + if state.seen != []: + print "OB COLLECTED REALLY TOO SOON" + return 1 + rgc.collect() + if state.seen != []: + print "OB COLLECTED TOO SOON" + return 1 + objectmodel.keepalive_until_here(p) + p = None + rgc.collect() + if state.seen != [1]: + print "OB NOT COLLECTED" + return 1 + if rawrefcount.next_dead(PyObject) != ob: + print "NEXT_DEAD != OB" + return 1 + if rawrefcount.next_dead(PyObject) != lltype.nullptr(PyObjectS): + print "NEXT_DEAD second time != NULL" + return 1 + print "OK!" + lltype.free(ob, flavor='raw') + return 0 + + self.config = get_combined_translation_config(translating=True) + self.config.translation.gc = "incminimark" + t, cbuilder = self.compile(entry_point) + data = cbuilder.cmdexec('hi there') + assert data.startswith('OK!\n') diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rawstorage.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rawstorage.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rawstorage.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rawstorage.py 2016-03-19 16:40:12.000000000 +0000 @@ -32,7 +32,6 @@ assert res == 3.14 free_raw_storage(r) - class TestRawStorage(BaseRtypingTest): def test_storage_int(self): diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rbigint.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rbigint.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rbigint.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rbigint.py 2016-03-19 16:40:15.000000000 +0000 @@ -825,7 +825,19 @@ def 
__init__(self, base, sign, digits): self.base = base self.sign = sign - self.next_digit = iter(digits + [-1]).next + self.i = 0 + self._digits = digits + def next_digit(self): + i = self.i + if i == len(self._digits): + return -1 + self.i = i + 1 + return self._digits[i] + def prev_digit(self): + i = self.i - 1 + assert i >= 0 + self.i = i + return self._digits[i] x = parse_digit_string(Parser(10, 1, [6])) assert x.eq(rbigint.fromint(6)) x = parse_digit_string(Parser(10, 1, [6, 2, 3])) @@ -847,6 +859,16 @@ x = parse_digit_string(Parser(7, -1, [0, 0, 0])) assert x.tobool() is False + for base in [2, 4, 8, 16, 32]: + for inp in [[0], [1], [1, 0], [0, 1], [1, 0, 1], [1, 0, 0, 1], + [1, 0, 0, base-1, 0, 1], [base-1, 1, 0, 0, 0, 1, 0], + [base-1]]: + inp = inp * 97 + x = parse_digit_string(Parser(base, -1, inp)) + num = sum(inp[i] * (base ** (len(inp)-1-i)) + for i in range(len(inp))) + assert x.eq(rbigint.fromlong(-num)) + BASE = 2 ** SHIFT diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rerased.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rerased.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rerased.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rerased.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,6 +3,7 @@ import copy from rpython.rlib.rerased import * +from rpython.rlib.rerased import _some_erased from rpython.annotator import model as annmodel from rpython.annotator.annrpython import RPythonAnnotator from rpython.rtyper.rclass import OBJECTPTR @@ -71,7 +72,7 @@ return eraseX(X()) a = make_annotator() s = a.build_types(f, []) - assert isinstance(s, SomeErased) + assert s == _some_erased() def test_annotate_2(): def f(): @@ -191,7 +192,7 @@ def interpret(self, *args, **kwargs): kwargs["taggedpointers"] = True - return BaseRtypingTest.interpret(self, *args, **kwargs) + return BaseRtypingTest.interpret(*args, **kwargs) def test_rtype_1(self): def f(): return eraseX(X()) @@ -299,10 +300,28 @@ self.interpret(l, [1]) self.interpret(l, [2]) 
+ def test_rtype_store_in_struct(self): + erase, unerase = new_erasing_pair("list of ints") + S = lltype.GcStruct('S', ('gcref', llmemory.GCREF)) + def make_s(l): + s = lltype.malloc(S) + s.gcref = erase(l) + return s + def l(flag): + l = [flag] + if flag > 5: + s = make_s(l) + p = s.gcref + else: + p = erase(l) + assert unerase(p) is l + self.interpret(l, [3]) + self.interpret(l, [8]) + def test_union(): - s_e1 = SomeErased() + s_e1 = _some_erased() s_e1.const = 1 - s_e2 = SomeErased() + s_e2 = _some_erased() s_e2.const = 3 assert not annmodel.pair(s_e1, s_e2).union().is_constant() diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rgil.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rgil.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rgil.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rgil.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,47 @@ +from rpython.rlib import rgil +from rpython.translator.c.test.test_standalone import StandaloneTests + + +class BaseTestGIL(StandaloneTests): + + def test_simple(self): + def main(argv): + rgil.release() + # don't have the GIL here + rgil.acquire() + rgil.yield_thread() + print "OK" # there is also a release/acquire pair here + return 0 + + main([]) + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('') + assert data == "OK\n" + + def test_after_thread_switch(self): + class Foo: + pass + foo = Foo() + foo.counter = 0 + def seeme(): + foo.counter += 1 + def main(argv): + rgil.invoke_after_thread_switch(seeme) + print "Test" # one release/acquire pair here + print foo.counter + print foo.counter + return 0 + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('') + assert data == "Test\n1\n2\n" + + +class TestGILAsmGcc(BaseTestGIL): + gc = 'minimark' + gcrootfinder = 'asmgcc' + +class TestGILShadowStack(BaseTestGIL): + gc = 'minimark' + gcrootfinder = 'shadowstack' diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rmarshal.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rmarshal.py 
--- pypy-4.0.1+dfsg/rpython/rlib/test/test_rmarshal.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rmarshal.py 2016-03-19 16:40:12.000000000 +0000 @@ -167,7 +167,7 @@ def test_stat_result(): import os from rpython.translator.c.test.test_genc import compile - from rpython.rtyper.module.ll_os_stat import s_StatResult + from rpython.rlib.rposix_stat import s_StatResult marshal_stat_result = get_marshaller(s_StatResult) unmarshal_stat_result = get_unmarshaller(s_StatResult) def f(path): @@ -190,3 +190,13 @@ assert sttuple[4] == st[4] assert sttuple[5] == st[5] assert len(sttuple) == 10 + +def test_longlong(): + # get_loader for (r_longolong, nonneg=True) used to return + # load_int_nonneg on 32-bit, instead of load_longlong. + for nonneg in [True, False]: + s_longlong = annmodel.SomeInteger(knowntype=r_longlong, nonneg=nonneg) + load = get_loader(s_longlong) + loader = Loader("I\x01\x23\x45\x67\x89\xab\xcd\x0e") + res = load(loader) + assert res == 0x0ecdab8967452301 diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rpath.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rpath.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rpath.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rpath.py 2016-03-19 16:40:12.000000000 +0000 @@ -68,8 +68,8 @@ assert rpath._nt_rabspath('d:\\foo\\bar\\..') == 'd:\\foo' assert rpath._nt_rabspath('d:\\foo\\bar\\..\\x') == 'd:\\foo\\x' curdrive = _ = rpath._nt_rsplitdrive(os.getcwd()) - assert len(curdrive) == 2 and curdrive[1] == ':' - assert rpath.rabspath('\\foo') == '%s\\foo' % curdrive + assert len(curdrive) == 2 and curdrive[0][1] == ':' + assert rpath.rabspath('\\foo') == '%s\\foo' % curdrive[0] def test_risabs_posix(): assert rpath._posix_risabs('/foo/bar') diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rposix_environ.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rposix_environ.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rposix_environ.py 1970-01-01 00:00:00.000000000 +0000 +++ 
pypy-5.0.1+dfsg/rpython/rlib/test/test_rposix_environ.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,39 @@ +from rpython.translator.c.test.test_genc import compile +import os + +def test_environ_items(): + def foo(x): + if x: + return len(os.environ.items()) + else: + return 0 + + f = compile(foo, [int], backendopt=False) + assert f(1) > 0 + +def test_unset_error(): + import sys + def foo(x): + if x: + os.environ['TEST'] = 'STRING' + assert os.environ['TEST'] == 'STRING' + del os.environ['TEST'] + try: + del os.environ['key='] + except (KeyError, OSError): + return 1 + return 2 + else: + return 0 + + f = compile(foo, [int], backendopt=False) + if sys.platform.startswith('win'): + # Do not open error dialog box + import ctypes + SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN + old_err_mode = ctypes.windll.kernel32.GetErrorMode() + new_err_mode = old_err_mode | SEM_NOGPFAULTERRORBOX + ctypes.windll.kernel32.SetErrorMode(new_err_mode) + assert f(1) == 1 + if sys.platform.startswith('win'): + ctypes.windll.kernel32.SetErrorMode(old_err_mode) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rposix.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rposix.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rposix.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rposix.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,26 +1,309 @@ from rpython.rtyper.test.test_llinterp import interpret +from rpython.translator.c.test.test_genc import compile +from rpython.tool.pytest.expecttest import ExpectTest from rpython.tool.udir import udir -from rpython.rlib import rposix +from rpython.rlib import rposix, rposix_stat, rstring import os, sys +import errno import py +class TestPosixFunction: + def test_access(self): + filename = str(udir.join('test_access.txt')) + fd = file(filename, 'w') + fd.close() + + for mode in os.R_OK, os.W_OK, os.X_OK, os.R_OK | os.W_OK | os.X_OK: + result = rposix.access(filename, mode) + assert result == os.access(filename, mode) + + def 
test_times(self): + """ + posix.times should compile as an RPython function and should return a + five-tuple giving float-representations (seconds, effectively) of the four + fields from the underlying struct tms and the return value. + """ + times = eval(compile(lambda: str(os.times()), ())()) + assert isinstance(times, tuple) + assert len(times) == 5 + for value in times: + assert isinstance(value, float) + + def test_getlogin(self): + if not hasattr(os, 'getlogin'): + py.test.skip('posix specific function') + try: + expected = os.getlogin() + except OSError, e: + py.test.skip("the underlying os.getlogin() failed: %s" % e) + data = rposix.getlogin() + assert data == expected + + def test_utimes(self): + if os.name != 'nt': + py.test.skip('Windows specific feature') + # Windows support centiseconds + def f(fname, t1): + os.utime(fname, (t1, t1)) + + fname = udir.join('test_utimes.txt') + fname.ensure() + t1 = 1159195039.25 + compile(f, (str, float))(str(fname), t1) + assert t1 == os.stat(str(fname)).st_mtime + if sys.version_info < (2, 7): + py.test.skip('requires Python 2.7') + t1 = 5000000000.0 + compile(f, (str, float))(str(fname), t1) + assert t1 == os.stat(str(fname)).st_mtime + + def test__getfullpathname(self): + if os.name != 'nt': + py.test.skip('nt specific function') + posix = __import__(os.name) + sysdrv = os.getenv('SystemDrive', 'C:') + stuff = sysdrv + 'stuff' + data = rposix.getfullpathname(stuff) + assert data == posix._getfullpathname(stuff) + # the most intriguing failure of ntpath.py should not repeat, here: + assert not data.endswith(stuff) + + def test_getcwd(self): + assert rposix.getcwd() == os.getcwd() + + def test_chdir(self): + def check_special_envvar(): + if sys.platform != 'win32': + return + pwd = os.getcwd() + import ctypes + buf = ctypes.create_string_buffer(1000) + len = ctypes.windll.kernel32.GetEnvironmentVariableA('=%c:' % pwd[0], buf, 1000) + if (len == 0) and "WINGDB_PYTHON" in os.environ: + # the ctypes call seems not to 
work in the Wing debugger + return + assert str(buf.value).lower() == pwd.lower() + # ctypes returns the drive letter in uppercase, + # os.getcwd does not, + # but there may be uppercase in os.getcwd path + + pwd = os.getcwd() + try: + check_special_envvar() + rposix.chdir('..') + assert os.getcwd() == os.path.dirname(pwd) + check_special_envvar() + finally: + os.chdir(pwd) + + def test_mkdir(self): + filename = str(udir.join('test_mkdir.dir')) + rposix.mkdir(filename, 0) + exc = py.test.raises(OSError, rposix.mkdir, filename, 0) + assert exc.value.errno == errno.EEXIST + if sys.platform == 'win32': + assert exc.type is WindowsError + + def test_strerror(self): + assert rposix.strerror(2) == os.strerror(2) + + def test_system(self): + filename = str(udir.join('test_system.txt')) + arg = '%s -c "print 1+1" > %s' % (sys.executable, filename) + data = rposix.system(arg) + assert data == 0 + assert file(filename).read().strip() == '2' + os.unlink(filename) + + + def test_execve(self): + if os.name != 'posix': + py.test.skip('posix specific function') + + EXECVE_ENV = {"foo": "bar", "baz": "quux"} + + def run_execve(program, args=None, env=None, do_path_lookup=False): + if args is None: + args = [program] + else: + args = [program] + args + if env is None: + env = {} + # we cannot directly call execve() because it replaces the + # current process. 
+ fd_read, fd_write = os.pipe() + childpid = os.fork() + if childpid == 0: + # in the child + os.close(fd_read) + os.dup2(fd_write, 1) # stdout + os.close(fd_write) + if do_path_lookup: + os.execvp(program, args) + else: + rposix.execve(program, args, env) + assert 0, "should not arrive here" + else: + # in the parent + os.close(fd_write) + child_stdout = [] + while True: + data = os.read(fd_read, 4096) + if not data: break # closed + child_stdout.append(data) + pid, status = os.waitpid(childpid, 0) + os.close(fd_read) + return status, ''.join(child_stdout) + + # Test exit status and code + result, child_stdout = run_execve("/usr/bin/which", ["true"], do_path_lookup=True) + result, child_stdout = run_execve(child_stdout.strip()) # /bin/true or /usr/bin/true + assert os.WIFEXITED(result) + assert os.WEXITSTATUS(result) == 0 + result, child_stdout = run_execve("/usr/bin/which", ["false"], do_path_lookup=True) + result, child_stdout = run_execve(child_stdout.strip()) # /bin/false or /usr/bin/false + assert os.WIFEXITED(result) + assert os.WEXITSTATUS(result) == 1 + + # Test environment + result, child_stdout = run_execve("/usr/bin/env", env=EXECVE_ENV) + assert os.WIFEXITED(result) + assert os.WEXITSTATUS(result) == 0 + assert dict([line.split('=') for line in child_stdout.splitlines()]) == EXECVE_ENV + + # The following won't actually execute anything, so they don't need + # a child process helper. 
+ + # If the target does not exist, an OSError should result + info = py.test.raises( + OSError, rposix.execve, "this/file/is/non/existent", [], {}) + assert info.value.errno == errno.ENOENT + + # If the target is not executable, an OSError should result + info = py.test.raises( + OSError, rposix.execve, "/etc/passwd", [], {}) + assert info.value.errno == errno.EACCES + + def test_os_write(self): + #Same as test in rpython/test/test_rbuiltin + fname = str(udir.join('os_test.txt')) + fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777) + assert fd >= 0 + rposix.write(fd, 'Hello world') + os.close(fd) + with open(fname) as fid: + assert fid.read() == "Hello world" + fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777) + os.close(fd) + py.test.raises(OSError, rposix.write, fd, 'Hello world') + + def test_os_close(self): + fname = str(udir.join('os_test.txt')) + fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777) + assert fd >= 0 + os.write(fd, 'Hello world') + rposix.close(fd) + py.test.raises(OSError, rposix.close, fd) + + def test_os_lseek(self): + fname = str(udir.join('os_test.txt')) + fd = os.open(fname, os.O_RDWR|os.O_CREAT, 0777) + assert fd >= 0 + os.write(fd, 'Hello world') + rposix.lseek(fd,0,0) + assert os.read(fd, 11) == 'Hello world' + os.close(fd) + py.test.raises(OSError, rposix.lseek, fd, 0, 0) + + def test_os_fsync(self): + fname = str(udir.join('os_test.txt')) + fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777) + assert fd >= 0 + os.write(fd, 'Hello world') + rposix.fsync(fd) + os.close(fd) + fid = open(fname) + assert fid.read() == 'Hello world' + fid.close() + py.test.raises(OSError, rposix.fsync, fd) + + @py.test.mark.skipif("not hasattr(os, 'fdatasync')") + def test_os_fdatasync(self): + fname = str(udir.join('os_test.txt')) + fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777) + assert fd >= 0 + os.write(fd, 'Hello world') + rposix.fdatasync(fd) + fid = open(fname) + assert fid.read() == 'Hello world' + os.close(fd) + py.test.raises(OSError, 
rposix.fdatasync, fd) + + def test_os_kill(self): + import subprocess + import signal + proc = subprocess.Popen([sys.executable, "-c", + "import time;" + "time.sleep(10)", + ], + ) + rposix.kill(proc.pid, signal.SIGTERM) + if os.name == 'nt': + expected = signal.SIGTERM + else: + expected = -signal.SIGTERM + assert proc.wait() == expected + + def test_isatty(self): + assert rposix.isatty(-1) is False + + +class TestOsExpect(ExpectTest): + def setup_class(cls): + if not hasattr(os, 'ttyname'): + py.test.skip("no ttyname") + + def test_ttyname(self): + def f(): + import os + from rpython.rtyper.test.test_llinterp import interpret + + def ll_to_string(s): + return ''.join(s.chars) + + def f(num): + try: + return os.ttyname(num) + except OSError: + return '' + + assert ll_to_string(interpret(f, [0])) == f(0) + assert ll_to_string(interpret(f, [338])) == '' + + self.run_test(f) + + def ll_to_string(s): return ''.join(s.chars) class UnicodeWithEncoding: + is_unicode = True + def __init__(self, unistr): self.unistr = unistr if sys.platform == 'win32': def as_bytes(self): from rpython.rlib.runicode import unicode_encode_mbcs - return unicode_encode_mbcs(self.unistr, len(self.unistr), - "strict") + res = unicode_encode_mbcs(self.unistr, len(self.unistr), + "strict") + return rstring.assert_str0(res) else: def as_bytes(self): from rpython.rlib.runicode import unicode_encode_utf_8 - return unicode_encode_utf_8(self.unistr, len(self.unistr), - "strict") + res = unicode_encode_utf_8(self.unistr, len(self.unistr), + "strict") + return rstring.assert_str0(res) def as_unicode(self): return self.unistr @@ -44,7 +327,7 @@ def test_open(self): def f(): try: - fd = rposix.open(self.path, os.O_RDONLY, 0777) + fd = os.open(self.path, os.O_RDONLY, 0777) try: text = os.read(fd, 50) return text @@ -57,7 +340,7 @@ def test_stat(self): def f(): - return rposix.stat(self.path).st_mtime + return rposix_stat.stat(self.path).st_mtime if sys.platform == 'win32': # double vs. 
float, be satisfied with sub-millisec resolution assert abs(interpret(f, []) - os.stat(self.ufilename).st_mtime) < 1e-4 @@ -169,3 +452,16 @@ def _get_filename(self): return (unicode(udir.join('test_open')) + u'\u65e5\u672c.txt') # "Japan" + +class TestRegisteredFunctions: + def test_dup(self): + def f(): + os.dup(4) + os.dup2(5, 6) + compile(f, ()) + + def test_open(self): + def f(): + os.open('/tmp/t', 0, 0) + os.open(u'/tmp/t', 0, 0) + compile(f, ()) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rposix_stat.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rposix_stat.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rposix_stat.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rposix_stat.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,58 @@ +import os, sys +import py +from rpython.rlib import rposix_stat +from rpython.tool.udir import udir + +class TestPosixStatFunctions: + @py.test.mark.skipif("sys.platform == 'win32'", + reason="win32 only has the portable fields") + def test_has_all_fields(self): + assert rposix_stat.STAT_FIELDS == rposix_stat.ALL_STAT_FIELDS[:13] + + def test_stat(self): + def check(f): + # msec resolution, +- rounding error + expected = int(os.stat(f).st_mtime * 1000) + assert abs(int(rposix_stat.stat(f).st_mtime * 1000) - expected) < 2 + assert abs(int(rposix_stat.stat(unicode(f)).st_mtime * 1000) - expected) < 2 + + if sys.platform == 'win32': + check('c:/') + check(os.environ['TEMP']) + else: + check('/') + check('/tmp') + check(sys.executable) + + def test_fstat(self): + stat = rposix_stat.fstat(0) # stdout + assert stat.st_mode != 0 + + def test_stat_large_number(self): + fname = udir.join('test_stat_large_number.txt') + fname.ensure() + t1 = 5000000000.0 + try: + os.utime(str(fname), (t1, t1)) + except OverflowError: + py.test.skip("This platform doesn't support setting stat times " + "to large values") + assert rposix_stat.stat(str(fname)).st_mtime == t1 + + @py.test.mark.skipif(not hasattr(os, 'statvfs'), + 
reason='posix specific function') + def test_statvfs(self): + try: + os.statvfs('.') + except OSError, e: + py.test.skip("the underlying os.statvfs() failed: %s" % e) + rposix_stat.statvfs('.') + + @py.test.mark.skipif(not hasattr(os, 'fstatvfs'), + reason='posix specific function') + def test_fstatvfs(self): + try: + os.fstatvfs(0) + except OSError, e: + py.test.skip("the underlying os.fstatvfs() failed: %s" % e) + rposix_stat.fstatvfs(0) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rsocket.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rsocket.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rsocket.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rsocket.py 2016-03-19 16:40:12.000000000 +0000 @@ -143,7 +143,7 @@ def test_simple_tcp(): - import thread + from rpython.rlib import rthread sock = RSocket() try_ports = [1023] + range(20000, 30000, 437) for port in try_ports: @@ -169,14 +169,14 @@ connected[0] = True finally: lock.release() - lock = thread.allocate_lock() - lock.acquire() - thread.start_new_thread(connecting, ()) + lock = rthread.allocate_lock() + lock.acquire(True) + rthread.start_new_thread(connecting, ()) print 'waiting for connection' fd1, addr2 = sock.accept() s1 = RSocket(fd=fd1) print 'connection accepted' - lock.acquire() + lock.acquire(True) assert connected[0] print 'connecting side knows that the connection was accepted too' assert addr.eq(s2.getpeername()) @@ -188,7 +188,9 @@ buf = s2.recv(100) assert buf == '?' 
print 'received ok' - thread.start_new_thread(s2.sendall, ('x'*50000,)) + def sendstuff(): + s2.sendall('x'*50000) + rthread.start_new_thread(sendstuff, ()) buf = '' while len(buf) < 50000: data = s1.recv(50100) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rstacklet.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rstacklet.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rstacklet.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rstacklet.py 2016-03-19 16:40:12.000000000 +0000 @@ -17,10 +17,9 @@ class Runner: STATUSMAX = 5000 - config = None def init(self, seed): - self.sthread = rstacklet.StackletThread(self.config) + self.sthread = rstacklet.StackletThread() self.random = rrandom.Random(seed) def done(self): @@ -301,12 +300,11 @@ config.translation.gcrootfinder = cls.gcrootfinder GCROOTFINDER = cls.gcrootfinder cls.config = config - cls.old_values = Runner.config, Runner.STATUSMAX - Runner.config = config + cls.old_status_max = Runner.STATUSMAX Runner.STATUSMAX = 25000 def teardown_class(cls): - Runner.config, Runner.STATUSMAX = cls.old_values + Runner.STATUSMAX = cls.old_status_max def test_demo1(self): t, cbuilder = self.compile(entry_point) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rthread.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rthread.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rthread.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rthread.py 2016-03-19 16:40:15.000000000 +0000 @@ -1,17 +1,11 @@ import gc, time from rpython.rlib.rthread import * from rpython.rlib.rarithmetic import r_longlong +from rpython.rlib import objectmodel from rpython.translator.c.test.test_boehm import AbstractGCTestClass from rpython.rtyper.lltypesystem import lltype, rffi import py -def setup_module(mod): - # Hack to avoid a deadlock if the module is run after other test files :-( - # In this module, we assume that rthread.start_new_thread() is not - # providing us with a GIL equivalent, except in 
test_gc_locking - # which installs its own aroundstate. - rffi.aroundstate._cleanup_() - def test_lock(): l = allocate_lock() ok1 = l.acquire(True) @@ -31,6 +25,7 @@ py.test.fail("Did not raise") def test_tlref_untranslated(): + import thread class FooBar(object): pass t = ThreadLocalReference(FooBar) @@ -43,7 +38,7 @@ time.sleep(0.2) results.append(t.get() is x) for i in range(5): - start_new_thread(subthread, ()) + thread.start_new_thread(subthread, ()) time.sleep(0.5) assert results == [True] * 15 @@ -99,7 +94,6 @@ def test_gc_locking(self): import time - from rpython.rlib.objectmodel import invoke_around_extcall from rpython.rlib.debug import ll_assert class State: @@ -123,17 +117,6 @@ ll_assert(j == self.j, "2: bad j") run._dont_inline_ = True - def before_extcall(): - release_NOAUTO(state.gil) - before_extcall._gctransformer_hint_cannot_collect_ = True - # ^^^ see comments in gil.py about this hint - - def after_extcall(): - acquire_NOAUTO(state.gil, True) - gc_thread_run() - after_extcall._gctransformer_hint_cannot_collect_ = True - # ^^^ see comments in gil.py about this hint - def bootstrap(): # after_extcall() is called before we arrive here. # We can't just acquire and release the GIL manually here, @@ -154,14 +137,9 @@ start_new_thread(bootstrap, ()) def f(): - state.gil = allocate_ll_lock() - acquire_NOAUTO(state.gil, True) state.bootstrapping = allocate_lock() state.answers = [] state.finished = 0 - # the next line installs before_extcall() and after_extcall() - # to be called automatically around external function calls. 
- invoke_around_extcall(before_extcall, after_extcall) g(10, 1) done = False @@ -179,10 +157,7 @@ return len(state.answers) expected = 89 - try: - fn = self.getcompiled(f, []) - finally: - rffi.aroundstate._cleanup_() + fn = self.getcompiled(f, []) answers = fn() assert answers == expected @@ -266,3 +241,60 @@ class TestUsingFramework(AbstractThreadTests): gcpolicy = 'minimark' + + def test_tlref_keepalive(self, no__thread=True): + import weakref + from rpython.config.translationoption import SUPPORT__THREAD + + if not (SUPPORT__THREAD or no__thread): + py.test.skip("no __thread support here") + + class FooBar(object): + pass + t = ThreadLocalReference(FooBar) + + def tset(): + x1 = FooBar() + t.set(x1) + return weakref.ref(x1) + tset._dont_inline_ = True + + class WrFromThread: + pass + wr_from_thread = WrFromThread() + + def f(): + config = objectmodel.fetch_translated_config() + assert t.automatic_keepalive(config) is True + wr = tset() + import gc; gc.collect() # 'x1' should not be collected + x2 = t.get() + assert x2 is not None + assert wr() is not None + assert wr() is x2 + return wr + + def thread_entry_point(): + wr = f() + wr_from_thread.wr = wr + wr_from_thread.seen = True + + def main(): + wr_from_thread.seen = False + start_new_thread(thread_entry_point, ()) + wr1 = f() + time.sleep(0.5) + assert wr_from_thread.seen is True + wr2 = wr_from_thread.wr + import gc; gc.collect() # wr2() should be collected here + assert wr1() is not None # this thread, still running + assert wr2() is None # other thread, not running any more + return 42 + + extra_options = {'no__thread': no__thread, 'shared': True} + fn = self.getcompiled(main, [], extra_options=extra_options) + res = fn() + assert res == 42 + + def test_tlref_keepalive__thread(self): + self.test_tlref_keepalive(no__thread=False) diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rtime.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rtime.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rtime.py 1970-01-01 
00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rtime.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,55 @@ + +from rpython.rtyper.test.tool import BaseRtypingTest + +import time, sys + +class TestTime(BaseRtypingTest): + def test_time_time(self): + def fn(): + return time.time() + + t0 = time.time() + res0 = self.interpret(fn, []) + t1 = time.time() + res1 = self.interpret(fn, []) + assert t0 <= res0 <= t1 <= res1 + + def test_time_clock(self): + def sleep(t): + # a version of time.sleep() that consumes actual CPU time + start = time.clock() + while abs(time.clock() - start) <= t: + pass + def f(): + return time.clock() + t0 = time.clock() + sleep(0.011) + t1 = self.interpret(f, []) + sleep(0.011) + t2 = time.clock() + sleep(0.011) + t3 = self.interpret(f, []) + sleep(0.011) + t4 = time.clock() + sleep(0.011) + t5 = self.interpret(f, []) + sleep(0.011) + t6 = time.clock() + # time.clock() and t1() might have a different notion of zero, so + # we can only subtract two numbers returned by the same function. + # Moreover they might have different precisions, but it should + # be at least 0.01 seconds, hence the "sleeps". 
+ assert 0.0099 <= t2-t0 <= 9.0 + assert 0.0099 <= t3-t1 <= t4-t0 <= 9.0 + assert 0.0099 <= t4-t2 <= t5-t1 <= t6-t0 <= 9.0 + assert 0.0099 <= t5-t3 <= t6-t2 <= 9.0 + assert 0.0099 <= t6-t4 <= 9.0 + + def test_time_sleep(self): + def does_nothing(): + time.sleep(0.19) + t0 = time.time() + self.interpret(does_nothing, []) + t1 = time.time() + assert t0 <= t1 + assert t1 - t0 >= 0.15 diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rzipfile.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rzipfile.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rzipfile.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rzipfile.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,6 +4,7 @@ from rpython.tool.udir import udir from zipfile import ZIP_STORED, ZIP_DEFLATED, ZipInfo, ZipFile from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rlib import clibffi # for side effect of testing lib_c_name on win32 import os import time diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_rzlib.py pypy-5.0.1+dfsg/rpython/rlib/test/test_rzlib.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_rzlib.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_rzlib.py 2016-03-19 16:40:12.000000000 +0000 @@ -6,6 +6,7 @@ import py from rpython.rlib import rzlib from rpython.rlib.rarithmetic import r_uint +from rpython.rlib import clibffi # for side effect of testing lib_c_name on win32 import zlib expanded = 'some bytes which will be compressed' diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_streamio.py pypy-5.0.1+dfsg/rpython/rlib/test/test_streamio.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_streamio.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_streamio.py 2016-03-19 16:40:12.000000000 +0000 @@ -1077,6 +1077,7 @@ alarm(1) assert file.read(10) == "hello" finally: + alarm(0) signal(SIGALRM, SIG_DFL) def test_write_interrupted(self): @@ -1102,6 +1103,7 @@ # can succeed. 
file.write("hello") finally: + alarm(0) signal(SIGALRM, SIG_DFL) def test_append_mode(self): diff -Nru pypy-4.0.1+dfsg/rpython/rlib/test/test_strstorage.py pypy-5.0.1+dfsg/rpython/rlib/test/test_strstorage.py --- pypy-4.0.1+dfsg/rpython/rlib/test/test_strstorage.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rlib/test/test_strstorage.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,96 @@ +import py +import sys +import struct +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.strstorage import str_storage_getitem +from rpython.rlib.rarithmetic import r_singlefloat +from rpython.rtyper.test.tool import BaseRtypingTest + +IS_32BIT = (sys.maxint == 2147483647) + +class BaseStrStorageTest: + + ## def test_str_getitem_supported(self): + ## if IS_32BIT: + ## expected = False + ## else: + ## expected = True + ## # + ## assert self.str_storage_supported(rffi.LONGLONG) == expected + ## assert self.str_storage_supported(rffi.DOUBLE) == expected + + def test_signed(self): + buf = struct.pack('@ll', 42, 43) + size = struct.calcsize('@l') + assert self.str_storage_getitem(lltype.Signed, buf, 0) == 42 + assert self.str_storage_getitem(lltype.Signed, buf, size) == 43 + + def test_short(self): + buf = struct.pack('@hh', 42, 43) + size = struct.calcsize('@h') + x = self.str_storage_getitem(rffi.SHORT, buf, 0) + assert int(x) == 42 + x = self.str_storage_getitem(rffi.SHORT, buf, size) + assert int(x) == 43 + + def test_float(self): + ## if not str_storage_supported(lltype.Float): + ## py.test.skip('str_storage_getitem(lltype.Float) not supported on this machine') + buf = struct.pack('@dd', 12.3, 45.6) + size = struct.calcsize('@d') + assert self.str_storage_getitem(lltype.Float, buf, 0) == 12.3 + assert self.str_storage_getitem(lltype.Float, buf, size) == 45.6 + + def test_singlefloat(self): + buf = struct.pack('@ff', 12.3, 45.6) + size = struct.calcsize('@f') + x = self.str_storage_getitem(lltype.SingleFloat, buf, 0) + assert x == 
r_singlefloat(12.3) + x = self.str_storage_getitem(lltype.SingleFloat, buf, size) + assert x == r_singlefloat(45.6) + + +class TestDirect(BaseStrStorageTest): + + ## def str_storage_supported(self, TYPE): + ## return str_storage_supported(TYPE) + + def str_storage_getitem(self, TYPE, buf, offset): + return str_storage_getitem(TYPE, buf, offset) + +class TestRTyping(BaseStrStorageTest, BaseRtypingTest): + + ## def str_storage_supported(self, TYPE): + ## def fn(): + ## return str_storage_supported(TYPE) + ## return self.interpret(fn, []) + + def str_storage_getitem(self, TYPE, buf, offset): + def fn(offset): + return str_storage_getitem(TYPE, buf, offset) + return self.interpret(fn, [offset]) + + +class TestCompiled(BaseStrStorageTest): + cache = {} + + def str_storage_getitem(self, TYPE, buf, offset): + if TYPE not in self.cache: + from rpython.translator.c.test.test_genc import compile + + assert isinstance(TYPE, lltype.Primitive) + if TYPE in (lltype.Float, lltype.SingleFloat): + TARGET_TYPE = lltype.Float + else: + TARGET_TYPE = lltype.Signed + + def llf(buf, offset): + x = str_storage_getitem(TYPE, buf, offset) + return lltype.cast_primitive(TARGET_TYPE, x) + + fn = compile(llf, [str, int]) + self.cache[TYPE] = fn + # + fn = self.cache[TYPE] + x = fn(buf, offset) + return lltype.cast_primitive(TYPE, x) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/callparse.py pypy-5.0.1+dfsg/rpython/rtyper/callparse.py --- pypy-4.0.1+dfsg/rpython/rtyper/callparse.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/callparse.py 2016-03-19 16:40:12.000000000 +0000 @@ -59,7 +59,8 @@ try: holders = arguments.match_signature(signature, defs_h) except ArgErr, e: - raise TyperError("signature mismatch: %s" % e.getmsg(graph.name)) + raise TyperError("signature mismatch: %s: %s" % ( + graph.name, e.getmsg())) assert len(holders) == len(rinputs), "argument parsing mismatch" vlist = [] diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/debug.py 
pypy-5.0.1+dfsg/rpython/rtyper/debug.py --- pypy-4.0.1+dfsg/rpython/rtyper/debug.py 1970-01-01 00:00:00.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/debug.py 2016-03-19 16:40:12.000000000 +0000 @@ -0,0 +1,47 @@ +from rpython.rlib.objectmodel import we_are_translated +from rpython.rtyper.extregistry import ExtRegistryEntry +from rpython.rtyper.lltypesystem import lltype + +def ll_assert(x, msg): + """After translation to C, this becomes an RPyAssert.""" + assert type(x) is bool, "bad type! got %r" % (type(x),) + assert x, msg + +class Entry(ExtRegistryEntry): + _about_ = ll_assert + + def compute_result_annotation(self, s_x, s_msg): + assert s_msg.is_constant(), ("ll_assert(x, msg): " + "the msg must be constant") + return None + + def specialize_call(self, hop): + vlist = hop.inputargs(lltype.Bool, lltype.Void) + hop.exception_cannot_occur() + hop.genop('debug_assert', vlist) + +class FatalError(Exception): + pass + +def fatalerror(msg): + # print the RPython traceback and abort with a fatal error + if not we_are_translated(): + raise FatalError(msg) + from rpython.rtyper.lltypesystem import lltype + from rpython.rtyper.lltypesystem.lloperation import llop + llop.debug_print_traceback(lltype.Void) + llop.debug_fatalerror(lltype.Void, msg) +fatalerror._dont_inline_ = True +fatalerror._jit_look_inside_ = False +fatalerror._annenforceargs_ = [str] + +def fatalerror_notb(msg): + # a variant of fatalerror() that doesn't print the RPython traceback + if not we_are_translated(): + raise FatalError(msg) + from rpython.rtyper.lltypesystem import lltype + from rpython.rtyper.lltypesystem.lloperation import llop + llop.debug_fatalerror(lltype.Void, msg) +fatalerror_notb._dont_inline_ = True +fatalerror_notb._jit_look_inside_ = False +fatalerror_notb._annenforceargs_ = [str] diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/extfunc.py pypy-5.0.1+dfsg/rpython/rtyper/extfunc.py --- pypy-4.0.1+dfsg/rpython/rtyper/extfunc.py 2015-11-19 19:21:40.000000000 +0000 +++ 
pypy-5.0.1+dfsg/rpython/rtyper/extfunc.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,212 +1,105 @@ -from rpython.rtyper import extregistry -from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.rtyper.lltypesystem.lltype import typeOf, FuncType, functionptr -from rpython.annotator.model import unionof +from rpython.annotator.model import unionof, SomeObject from rpython.annotator.signature import annotation, SignatureError +from rpython.rtyper.extregistry import ExtRegistryEntry, lookup +from rpython.rtyper.lltypesystem.lltype import ( + typeOf, FuncType, functionptr, _ptr, Void) +from rpython.rtyper.error import TyperError +from rpython.rtyper.rmodel import Repr + +class SomeExternalFunction(SomeObject): + def __init__(self, name, args_s, s_result): + self.name = name + self.args_s = args_s + self.s_result = s_result + + def check_args(self, callspec): + params_s = self.args_s + args_s, kwargs = callspec.unpack() + if kwargs: + raise SignatureError( + "External functions cannot be called with keyword arguments") + if len(args_s) != len(params_s): + raise SignatureError("Argument number mismatch") + for i, s_param in enumerate(params_s): + arg = unionof(args_s[i], s_param) + if not s_param.contains(arg): + raise SignatureError( + "In call to external function %r:\n" + "arg %d must be %s,\n" + " got %s" % ( + self.name, i + 1, s_param, args_s[i])) + + def call(self, callspec): + self.check_args(callspec) + return self.s_result + + def rtyper_makerepr(self, rtyper): + if not self.is_constant(): + raise TyperError("Non-constant external function!") + entry = lookup(self.const) + impl = getattr(entry, 'lltypeimpl', None) + fakeimpl = getattr(entry, 'lltypefakeimpl', None) + return ExternalFunctionRepr(self, impl, fakeimpl) + + def rtyper_makekey(self): + return self.__class__, self + +class ExternalFunctionRepr(Repr): + lowleveltype = Void + + def __init__(self, s_func, impl, fakeimpl): + self.s_func = s_func + self.impl = impl + self.fakeimpl = fakeimpl 
-import py, sys - -class extdef(object): + def rtype_simple_call(self, hop): + rtyper = hop.rtyper + args_r = [rtyper.getrepr(s_arg) for s_arg in self.s_func.args_s] + r_result = rtyper.getrepr(self.s_func.s_result) + obj = self.get_funcptr(rtyper, args_r, r_result) + hop2 = hop.copy() + hop2.r_s_popfirstarg() + vlist = [hop2.inputconst(typeOf(obj), obj)] + hop2.inputargs(*args_r) + hop2.exception_is_here() + return hop2.genop('direct_call', vlist, r_result) - def __init__(self, *args, **kwds): - self.def_args = args - self.def_kwds = kwds - -def lazy_register(func_or_list, register_func): - """ Lazily register external function. Will create a function, - which explodes when llinterpd/translated, but does not explode - earlier - """ - if isinstance(func_or_list, list): - funcs = func_or_list - else: - funcs = [func_or_list] - try: - val = register_func() - if isinstance(val, extdef): - assert len(funcs) == 1 - register_external(funcs[0], *val.def_args, **val.def_kwds) - return - return val - except (SystemExit, MemoryError, KeyboardInterrupt): - raise - except: - exc, exc_inst, tb = sys.exc_info() - for func in funcs: - # if the function has already been registered and we got - # an exception afterwards, the ExtRaisingEntry would create - # a double-registration and crash in an AssertionError that - # masks the original problem. In this case, just re-raise now. 
- if extregistry.is_registered(func): - raise exc, exc_inst, tb - class ExtRaisingEntry(ExtRegistryEntry): - _about_ = func - def __getattr__(self, attr): - if attr == '_about_' or attr == '__dict__': - return super(ExtRegistryEntry, self).__getattr__(attr) - raise exc, exc_inst, tb - -def registering(func, condition=True): - if not condition: - return lambda method: None - - def decorator(method): - method._registering_func = func - return method - return decorator - -def registering_if(ns, name, condition=True): - try: - func = getattr(ns, name) - except AttributeError: - condition = False - func = None - - return registering(func, condition=condition) - -class LazyRegisteringMeta(type): - def __new__(self, _name, _type, _vars): - RegisteringClass = type.__new__(self, _name, _type, _vars) - allfuncs = [] - for varname in _vars: - attr = getattr(RegisteringClass, varname) - f = getattr(attr, '_registering_func', None) - if f: - allfuncs.append(f) - registering_inst = lazy_register(allfuncs, RegisteringClass) - if registering_inst is not None: - for varname in _vars: - attr = getattr(registering_inst, varname) - f = getattr(attr, '_registering_func', None) - if f: - lazy_register(f, attr) - RegisteringClass.instance = registering_inst - # override __init__ to avoid confusion - def raising(self): - raise TypeError("Cannot call __init__ directly, use cls.instance to access singleton") - RegisteringClass.__init__ = raising - return RegisteringClass - -class BaseLazyRegistering(object): - __metaclass__ = LazyRegisteringMeta - compilation_info = None - - def configure(self, CConfig): - classes_seen = self.__dict__.setdefault('__classes_seen', {}) - if CConfig in classes_seen: - return - from rpython.rtyper.tool import rffi_platform as platform - # copy some stuff - if self.compilation_info is None: - self.compilation_info = CConfig._compilation_info_ - else: - self.compilation_info = self.compilation_info.merge( - CConfig._compilation_info_) - 
self.__dict__.update(platform.configure(CConfig)) - classes_seen[CConfig] = True - - def llexternal(self, *args, **kwds): - kwds = kwds.copy() - from rpython.rtyper.lltypesystem import rffi - - if 'compilation_info' in kwds: - kwds['compilation_info'] = self.compilation_info.merge( - kwds['compilation_info']) + def get_funcptr(self, rtyper, args_r, r_result): + from rpython.rtyper.rtyper import llinterp_backend + args_ll = [r_arg.lowleveltype for r_arg in args_r] + ll_result = r_result.lowleveltype + name = self.s_func.name + if self.fakeimpl and rtyper.backend is llinterp_backend: + FT = FuncType(args_ll, ll_result) + return functionptr( + FT, name, _external_name=name, _callable=self.fakeimpl) + elif self.impl: + if isinstance(self.impl, _ptr): + return self.impl + else: + # store some attributes to the 'impl' function, where + # the eventual call to rtyper.getcallable() will find them + # and transfer them to the final lltype.functionptr(). + self.impl._llfnobjattrs_ = {'_name': name} + return rtyper.getannmixlevel().delayedfunction( + self.impl, self.s_func.args_s, self.s_func.s_result) else: - kwds['compilation_info'] = self.compilation_info - return rffi.llexternal(*args, **kwds) + fakeimpl = self.fakeimpl or self.s_func.const + FT = FuncType(args_ll, ll_result) + return functionptr( + FT, name, _external_name=name, _callable=fakeimpl) - def _freeze_(self): - return True class ExtFuncEntry(ExtRegistryEntry): safe_not_sandboxed = False - # common case: args is a list of annotation or types - def normalize_args(self, *args_s): - args = self.signature_args - signature_args = [annotation(arg, None) for arg in args] - assert len(args_s) == len(signature_args),\ - "Argument number mismatch" - - for i, expected in enumerate(signature_args): - arg = unionof(args_s[i], expected) - if not expected.contains(arg): - name = getattr(self, 'name', None) - if not name: - try: - name = self.instance.__name__ - except AttributeError: - name = '?' 
- raise SignatureError("In call to external function %r:\n" - "arg %d must be %s,\n" - " got %s" % ( - name, i+1, expected, args_s[i])) - return signature_args - - def compute_result_annotation(self, *args_s): - self.normalize_args(*args_s) # check arguments - return self.signature_result + def compute_annotation(self): + s_result = SomeExternalFunction( + self.name, self.signature_args, self.signature_result) + if (self.bookkeeper.annotator.translator.config.translation.sandbox + and not self.safe_not_sandboxed): + s_result.needs_sandboxing = True + return s_result - def specialize_call(self, hop): - rtyper = hop.rtyper - signature_args = self.normalize_args(*hop.args_s) - args_r = [rtyper.getrepr(s_arg) for s_arg in signature_args] - args_ll = [r_arg.lowleveltype for r_arg in args_r] - s_result = hop.s_result - r_result = rtyper.getrepr(s_result) - ll_result = r_result.lowleveltype - name = getattr(self, 'name', None) or self.instance.__name__ - impl = getattr(self, 'lltypeimpl', None) - fakeimpl = getattr(self, 'lltypefakeimpl', self.instance) - if impl: - if hasattr(self, 'lltypefakeimpl'): - # If we have both an llimpl and an llfakeimpl, - # we need a wrapper that selects the proper one and calls it - from rpython.tool.sourcetools import func_with_new_name - # Using '*args' is delicate because this wrapper is also - # created for init-time functions like llarena.arena_malloc - # which are called before the GC is fully initialized - args = ', '.join(['arg%d' % i for i in range(len(args_ll))]) - d = {'original_impl': impl, - 's_result': s_result, - 'fakeimpl': fakeimpl, - '__name__': __name__, - } - exec py.code.compile(""" - from rpython.rlib.objectmodel import running_on_llinterp - from rpython.rlib.debug import llinterpcall - from rpython.rlib.jit import dont_look_inside - # note: we say 'dont_look_inside' mostly because the - # JIT does not support 'running_on_llinterp', but in - # theory it is probably right to stop jitting anyway. 
- @dont_look_inside - def ll_wrapper(%s): - if running_on_llinterp: - return llinterpcall(s_result, fakeimpl, %s) - else: - return original_impl(%s) - """ % (args, args, args)) in d - impl = func_with_new_name(d['ll_wrapper'], name + '_wrapper') - if rtyper.annotator.translator.config.translation.sandbox: - impl._dont_inline_ = True - # store some attributes to the 'impl' function, where - # the eventual call to rtyper.getcallable() will find them - # and transfer them to the final lltype.functionptr(). - impl._llfnobjattrs_ = { - '_name': self.name, - '_safe_not_sandboxed': self.safe_not_sandboxed, - } - obj = rtyper.getannmixlevel().delayedfunction( - impl, signature_args, hop.s_result) - else: - FT = FuncType(args_ll, ll_result) - obj = functionptr(FT, name, _external_name=self.name, - _callable=fakeimpl, - _safe_not_sandboxed=self.safe_not_sandboxed) - vlist = [hop.inputconst(typeOf(obj), obj)] + hop.inputargs(*args_r) - hop.exception_is_here() - return hop.genop('direct_call', vlist, r_result) def register_external(function, args, result=None, export_name=None, llimpl=None, llfakeimpl=None, sandboxsafe=False): @@ -222,34 +115,20 @@ if export_name is None: export_name = function.__name__ + params_s = [annotation(arg) for arg in args] + s_result = annotation(result) class FunEntry(ExtFuncEntry): _about_ = function safe_not_sandboxed = sandboxsafe - - if args is None: - def normalize_args(self, *args_s): - return args_s # accept any argument unmodified - elif callable(args): - # custom annotation normalizer (see e.g. 
os.utime()) - normalize_args = staticmethod(args) - else: # use common case behavior - signature_args = args - - signature_result = annotation(result, None) + signature_args = params_s + signature_result = s_result name = export_name if llimpl: lltypeimpl = staticmethod(llimpl) if llfakeimpl: lltypefakeimpl = staticmethod(llfakeimpl) - if export_name: - FunEntry.__name__ = export_name - else: - FunEntry.__name__ = function.func_name - -BaseLazyRegistering.register = staticmethod(register_external) - def is_external(func): if hasattr(func, 'value'): func = func.value diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/extfuncregistry.py pypy-5.0.1+dfsg/rpython/rtyper/extfuncregistry.py --- pypy-4.0.1+dfsg/rpython/rtyper/extfuncregistry.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/extfuncregistry.py 2016-03-19 16:40:12.000000000 +0000 @@ -2,14 +2,15 @@ from rpython.rtyper.extfunc import register_external +# Register replacement functions for builtin functions +from rpython.rlib import rposix, rposix_stat, rposix_environ +from rpython.rlib import rtime + # ___________________________ # math functions import math from rpython.rtyper.lltypesystem.module import ll_math -from rpython.rtyper.module import ll_os -from rpython.rtyper.module import ll_time -from rpython.rtyper.module import ll_pdb from rpython.rlib import rfloat # the following functions all take one float, return one float diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/llinterp.py pypy-5.0.1+dfsg/rpython/rtyper/llinterp.py --- pypy-4.0.1+dfsg/rpython/rtyper/llinterp.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/llinterp.py 2016-03-19 16:40:15.000000000 +0000 @@ -925,6 +925,21 @@ def op_gc_gcflag_extra(self, subopnum, *args): return self.heap.gcflag_extra(subopnum, *args) + def op_gc_rawrefcount_init(self, *args): + raise NotImplementedError("gc_rawrefcount_init") + + def op_gc_rawrefcount_to_obj(self, *args): + raise NotImplementedError("gc_rawrefcount_to_obj") + + def 
op_gc_rawrefcount_from_obj(self, *args): + raise NotImplementedError("gc_rawrefcount_from_obj") + + def op_gc_rawrefcount_create_link_pyobj(self, *args): + raise NotImplementedError("gc_rawrefcount_create_link_pyobj") + + def op_gc_rawrefcount_create_link_pypy(self, *args): + raise NotImplementedError("gc_rawrefcount_create_link_pypy") + def op_do_malloc_fixedsize(self): raise NotImplementedError("do_malloc_fixedsize") def op_do_malloc_fixedsize_clear(self): @@ -950,6 +965,13 @@ return self.op_raw_load(RESTYPE, _address_of_thread_local(), offset) op_threadlocalref_get.need_result_type = True + def op_threadlocalref_acquire(self, prev): + raise NotImplementedError + def op_threadlocalref_release(self, prev): + raise NotImplementedError + def op_threadlocalref_enum(self, prev): + raise NotImplementedError + # __________________________________________________________ # operations on addresses diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/ll2ctypes.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/ll2ctypes.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/ll2ctypes.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/ll2ctypes.py 2016-03-19 16:40:12.000000000 +0000 @@ -426,7 +426,12 @@ else: n = None cstruct = cls._malloc(n) - add_storage(container, _struct_mixin, ctypes.pointer(cstruct)) + + if isinstance(container, lltype._fixedsizearray): + cls_mixin = _fixedsizedarray_mixin + else: + cls_mixin = _struct_mixin + add_storage(container, cls_mixin, ctypes.pointer(cstruct)) if delayed_converters is None: delayed_converters_was_None = True @@ -463,6 +468,9 @@ def remove_regular_struct_content(container): STRUCT = container._TYPE + if isinstance(STRUCT, lltype.FixedSizeArray): + del container._items + return for field_name in STRUCT._names: FIELDTYPE = getattr(STRUCT, field_name) if not isinstance(FIELDTYPE, lltype.ContainerType): @@ -503,7 +511,11 @@ def struct_use_ctypes_storage(container, ctypes_storage): STRUCT = 
container._TYPE assert isinstance(STRUCT, lltype.Struct) - add_storage(container, _struct_mixin, ctypes_storage) + if isinstance(container, lltype._fixedsizearray): + cls_mixin = _fixedsizedarray_mixin + else: + cls_mixin = _struct_mixin + add_storage(container, cls_mixin, ctypes_storage) remove_regular_struct_content(container) for field_name in STRUCT._names: FIELDTYPE = getattr(STRUCT, field_name) @@ -515,8 +527,10 @@ struct_use_ctypes_storage(struct_container, struct_storage) struct_container._setparentstructure(container, field_name) elif isinstance(FIELDTYPE, lltype.Array): - assert FIELDTYPE._hints.get('nolength', False) == False - arraycontainer = _array_of_known_length(FIELDTYPE) + if FIELDTYPE._hints.get('nolength', False): + arraycontainer = _array_of_unknown_length(FIELDTYPE) + else: + arraycontainer = _array_of_known_length(FIELDTYPE) arraycontainer._storage = ctypes.pointer( getattr(ctypes_storage.contents, field_name)) arraycontainer._setparentstructure(container, field_name) @@ -528,6 +542,7 @@ # Ctypes-aware subclasses of the _parentable classes ALLOCATED = {} # mapping {address: _container} +DEBUG_ALLOCATED = False def get_common_subclass(cls1, cls2, cache={}): """Return a unique subclass with (cls1, cls2) as bases.""" @@ -567,6 +582,8 @@ raise Exception("internal ll2ctypes error - " "double conversion from lltype to ctypes?") # XXX don't store here immortal structures + if DEBUG_ALLOCATED: + print >> sys.stderr, "LL2CTYPES:", hex(addr) ALLOCATED[addr] = self def _addressof_storage(self): @@ -579,6 +596,8 @@ self._check() # no double-frees # allow the ctypes object to go away now addr = ctypes.cast(self._storage, ctypes.c_void_p).value + if DEBUG_ALLOCATED: + print >> sys.stderr, "LL2C FREE:", hex(addr) try: del ALLOCATED[addr] except KeyError: @@ -613,11 +632,14 @@ return object.__hash__(self) def __repr__(self): + if '__str__' in self._TYPE._adtmeths: + r = self._TYPE._adtmeths['__str__'](self) + else: + r = 'C object %s' % (self._TYPE,) if 
self._storage is None: - return '' % (self._TYPE,) + return '' % (r,) else: - return '' % (self._TYPE, - fixid(self._addressof_storage())) + return '<%s at 0x%x>' % (r, fixid(self._addressof_storage())) def __str__(self): return repr(self) @@ -642,6 +664,45 @@ cobj = lltype2ctypes(value) setattr(self._storage.contents, field_name, cobj) +class _fixedsizedarray_mixin(_parentable_mixin): + """Mixin added to _fixedsizearray containers when they become ctypes-based.""" + __slots__ = () + + def __getattr__(self, field_name): + if hasattr(self, '_items'): + obj = lltype._fixedsizearray.__getattr__.im_func(self, field_name) + return obj + else: + cobj = getattr(self._storage.contents, field_name) + T = getattr(self._TYPE, field_name) + return ctypes2lltype(T, cobj) + + def __setattr__(self, field_name, value): + if field_name.startswith('_'): + object.__setattr__(self, field_name, value) # '_xxx' attributes + else: + cobj = lltype2ctypes(value) + if hasattr(self, '_items'): + lltype._fixedsizearray.__setattr__.im_func(self, field_name, cobj) + else: + setattr(self._storage.contents, field_name, cobj) + + + def getitem(self, index, uninitialized_ok=False): + if hasattr(self, '_items'): + obj = lltype._fixedsizearray.getitem.im_func(self, + index, uninitialized_ok=uninitialized_ok) + return obj + else: + return getattr(self, 'item%d' % index) + + def setitem(self, index, value): + cobj = lltype2ctypes(value) + if hasattr(self, '_items'): + lltype._fixedsizearray.setitem.im_func(self, index, value) + else: + setattr(self, 'item%d' % index, cobj) + class _array_mixin(_parentable_mixin): """Mixin added to _array containers when they become ctypes-based.""" __slots__ = () @@ -902,6 +963,14 @@ llobj = ctypes.sizeof(get_ctypes_type(llobj.TYPE)) * llobj.repeat elif isinstance(llobj, ComputedIntSymbolic): llobj = llobj.compute_fn() + elif isinstance(llobj, llmemory.CompositeOffset): + llobj = sum([lltype2ctypes(c) for c in llobj.offsets]) + elif isinstance(llobj, 
llmemory.FieldOffset): + CSTRUCT = get_ctypes_type(llobj.TYPE) + llobj = getattr(CSTRUCT, llobj.fldname).offset + elif isinstance(llobj, llmemory.ArrayItemsOffset): + CARRAY = get_ctypes_type(llobj.TYPE) + llobj = CARRAY.items.offset else: raise NotImplementedError(llobj) # don't know about symbolic value @@ -934,7 +1003,8 @@ REAL_TYPE = T.TO if T.TO._arrayfld is not None: carray = getattr(cobj.contents, T.TO._arrayfld) - container = lltype._struct(T.TO, carray.length) + length = getattr(carray, 'length', 9999) # XXX + container = lltype._struct(T.TO, length) else: # special treatment of 'OBJECT' subclasses if get_rtyper() and lltype._castdepth(REAL_TYPE, OBJECT) >= 0: diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/lloperation.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/lloperation.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/lloperation.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/lloperation.py 2016-03-19 16:40:12.000000000 +0000 @@ -417,6 +417,7 @@ 'raw_load': LLOp(sideeffects=False, canrun=True), 'raw_store': LLOp(canrun=True), 'bare_raw_store': LLOp(), + 'gc_load_indexed': LLOp(sideeffects=False, canrun=True), 'stack_malloc': LLOp(), # mmh 'track_alloc_start': LLOp(), 'track_alloc_stop': LLOp(), @@ -452,6 +453,8 @@ 'jit_record_exact_class' : LLOp(canrun=True), 'jit_ffi_save_result': LLOp(canrun=True), 'jit_conditional_call': LLOp(), + 'jit_enter_portal_frame': LLOp(canrun=True), + 'jit_leave_portal_frame': LLOp(canrun=True), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize':LLOp(canmallocgc=True), @@ -502,6 +505,12 @@ 'gc_gcflag_extra' : LLOp(), 'gc_add_memory_pressure': LLOp(), + 'gc_rawrefcount_init': LLOp(), + 'gc_rawrefcount_create_link_pypy': LLOp(), + 'gc_rawrefcount_create_link_pyobj': LLOp(), + 'gc_rawrefcount_from_obj': LLOp(sideeffects=False), + 'gc_rawrefcount_to_obj': LLOp(sideeffects=False), + # ------- JIT & GC interaction, only for some GCs ---------- 
'gc_adr_of_nursery_free' : LLOp(), @@ -544,8 +553,11 @@ 'getslice': LLOp(canraise=(Exception,)), 'check_and_clear_exc': LLOp(), - 'threadlocalref_addr': LLOp(sideeffects=False), # get (or make) addr of tl + 'threadlocalref_addr': LLOp(), # get (or make) addr of tl 'threadlocalref_get': LLOp(sideeffects=False), # read field (no check) + 'threadlocalref_acquire': LLOp(), # lock for enum + 'threadlocalref_release': LLOp(), # lock for enum + 'threadlocalref_enum': LLOp(sideeffects=False), # enum all threadlocalrefs # __________ debugging __________ 'debug_view': LLOp(), diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/lltype.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/lltype.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/lltype.py 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/lltype.py 2016-03-19 16:40:12.000000000 +0000 @@ -1469,7 +1469,10 @@ result = intmask(obj._getid()) # assume that id() returns an addressish value which is # not zero and aligned to at least a multiple of 4 - assert result != 0 and (result & 3) == 0 + # (at least for GC pointers; we can't really assume anything + # for raw addresses) + if self._T._gckind == 'gc': + assert result != 0 and (result & 3) == 0 return result def _cast_to_adr(self): @@ -1758,7 +1761,10 @@ def __new__(self, TYPE, n=None, initialization=None, parent=None, parentindex=None): - my_variety = _struct_variety(TYPE._names) + if isinstance(TYPE, FixedSizeArray): + my_variety = _fixedsizearray + else: + my_variety = _struct_variety(TYPE._names) return object.__new__(my_variety) def __init__(self, TYPE, n=None, initialization=None, parent=None, @@ -1768,7 +1774,6 @@ raise TypeError("%r is not variable-sized" % (TYPE,)) if n is None and TYPE._arrayfld is not None: raise TypeError("%r is variable-sized" % (TYPE,)) - first, FIRSTTYPE = TYPE._first_struct() for fld, typ in TYPE._flds.items(): if fld == TYPE._arrayfld: value = _array(typ, n, initialization=initialization, @@ -1811,23 
+1816,48 @@ raise UninitializedMemoryAccess("%r.%s"%(self, field_name)) return r - # for FixedSizeArray kind of structs: + +class _fixedsizearray(_struct): + def __init__(self, TYPE, n=None, initialization=None, parent=None, + parentindex=None): + _parentable.__init__(self, TYPE) + if n is not None: + raise TypeError("%r is not variable-sized" % (TYPE,)) + typ = TYPE.OF + storage = [] + for i, fld in enumerate(TYPE._names): + value = typ._allocate(initialization=initialization, + parent=self, parentindex=fld) + storage.append(value) + self._items = storage + if parent is not None: + self._setparentstructure(parent, parentindex) def getlength(self): - assert isinstance(self._TYPE, FixedSizeArray) return self._TYPE.length def getbounds(self): return 0, self.getlength() def getitem(self, index, uninitialized_ok=False): - assert isinstance(self._TYPE, FixedSizeArray) - return self._getattr('item%d' % index, uninitialized_ok) + assert 0 <= index < self.getlength() + return self._items[index] def setitem(self, index, value): - assert isinstance(self._TYPE, FixedSizeArray) - setattr(self, 'item%d' % index, value) + assert 0 <= index < self.getlength() + self._items[index] = value + def __getattr__(self, name): + # obscure + if name.startswith("item"): + return self.getitem(int(name[len('item'):])) + return _struct.__getattr__(self, name) + + def __setattr__(self, name, value): + if name.startswith("item"): + self.setitem(int(name[len('item'):]), value) + return + _struct.__setattr__(self, name, value) class _array(_parentable): _kind = "array" diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/module/ll_math.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/module/ll_math.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/module/ll_math.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/module/ll_math.py 2016-03-19 16:40:12.000000000 +0000 @@ -6,8 +6,8 @@ from rpython.translator import cdir from rpython.rlib import jit, rposix from 
rpython.rlib.rfloat import INFINITY, NAN, isfinite, isinf, isnan +from rpython.rlib.rposix import UNDERSCORE_ON_WIN32 from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rtyper.module.support import UNDERSCORE_ON_WIN32 from rpython.tool.sourcetools import func_with_new_name from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/module/ll_os_path.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/module/ll_os_path.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/module/ll_os_path.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/module/ll_os_path.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -from rpython.rtyper.module.support import LLSupport -from rpython.rtyper.module.ll_os_path import BaseOsPath - -class Implementation(BaseOsPath, LLSupport): - pass - - diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/opimpl.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/opimpl.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/opimpl.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/opimpl.py 2016-03-19 16:40:12.000000000 +0000 @@ -624,6 +624,12 @@ def op_jit_ffi_save_result(*args): pass +def op_jit_enter_portal_frame(x): + pass + +def op_jit_leave_portal_frame(): + pass + def op_get_group_member(TYPE, grpptr, memberoffset): from rpython.rtyper.lltypesystem import llgroup assert isinstance(memberoffset, llgroup.GroupMemberOffset) @@ -702,6 +708,17 @@ return p[0] op_raw_load.need_result_type = True +def op_gc_load_indexed(TVAL, p, index, scale, base_ofs): + # 'base_ofs' should be a CompositeOffset(..., ArrayItemsOffset). + # 'scale' should be a llmemory.sizeof(). 
+ from rpython.rtyper.lltypesystem import rffi + ofs = base_ofs + scale * index + if isinstance(ofs, int): + return op_raw_load(TVAL, p, ofs) + p = rffi.cast(rffi.CArrayPtr(TVAL), llmemory.cast_ptr_to_adr(p) + ofs) + return p[0] +op_gc_load_indexed.need_result_type = True + def op_likely(x): assert isinstance(x, bool) return x diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rbuilder.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rbuilder.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rbuilder.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rbuilder.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,7 @@ from rpython.rlib import rgc, jit from rpython.rlib.objectmodel import enforceargs from rpython.rlib.rarithmetic import ovfcheck, r_uint, intmask -from rpython.rlib.debug import ll_assert +from rpython.rtyper.debug import ll_assert from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.rptr import PtrRepr from rpython.rtyper.lltypesystem import lltype, rffi, rstr @@ -11,7 +11,7 @@ from rpython.rtyper.rbuilder import AbstractStringBuilderRepr from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.annlowlevel import llstr, llunicode - + # ------------------------------------------------------------ diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rbytearray.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rbytearray.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rbytearray.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rbytearray.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,7 @@ from rpython.rtyper.rbytearray import AbstractByteArrayRepr from rpython.rtyper.lltypesystem import lltype, rstr -from rpython.rlib.debug import ll_assert +from rpython.rtyper.debug import ll_assert BYTEARRAY = lltype.GcForwardReference() diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rdict.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rdict.py --- 
pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rdict.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rdict.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,7 +4,7 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib import objectmodel, jit -from rpython.rlib.debug import ll_assert +from rpython.rtyper.debug import ll_assert from rpython.rlib.rarithmetic import r_uint, intmask, LONG_BIT from rpython.rtyper import rmodel from rpython.rtyper.error import TyperError diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rffi.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rffi.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rffi.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rffi.py 2016-03-19 16:40:12.000000000 +0000 @@ -169,9 +169,9 @@ argnames = ', '.join(['a%d' % i for i in range(len(args))]) source = py.code.Source(""" + from rpython.rlib import rgil def call_external_function(%(argnames)s): - before = aroundstate.before - if before: before() + rgil.release() # NB. it is essential that no exception checking occurs here! 
if %(save_err)d: from rpython.rlib import rposix @@ -180,12 +180,10 @@ if %(save_err)d: from rpython.rlib import rposix rposix._errno_after(%(save_err)d) - after = aroundstate.after - if after: after() + rgil.acquire() return res """ % locals()) - miniglobals = {'aroundstate': aroundstate, - 'funcptr': funcptr, + miniglobals = {'funcptr': funcptr, '__name__': __name__, # for module name propagation } exec source.compile() in miniglobals @@ -205,7 +203,7 @@ # don't inline, as a hack to guarantee that no GC pointer is alive # anywhere in call_external_function else: - # if we don't have to invoke the aroundstate, we can just call + # if we don't have to invoke the GIL handling, we can just call # the low-level function pointer carelessly if macro is None and save_err == RFFI_ERR_NONE: call_external_function = funcptr @@ -270,13 +268,10 @@ freeme = arg elif _isfunctype(TARGET) and not _isllptr(arg): # XXX pass additional arguments - if invoke_around_handlers: - arg = llhelper(TARGET, _make_wrapper_for(TARGET, arg, - callbackholder, - aroundstate)) - else: - arg = llhelper(TARGET, _make_wrapper_for(TARGET, arg, - callbackholder)) + use_gil = invoke_around_handlers + arg = llhelper(TARGET, _make_wrapper_for(TARGET, arg, + callbackholder, + use_gil)) else: SOURCE = lltype.typeOf(arg) if SOURCE != TARGET: @@ -315,7 +310,7 @@ def __init__(self): self.callbacks = {} -def _make_wrapper_for(TP, callable, callbackholder=None, aroundstate=None): +def _make_wrapper_for(TP, callable, callbackholder, use_gil): """ Function creating wrappers for callbacks. 
Note that this is cheating as we assume constant callbacks and we just memoize wrappers """ @@ -330,11 +325,13 @@ callbackholder.callbacks[callable] = True args = ', '.join(['a%d' % i for i in range(len(TP.TO.ARGS))]) source = py.code.Source(r""" + rgil = None + if use_gil: + from rpython.rlib import rgil + def wrapper(%(args)s): # no *args - no GIL for mallocing the tuple - if aroundstate is not None: - after = aroundstate.after - if after: - after() + if rgil is not None: + rgil.acquire() # from now on we hold the GIL stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -349,13 +346,11 @@ traceback.print_exc() result = errorcode stackcounter.stacks_counter -= 1 - if aroundstate is not None: - before = aroundstate.before - if before: - before() + if rgil is not None: + rgil.release() # here we don't hold the GIL any more. As in the wrapper() produced # by llexternal, it is essential that no exception checking occurs - # after the call to before(). + # after the call to rgil.release(). return result """ % locals()) miniglobals = locals().copy() @@ -369,13 +364,6 @@ AroundFnPtr = lltype.Ptr(lltype.FuncType([], lltype.Void)) -class AroundState: - def _cleanup_(self): - self.before = None # or a regular RPython function - self.after = None # or a regular RPython function -aroundstate = AroundState() -aroundstate._cleanup_() - class StackCounter: def _cleanup_(self): self.stacks_counter = 0 # number of "stack pieces": callbacks @@ -643,7 +631,8 @@ def CExternVariable(TYPE, name, eci, _CConstantClass=CConstant, sandboxsafe=False, _nowrapper=False, - c_type=None, getter_only=False): + c_type=None, getter_only=False, + declare_as_extern=(sys.platform != 'win32')): """Return a pair of functions - a getter and a setter - to access the given global C variable. 
""" @@ -673,7 +662,7 @@ c_setter = "void %(setter_name)s (%(c_type)s v) { %(name)s = v; }" % locals() lines = ["#include <%s>" % i for i in eci.includes] - if sys.platform != 'win32': + if declare_as_extern: lines.append('extern %s %s;' % (c_type, name)) lines.append(c_getter) if not getter_only: @@ -802,6 +791,12 @@ return length str2chararray._annenforceargs_ = [strtype, None, int] + # s[start:start+length] -> already-existing char[], + # all characters including zeros + def str2rawmem(s, array, start, length): + ll_s = llstrtype(s) + copy_string_to_raw(ll_s, array, start, length) + # char* -> str # doesn't free char* def charp2str(cp): @@ -952,19 +947,19 @@ return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, alloc_buffer, str_from_buffer, keep_buffer_alive_until_here, - charp2strn, charpsize2str, str2chararray, + charp2strn, charpsize2str, str2chararray, str2rawmem, ) (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, alloc_buffer, str_from_buffer, keep_buffer_alive_until_here, - charp2strn, charpsize2str, str2chararray, + charp2strn, charpsize2str, str2chararray, str2rawmem, ) = make_string_mappings(str) (unicode2wcharp, free_wcharp, wcharp2unicode, get_nonmoving_unicodebuffer, free_nonmoving_unicodebuffer, alloc_unicodebuffer, unicode_from_buffer, keep_unicodebuffer_alive_until_here, - wcharp2unicoden, wcharpsize2unicode, unicode2wchararray, + wcharp2unicoden, wcharpsize2unicode, unicode2wchararray, unicode2rawmem, ) = make_string_mappings(unicode) # char** @@ -979,7 +974,7 @@ array[len(l)] = lltype.nullptr(CCHARP.TO) return array liststr2charpp._annenforceargs_ = [[annmodel.s_Str0]] # List of strings -# Make a copy for the ll_os.py module +# Make a copy for rposix.py ll_liststr2charpp = func_with_new_name(liststr2charpp, 'll_liststr2charpp') def free_charpp(ref): diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rlist.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rlist.py --- 
pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rlist.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rlist.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,5 @@ from rpython.rlib import rgc, jit, types -from rpython.rlib.debug import ll_assert +from rpython.rtyper.debug import ll_assert from rpython.rlib.signature import signature from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import rstr diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rordereddict.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rordereddict.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rordereddict.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rordereddict.py 2016-03-19 16:40:12.000000000 +0000 @@ -6,7 +6,7 @@ from rpython.rlib import objectmodel, jit, rgc, types from rpython.rlib.signature import signature from rpython.rlib.objectmodel import specialize, likely -from rpython.rlib.debug import ll_assert +from rpython.rtyper.debug import ll_assert from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rtyper import rmodel from rpython.rtyper.error import TyperError diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rstr.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rstr.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/rstr.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/rstr.py 2016-03-19 16:40:12.000000000 +0000 @@ -2,12 +2,12 @@ from rpython.annotator import model as annmodel from rpython.rlib import jit, types -from rpython.rlib.debug import ll_assert from rpython.rlib.objectmodel import (malloc_zero_filled, we_are_translated, _hash_string, keepalive_until_here, specialize, enforceargs) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.error import TyperError +from rpython.rtyper.debug import ll_assert from rpython.rtyper.lltypesystem import ll_str, llmemory from 
rpython.rtyper.lltypesystem.lltype import (GcStruct, Signed, Array, Char, UniChar, Ptr, malloc, Bool, Void, GcArray, nullptr, cast_primitive, @@ -60,6 +60,13 @@ @signature(types.any(), types.any(), types.int(), returns=types.any()) @specialize.arg(0) def _get_raw_buf(TP, src, ofs): + """ + WARNING: dragons ahead. + Return the address of the internal char* buffer of the low level + string. The return value is valid as long as no GC operation occur, so + you must ensure that it will be used inside a "GC safe" section, for + example by marking your function with @rgc.no_collect + """ assert typeOf(src).TO == TP assert ofs >= 0 return llmemory.cast_ptr_to_adr(src) + _str_ofs(TP, ofs) @@ -131,9 +138,13 @@ return copy_string_to_raw, copy_raw_to_string, copy_string_contents -copy_string_to_raw, copy_raw_to_string, copy_string_contents = _new_copy_contents_fun(STR, STR, Char, 'string') -copy_unicode_to_raw, copy_raw_to_unicode, copy_unicode_contents = _new_copy_contents_fun(UNICODE, UNICODE, - UniChar, 'unicode') +(copy_string_to_raw, + copy_raw_to_string, + copy_string_contents) = _new_copy_contents_fun(STR, STR, Char, 'string') + +(copy_unicode_to_raw, + copy_raw_to_unicode, + copy_unicode_contents) = _new_copy_contents_fun(UNICODE, UNICODE, UniChar, 'unicode') CONST_STR_CACHE = WeakValueDictionary() CONST_UNICODE_CACHE = WeakValueDictionary() @@ -706,10 +717,7 @@ return cls.ll_count_char(s1, s2.chars[0], start, end) res = cls.ll_search(s1, s2, start, end, FAST_COUNT) - # For a few cases ll_search can return -1 to indicate an "impossible" - # condition for a string match, count just returns 0 in these cases. 
- if res < 0: - res = 0 + assert res >= 0 return res @staticmethod @@ -730,6 +738,8 @@ w = n - m if w < 0: + if mode == FAST_COUNT: + return 0 return -1 mlast = m - 1 diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/test/test_ll2ctypes.py 2016-03-19 16:40:15.000000000 +0000 @@ -11,12 +11,12 @@ from rpython.rtyper.lltypesystem.ll2ctypes import _llgcopaque from rpython.rtyper.annlowlevel import llhelper from rpython.rlib import rposix +from rpython.rlib.rposix import UNDERSCORE_ON_WIN32 from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator import cdir from rpython.tool.udir import udir from rpython.rtyper.test.test_llinterp import interpret from rpython.annotator.annrpython import RPythonAnnotator -from rpython.rtyper.module.support import UNDERSCORE_ON_WIN32 from rpython.rtyper.rtyper import RPythonTyper from rpython.rlib.rarithmetic import r_uint, get_long_pattern, is_emulated_long from rpython.rlib.rarithmetic import is_valid_int @@ -1461,3 +1461,20 @@ assert a[3].a == 17 #lltype.free(a, flavor='raw') py.test.skip("free() not working correctly here...") + + def test_fixedsizedarray_to_ctypes(self): + T = lltype.Ptr(rffi.CFixedArray(rffi.INT, 1)) + inst = lltype.malloc(T.TO, flavor='raw') + inst[0] = rffi.cast(rffi.INT, 42) + assert inst[0] == 42 + cinst = lltype2ctypes(inst) + assert rffi.cast(lltype.Signed, inst[0]) == 42 + assert cinst.contents.item0 == 42 + lltype.free(inst, flavor='raw') + + def test_fixedsizedarray_to_ctypes(self): + T = lltype.Ptr(rffi.CFixedArray(rffi.CHAR, 123)) + inst = lltype.malloc(T.TO, flavor='raw', zero=True) + cinst = lltype2ctypes(inst) + assert cinst.contents.item0 == 0 + lltype.free(inst, flavor='raw') diff -Nru 
pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/test/test_rffi.py pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/test/test_rffi.py --- pypy-4.0.1+dfsg/rpython/rtyper/lltypesystem/test/test_rffi.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/lltypesystem/test/test_rffi.py 2016-03-19 16:40:12.000000000 +0000 @@ -688,42 +688,6 @@ assert interpret(f, []) == 4 - def test_around_extcall(self): - if sys.platform == "win32": - py.test.skip('No pipes on windows') - import os - from rpython.annotator import model as annmodel - from rpython.rlib.objectmodel import invoke_around_extcall - from rpython.rtyper.extfuncregistry import register_external - read_fd, write_fd = os.pipe() - try: - # we need an external function that is not going to get wrapped around - # before()/after() calls, in order to call it from before()/after()... - def mywrite(s): - os.write(write_fd, s) - def llimpl(s): - s = ''.join(s.chars) - os.write(write_fd, s) - register_external(mywrite, [str], annmodel.s_None, 'll_mywrite', - llfakeimpl=llimpl, sandboxsafe=True) - - def before(): - mywrite("B") - def after(): - mywrite("A") - def f(): - os.write(write_fd, "-") - invoke_around_extcall(before, after) - os.write(write_fd, "E") - - interpret(f, []) - data = os.read(read_fd, 99) - assert data == "-BEA" - - finally: - os.close(write_fd) - os.close(read_fd) - def test_external_callable(self): """ Try to call some llexternal function with llinterp """ diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/__init__.py pypy-5.0.1+dfsg/rpython/rtyper/module/__init__.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/__init__.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -# diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/ll_os_environ.py pypy-5.0.1+dfsg/rpython/rtyper/module/ll_os_environ.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/ll_os_environ.py 2015-11-19 19:21:40.000000000 +0000 +++ 
pypy-5.0.1+dfsg/rpython/rtyper/module/ll_os_environ.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,221 +0,0 @@ -import os -import sys -from rpython.annotator import model as annmodel -from rpython.rtyper.controllerentry import Controller -from rpython.rtyper.extfunc import register_external -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.module.support import _WIN32, StringTraits, UnicodeTraits -from rpython.translator.tool.cbuild import ExternalCompilationInfo - -str0 = annmodel.s_Str0 - -# ____________________________________________________________ -# -# Annotation support to control access to 'os.environ' in the RPython -# program - -class OsEnvironController(Controller): - knowntype = os.environ.__class__ - - def convert(self, obj): - # 'None' is good enough, there is only one os.environ - return None - - def getitem(self, obj, key): - # in the RPython program reads of 'os.environ[key]' are - # redirected here - result = r_getenv(key) - if result is None: - raise KeyError - return result - - def setitem(self, obj, key, value): - # in the RPython program, 'os.environ[key] = value' is - # redirected here - r_putenv(key, value) - - def delitem(self, obj, key): - # in the RPython program, 'del os.environ[key]' is redirected - # here - absent = r_getenv(key) is None - # Always call unsetenv(), to get eventual OSErrors - r_unsetenv(key) - if absent: - raise KeyError - - def get_keys(self, obj): - # 'os.environ.keys' is redirected here - note that it's the - # getattr that arrives here, not the actual method call! - return r_envkeys - - def get_items(self, obj): - # 'os.environ.items' is redirected here (not the actual method - # call!) - return r_envitems - - def get_get(self, obj): - # 'os.environ.get' is redirected here (not the actual method - # call!) 
- return r_getenv - -# ____________________________________________________________ -# Access to the 'environ' external variable -prefix = '' -if sys.platform.startswith('darwin'): - CCHARPPP = rffi.CArrayPtr(rffi.CCHARPP) - _os_NSGetEnviron = rffi.llexternal( - '_NSGetEnviron', [], CCHARPPP, - compilation_info=ExternalCompilationInfo(includes=['crt_externs.h']) - ) - def os_get_environ(): - return _os_NSGetEnviron()[0] -elif _WIN32: - eci = ExternalCompilationInfo(includes=['stdlib.h']) - CWCHARPP = lltype.Ptr(lltype.Array(rffi.CWCHARP, hints={'nolength': True})) - - os_get_environ, _os_set_environ = rffi.CExternVariable( - rffi.CCHARPP, '_environ', eci) - get__wenviron, _set__wenviron = rffi.CExternVariable( - CWCHARPP, '_wenviron', eci, c_type='wchar_t **') - prefix = '_' -else: - os_get_environ, _os_set_environ = rffi.CExternVariable( - rffi.CCHARPP, 'environ', ExternalCompilationInfo()) - -# ____________________________________________________________ -# -# Lower-level interface: dummy placeholders and external registations - -def r_envkeys(): - just_a_placeholder - -def envkeys_llimpl(): - environ = os_get_environ() - result = [] - i = 0 - while environ[i]: - name_value = rffi.charp2str(environ[i]) - p = name_value.find('=') - if p >= 0: - result.append(name_value[:p]) - i += 1 - return result - -register_external(r_envkeys, [], [str0], # returns a list of strings - export_name='ll_os.ll_os_envkeys', - llimpl=envkeys_llimpl) - -# ____________________________________________________________ - -def r_envitems(): - just_a_placeholder - -def r_getenv(name): - just_a_placeholder # should return None if name not found - -def r_putenv(name, value): - just_a_placeholder - -os_getenv = rffi.llexternal('getenv', [rffi.CCHARP], rffi.CCHARP, - releasegil=False) -os_putenv = rffi.llexternal(prefix + 'putenv', [rffi.CCHARP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) -if _WIN32: - _wgetenv = rffi.llexternal('_wgetenv', [rffi.CWCHARP], rffi.CWCHARP, - compilation_info=eci, 
releasegil=False) - _wputenv = rffi.llexternal('_wputenv', [rffi.CWCHARP], rffi.INT, - compilation_info=eci, - save_err=rffi.RFFI_SAVE_LASTERROR) - -class EnvKeepalive: - pass -envkeepalive = EnvKeepalive() -envkeepalive.byname = {} -envkeepalive.bywname = {} - -def make_env_impls(win32=False): - if not win32: - traits = StringTraits() - get_environ, getenv, putenv = os_get_environ, os_getenv, os_putenv - byname, eq = envkeepalive.byname, '=' - def last_error(msg): - from rpython.rlib import rposix - raise OSError(rposix.get_saved_errno(), msg) - else: - traits = UnicodeTraits() - get_environ, getenv, putenv = get__wenviron, _wgetenv, _wputenv - byname, eq = envkeepalive.bywname, u'=' - from rpython.rlib.rwin32 import lastSavedWindowsError as last_error - - def envitems_llimpl(): - environ = get_environ() - result = [] - i = 0 - while environ[i]: - name_value = traits.charp2str(environ[i]) - p = name_value.find(eq) - if p >= 0: - result.append((name_value[:p], name_value[p+1:])) - i += 1 - return result - - def getenv_llimpl(name): - with traits.scoped_str2charp(name) as l_name: - l_result = getenv(l_name) - return traits.charp2str(l_result) if l_result else None - - def putenv_llimpl(name, value): - l_string = traits.str2charp(name + eq + value) - error = rffi.cast(lltype.Signed, putenv(l_string)) - if error: - traits.free_charp(l_string) - last_error("putenv failed") - # keep 'l_string' alive - we know that the C library needs it - # until the next call to putenv() with the same 'name'. 
- l_oldstring = byname.get(name, lltype.nullptr(traits.CCHARP.TO)) - byname[name] = l_string - if l_oldstring: - traits.free_charp(l_oldstring) - - return envitems_llimpl, getenv_llimpl, putenv_llimpl - -envitems_llimpl, getenv_llimpl, putenv_llimpl = make_env_impls() - -register_external(r_envitems, [], [(str0, str0)], - export_name='ll_os.ll_os_envitems', - llimpl=envitems_llimpl) -register_external(r_getenv, [str0], - annmodel.SomeString(can_be_None=True, no_nul=True), - export_name='ll_os.ll_os_getenv', - llimpl=getenv_llimpl) -register_external(r_putenv, [str0, str0], annmodel.s_None, - export_name='ll_os.ll_os_putenv', - llimpl=putenv_llimpl) - -# ____________________________________________________________ - -def r_unsetenv(name): - # default implementation for platforms without a real unsetenv() - r_putenv(name, '') - -if hasattr(__import__(os.name), 'unsetenv'): - os_unsetenv = rffi.llexternal('unsetenv', [rffi.CCHARP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def unsetenv_llimpl(name): - with rffi.scoped_str2charp(name) as l_name: - error = rffi.cast(lltype.Signed, os_unsetenv(l_name)) - if error: - from rpython.rlib import rposix - raise OSError(rposix.get_saved_errno(), "os_unsetenv failed") - try: - l_oldstring = envkeepalive.byname[name] - except KeyError: - pass - else: - del envkeepalive.byname[name] - rffi.free_charp(l_oldstring) - - register_external(r_unsetenv, [str0], annmodel.s_None, - export_name='ll_os.ll_os_unsetenv', - llimpl=unsetenv_llimpl) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/ll_os_path.py pypy-5.0.1+dfsg/rpython/rtyper/module/ll_os_path.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/ll_os_path.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/ll_os_path.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -""" -Dummy low-level implementations for the external functions of the 'os.path' module. -""" - -# see ll_os.py for comments - -import stat -import os - -# Does a path exist? 
-# This is false for dangling symbolic links. - -class BaseOsPath(object): - @classmethod - def ll_os_path_exists(cls, path): - """Test whether a path exists""" - try: - os.stat(cls.from_rstr_nonnull(path)) - except OSError: - return False - return True - - @classmethod - def ll_os_path_isdir(cls, path): - try: - st = os.stat(cls.from_rstr_nonnull(path)) - except OSError: - return False - return stat.S_ISDIR(st[0]) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/ll_os.py pypy-5.0.1+dfsg/rpython/rtyper/module/ll_os.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/ll_os.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/ll_os.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,2023 +0,0 @@ -""" -Low-level implementations for the external functions of the 'os' module. -""" - -# Implementation details about those functions -# might be found in doc/rffi.txt - -import os, sys, errno -import py -from rpython.rtyper.module.support import ( - UNDERSCORE_ON_WIN32, _WIN32, StringTraits, UnicodeTraits) -from rpython.tool.sourcetools import func_renamer -from rpython.rlib.rarithmetic import r_longlong -from rpython.rtyper.extfunc import ( - BaseLazyRegistering, register_external) -from rpython.rtyper.extfunc import registering, registering_if, extdef -from rpython.annotator.model import ( - SomeInteger, SomeString, SomeTuple, SomeFloat, s_Str0, s_Unicode0) -from rpython.annotator.model import s_ImpossibleValue, s_None, s_Bool -from rpython.rtyper.lltypesystem import rffi -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.tool import rffi_platform as platform -from rpython.rlib import rposix, jit -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rlib.objectmodel import specialize -from rpython.translator import cdir - -str0 = s_Str0 -unicode0 = s_Unicode0 - -def monkeypatch_rposix(posixfunc, unicodefunc, signature): - func_name = posixfunc.__name__ - - if hasattr(signature, '_default_signature_'): - signature = 
signature._default_signature_ - arglist = ['arg%d' % (i,) for i in range(len(signature))] - transformed_arglist = arglist[:] - for i, arg in enumerate(signature): - if arg in (unicode, unicode0): - transformed_arglist[i] = transformed_arglist[i] + '.as_unicode()' - - args = ', '.join(arglist) - transformed_args = ', '.join(transformed_arglist) - try: - main_arg = 'arg%d' % (signature.index(unicode0),) - except ValueError: - main_arg = 'arg%d' % (signature.index(unicode),) - - source = py.code.Source(""" - def %(func_name)s(%(args)s): - if isinstance(%(main_arg)s, str): - return posixfunc(%(args)s) - else: - return unicodefunc(%(transformed_args)s) - """ % locals()) - miniglobals = {'posixfunc' : posixfunc, - 'unicodefunc': unicodefunc, - '__name__': __name__, # for module name propagation - } - exec source.compile() in miniglobals - new_func = miniglobals[func_name] - specialized_args = [i for i in range(len(signature)) - if signature[i] in (unicode, unicode0, None)] - new_func = specialize.argtype(*specialized_args)(new_func) - - # Monkeypatch the function in rpython.rlib.rposix - setattr(rposix, func_name, new_func) - -def registering_str_unicode(posixfunc, condition=True): - if not condition or posixfunc is None: - return registering(None, condition=False) - - func_name = posixfunc.__name__ - - def register_posixfunc(self, method): - val = method(self, StringTraits()) - register_external(posixfunc, *val.def_args, **val.def_kwds) - - if sys.platform == 'win32': - val = method(self, UnicodeTraits()) - @func_renamer(func_name + "_unicode") - def unicodefunc(*args): - return posixfunc(*args) - register_external(unicodefunc, *val.def_args, **val.def_kwds) - signature = val.def_args[0] - monkeypatch_rposix(posixfunc, unicodefunc, signature) - - def decorator(method): - decorated = lambda self: register_posixfunc(self, method) - decorated._registering_func = posixfunc - return decorated - return decorator - -posix = __import__(os.name) - -includes = [] -if not _WIN32: 
- # XXX many of these includes are not portable at all - includes += ['dirent.h', 'sys/stat.h', - 'sys/times.h', 'utime.h', 'sys/types.h', 'unistd.h', - 'signal.h', 'sys/wait.h', 'fcntl.h'] -else: - includes += ['sys/utime.h', 'sys/types.h'] - -_CYGWIN = sys.platform == 'cygwin' - -class CConfig: - """ - Definitions for platform integration. - - Note: this must be processed through platform.configure() to provide - usable objects. For example:: - - CLOCK_T = platform.configure(CConfig)['CLOCK_T'] - register(function, [CLOCK_T], ...) - - """ - - _compilation_info_ = ExternalCompilationInfo( - includes=includes - ) - if not _WIN32: - CLOCK_T = platform.SimpleType('clock_t', rffi.INT) - - TMS = platform.Struct( - 'struct tms', [('tms_utime', rffi.INT), - ('tms_stime', rffi.INT), - ('tms_cutime', rffi.INT), - ('tms_cstime', rffi.INT)]) - - # For now we require off_t to be the same size as LONGLONG, which is the - # interface required by callers of functions that thake an argument of type - # off_t - OFF_T_SIZE = platform.SizeOf('off_t') - - SEEK_SET = platform.DefinedConstantInteger('SEEK_SET') - SEEK_CUR = platform.DefinedConstantInteger('SEEK_CUR') - SEEK_END = platform.DefinedConstantInteger('SEEK_END') - - UTIMBUF = platform.Struct('struct %sutimbuf' % UNDERSCORE_ON_WIN32, - [('actime', rffi.INT), - ('modtime', rffi.INT)]) - - -class RegisterOs(BaseLazyRegistering): - - def __init__(self): - self.configure(CConfig) - if not _WIN32: - assert self.OFF_T_SIZE == rffi.sizeof(rffi.LONGLONG) - - if hasattr(os, 'getpgrp'): - self.GETPGRP_HAVE_ARG = platform.checkcompiles( - "getpgrp(0)", - '#include ', - []) - - if hasattr(os, 'setpgrp'): - self.SETPGRP_HAVE_ARG = platform.checkcompiles( - "setpgrp(0,0)", - '#include ', - []) - - # we need an indirection via c functions to get macro calls working on llvm XXX still? 
- if hasattr(os, 'WCOREDUMP'): - decl_snippet = """ - RPY_EXTERN %(ret_type)s pypy_macro_wrapper_%(name)s (int status); - """ - def_snippet = """ - %(ret_type)s pypy_macro_wrapper_%(name)s (int status) { - return %(name)s(status); - } - """ - decls = [] - defs = [] - for name in self.w_star: - if hasattr(os, name): - data = {'ret_type': 'int', 'name': name} - decls.append((decl_snippet % data).strip()) - defs.append((def_snippet % data).strip()) - - self.compilation_info = self.compilation_info.merge( - ExternalCompilationInfo( - post_include_bits = decls, - separate_module_sources = ["\n".join(defs)] - )) - - # a simple, yet useful factory - def extdef_for_os_function_returning_int(self, name, **kwds): - c_func = self.llexternal(name, [], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO, **kwds) - def c_func_llimpl(): - res = rffi.cast(rffi.SIGNED, c_func()) - if res == -1: - raise OSError(rposix.get_saved_errno(), "%s failed" % name) - return res - c_func_llimpl.func_name = name + '_llimpl' - - return extdef([], int, llimpl=c_func_llimpl, - export_name='ll_os.ll_os_' + name) - - def extdef_for_os_function_accepting_int(self, name, **kwds): - c_func = self.llexternal(name, [rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO, **kwds) - def c_func_llimpl(arg): - res = rffi.cast(rffi.SIGNED, c_func(arg)) - if res == -1: - raise OSError(rposix.get_saved_errno(), "%s failed" % name) - - c_func_llimpl.func_name = name + '_llimpl' - - return extdef([int], None, llimpl=c_func_llimpl, - export_name='ll_os.ll_os_' + name) - - def extdef_for_os_function_accepting_2int(self, name, **kwds): - c_func = self.llexternal(name, [rffi.INT, rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO, **kwds) - def c_func_llimpl(arg, arg2): - res = rffi.cast(rffi.SIGNED, c_func(arg, arg2)) - if res == -1: - raise OSError(rposix.get_saved_errno(), "%s failed" % name) - - c_func_llimpl.func_name = name + '_llimpl' - - return extdef([int, int], None, llimpl=c_func_llimpl, - 
export_name='ll_os.ll_os_' + name) - - def extdef_for_os_function_accepting_0int(self, name, **kwds): - c_func = self.llexternal(name, [], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO, **kwds) - def c_func_llimpl(): - res = rffi.cast(rffi.SIGNED, c_func()) - if res == -1: - raise OSError(rposix.get_saved_errno(), "%s failed" % name) - - c_func_llimpl.func_name = name + '_llimpl' - - return extdef([], None, llimpl=c_func_llimpl, - export_name='ll_os.ll_os_' + name) - - def extdef_for_os_function_int_to_int(self, name, **kwds): - c_func = self.llexternal(name, [rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO, **kwds) - def c_func_llimpl(arg): - res = rffi.cast(rffi.SIGNED, c_func(arg)) - if res == -1: - raise OSError(rposix.get_saved_errno(), "%s failed" % name) - return res - - c_func_llimpl.func_name = name + '_llimpl' - - return extdef([int], int, llimpl=c_func_llimpl, - export_name='ll_os.ll_os_' + name) - - @registering_if(os, 'execv') - def register_os_execv(self): - os_execv = self.llexternal( - 'execv', - [rffi.CCHARP, rffi.CCHARPP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def execv_llimpl(path, args): - l_args = rffi.ll_liststr2charpp(args) - os_execv(path, l_args) - rffi.free_charpp(l_args) - raise OSError(rposix.get_saved_errno(), "execv failed") - - return extdef([str0, [str0]], s_ImpossibleValue, llimpl=execv_llimpl, - export_name="ll_os.ll_os_execv") - - - @registering_if(os, 'execve') - def register_os_execve(self): - os_execve = self.llexternal( - 'execve', - [rffi.CCHARP, rffi.CCHARPP, rffi.CCHARPP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def execve_llimpl(path, args, env): - # XXX Check path, args, env for \0 and raise TypeErrors as - # appropriate - envstrs = [] - for item in env.iteritems(): - envstr = "%s=%s" % item - envstrs.append(envstr) - - l_args = rffi.ll_liststr2charpp(args) - l_env = rffi.ll_liststr2charpp(envstrs) - os_execve(path, l_args, l_env) - - # XXX untested - rffi.free_charpp(l_env) - rffi.free_charpp(l_args) - - 
raise OSError(rposix.get_saved_errno(), "execve failed") - - return extdef( - [str0, [str0], {str0: str0}], - s_ImpossibleValue, - llimpl=execve_llimpl, - export_name="ll_os.ll_os_execve") - - - @registering_if(posix, 'spawnv') - def register_os_spawnv(self): - os_spawnv = self.llexternal('spawnv', - [rffi.INT, rffi.CCHARP, rffi.CCHARPP], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def spawnv_llimpl(mode, path, args): - mode = rffi.cast(rffi.INT, mode) - l_args = rffi.ll_liststr2charpp(args) - childpid = os_spawnv(mode, path, l_args) - rffi.free_charpp(l_args) - if childpid == -1: - raise OSError(rposix.get_saved_errno(), "os_spawnv failed") - return rffi.cast(lltype.Signed, childpid) - - return extdef([int, str0, [str0]], int, llimpl=spawnv_llimpl, - export_name="ll_os.ll_os_spawnv") - - @registering_if(os, 'spawnve') - def register_os_spawnve(self): - os_spawnve = self.llexternal('spawnve', - [rffi.INT, rffi.CCHARP, rffi.CCHARPP, - rffi.CCHARPP], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def spawnve_llimpl(mode, path, args, env): - envstrs = [] - for item in env.iteritems(): - envstrs.append("%s=%s" % item) - - mode = rffi.cast(rffi.INT, mode) - l_args = rffi.ll_liststr2charpp(args) - l_env = rffi.ll_liststr2charpp(envstrs) - childpid = os_spawnve(mode, path, l_args, l_env) - rffi.free_charpp(l_env) - rffi.free_charpp(l_args) - if childpid == -1: - raise OSError(rposix.get_saved_errno(), "os_spawnve failed") - return rffi.cast(lltype.Signed, childpid) - - return extdef([int, str0, [str0], {str0: str0}], int, - llimpl=spawnve_llimpl, - export_name="ll_os.ll_os_spawnve") - - @registering(os.dup) - def register_os_dup(self): - os_dup = self.llexternal(UNDERSCORE_ON_WIN32 + 'dup', - [rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def dup_llimpl(fd): - rposix.validate_fd(fd) - newfd = rffi.cast(lltype.Signed, os_dup(rffi.cast(rffi.INT, fd))) - if newfd == -1: - raise OSError(rposix.get_saved_errno(), "dup failed") - return newfd - - return 
extdef([int], int, llimpl=dup_llimpl, export_name="ll_os.ll_os_dup") - - @registering(os.dup2) - def register_os_dup2(self): - os_dup2 = self.llexternal(UNDERSCORE_ON_WIN32 + 'dup2', - [rffi.INT, rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def dup2_llimpl(fd, newfd): - rposix.validate_fd(fd) - error = rffi.cast(lltype.Signed, os_dup2(rffi.cast(rffi.INT, fd), - rffi.cast(rffi.INT, newfd))) - if error == -1: - raise OSError(rposix.get_saved_errno(), "dup2 failed") - - return extdef([int, int], s_None, llimpl=dup2_llimpl, - export_name="ll_os.ll_os_dup2") - - @registering_if(os, "getlogin", condition=not _WIN32) - def register_os_getlogin(self): - os_getlogin = self.llexternal('getlogin', [], rffi.CCHARP, - releasegil=False, - save_err=rffi.RFFI_SAVE_ERRNO) - - def getlogin_llimpl(): - result = os_getlogin() - if not result: - raise OSError(rposix.get_saved_errno(), "getlogin failed") - - return rffi.charp2str(result) - - return extdef([], str, llimpl=getlogin_llimpl, - export_name="ll_os.ll_os_getlogin") - - @registering_str_unicode(os.utime) - def register_os_utime(self, traits): - UTIMBUFP = lltype.Ptr(self.UTIMBUF) - os_utime = self.llexternal('utime', [rffi.CCHARP, UTIMBUFP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - if not _WIN32: - includes = ['sys/time.h'] - else: - includes = ['time.h'] - eci = ExternalCompilationInfo(includes=includes) - - class CConfig: - _compilation_info_ = eci - HAVE_UTIMES = platform.Has('utimes') - config = platform.configure(CConfig) - - # XXX note that on Windows, calls to os.utime() are ignored on - # directories. Remove that hack over there once it's fixed here! 
- - if config['HAVE_UTIMES']: - class CConfig: - _compilation_info_ = eci - TIMEVAL = platform.Struct('struct timeval', [('tv_sec', rffi.LONG), - ('tv_usec', rffi.LONG)]) - config = platform.configure(CConfig) - TIMEVAL = config['TIMEVAL'] - TIMEVAL2P = rffi.CArrayPtr(TIMEVAL) - os_utimes = self.llexternal('utimes', [rffi.CCHARP, TIMEVAL2P], - rffi.INT, compilation_info=eci, - save_err=rffi.RFFI_SAVE_ERRNO) - - def os_utime_platform(path, actime, modtime): - import math - l_times = lltype.malloc(TIMEVAL2P.TO, 2, flavor='raw') - fracpart, intpart = math.modf(actime) - rffi.setintfield(l_times[0], 'c_tv_sec', int(intpart)) - rffi.setintfield(l_times[0], 'c_tv_usec', int(fracpart * 1E6)) - fracpart, intpart = math.modf(modtime) - rffi.setintfield(l_times[1], 'c_tv_sec', int(intpart)) - rffi.setintfield(l_times[1], 'c_tv_usec', int(fracpart * 1E6)) - error = os_utimes(path, l_times) - lltype.free(l_times, flavor='raw') - return error - else: - # we only have utime(), which does not allow sub-second resolution - def os_utime_platform(path, actime, modtime): - l_utimbuf = lltype.malloc(UTIMBUFP.TO, flavor='raw') - l_utimbuf.c_actime = rffi.r_time_t(actime) - l_utimbuf.c_modtime = rffi.r_time_t(modtime) - error = os_utime(path, l_utimbuf) - lltype.free(l_utimbuf, flavor='raw') - return error - - # NB. this function is specialized; we get one version where - # tp is known to be None, and one version where it is known - # to be a tuple of 2 floats. 
- if not _WIN32: - assert traits.str is str - - @specialize.argtype(1) - def os_utime_llimpl(path, tp): - if tp is None: - error = os_utime(path, lltype.nullptr(UTIMBUFP.TO)) - else: - actime, modtime = tp - error = os_utime_platform(path, actime, modtime) - error = rffi.cast(lltype.Signed, error) - if error == -1: - raise OSError(rposix.get_saved_errno(), "os_utime failed") - else: - from rpython.rtyper.module.ll_win32file import make_utime_impl - os_utime_llimpl = make_utime_impl(traits) - - s_tuple_of_2_floats = SomeTuple([SomeFloat(), SomeFloat()]) - - def os_utime_normalize_args(s_path, s_times): - # special handling of the arguments: they can be either - # [str, (float, float)] or [str, s_None], and get normalized - # to exactly one of these two. - if not traits.str0.contains(s_path): - raise Exception("os.utime() arg 1 must be a string, got %s" % ( - s_path,)) - case1 = s_None.contains(s_times) - case2 = s_tuple_of_2_floats.contains(s_times) - if case1 and case2: - return [traits.str0, s_ImpossibleValue] #don't know which case yet - elif case1: - return [traits.str0, s_None] - elif case2: - return [traits.str0, s_tuple_of_2_floats] - else: - raise Exception("os.utime() arg 2 must be None or a tuple of " - "2 floats, got %s" % (s_times,)) - os_utime_normalize_args._default_signature_ = [traits.str0, None] - - return extdef(os_utime_normalize_args, s_None, - "ll_os.ll_os_utime", - llimpl=os_utime_llimpl) - - @registering(os.times) - def register_os_times(self): - if sys.platform.startswith('win'): - from rpython.rlib import rwin32 - GetCurrentProcess = self.llexternal('GetCurrentProcess', [], - rwin32.HANDLE) - GetProcessTimes = self.llexternal('GetProcessTimes', - [rwin32.HANDLE, - lltype.Ptr(rwin32.FILETIME), - lltype.Ptr(rwin32.FILETIME), - lltype.Ptr(rwin32.FILETIME), - lltype.Ptr(rwin32.FILETIME)], - rwin32.BOOL) - - def times_lltypeimpl(): - pcreate = lltype.malloc(rwin32.FILETIME, flavor='raw') - pexit = lltype.malloc(rwin32.FILETIME, flavor='raw') - 
pkernel = lltype.malloc(rwin32.FILETIME, flavor='raw') - puser = lltype.malloc(rwin32.FILETIME, flavor='raw') - hProc = GetCurrentProcess() - GetProcessTimes(hProc, pcreate, pexit, pkernel, puser) - # The fields of a FILETIME structure are the hi and lo parts - # of a 64-bit value expressed in 100 nanosecond units - # (of course). - result = (rffi.cast(lltype.Signed, pkernel.c_dwHighDateTime) * 429.4967296 + - rffi.cast(lltype.Signed, pkernel.c_dwLowDateTime) * 1E-7, - rffi.cast(lltype.Signed, puser.c_dwHighDateTime) * 429.4967296 + - rffi.cast(lltype.Signed, puser.c_dwLowDateTime) * 1E-7, - 0, 0, 0) - lltype.free(puser, flavor='raw') - lltype.free(pkernel, flavor='raw') - lltype.free(pexit, flavor='raw') - lltype.free(pcreate, flavor='raw') - return result - self.register(os.times, [], (float, float, float, float, float), - "ll_os.ll_times", llimpl=times_lltypeimpl) - return - - TMSP = lltype.Ptr(self.TMS) - os_times = self.llexternal('times', [TMSP], self.CLOCK_T, - save_err=rffi.RFFI_SAVE_ERRNO) - - # Here is a random extra platform parameter which is important. - # Strictly speaking, this should probably be retrieved at runtime, not - # at translation time. 
- CLOCK_TICKS_PER_SECOND = float(os.sysconf('SC_CLK_TCK')) - - def times_lltypeimpl(): - l_tmsbuf = lltype.malloc(TMSP.TO, flavor='raw') - try: - result = os_times(l_tmsbuf) - result = rffi.cast(lltype.Signed, result) - if result == -1: - raise OSError(rposix.get_saved_errno(), "times failed") - return ( - rffi.cast(lltype.Signed, l_tmsbuf.c_tms_utime) - / CLOCK_TICKS_PER_SECOND, - rffi.cast(lltype.Signed, l_tmsbuf.c_tms_stime) - / CLOCK_TICKS_PER_SECOND, - rffi.cast(lltype.Signed, l_tmsbuf.c_tms_cutime) - / CLOCK_TICKS_PER_SECOND, - rffi.cast(lltype.Signed, l_tmsbuf.c_tms_cstime) - / CLOCK_TICKS_PER_SECOND, - result / CLOCK_TICKS_PER_SECOND) - finally: - lltype.free(l_tmsbuf, flavor='raw') - self.register(os.times, [], (float, float, float, float, float), - "ll_os.ll_times", llimpl=times_lltypeimpl) - - - @registering_if(os, 'setsid') - def register_os_setsid(self): - os_setsid = self.llexternal('setsid', [], rffi.PID_T, - save_err=rffi.RFFI_SAVE_ERRNO) - def setsid_llimpl(): - result = rffi.cast(lltype.Signed, os_setsid()) - if result == -1: - raise OSError(rposix.get_saved_errno(), "os_setsid failed") - return result - - return extdef([], int, export_name="ll_os.ll_os_setsid", - llimpl=setsid_llimpl) - - @registering_if(os, 'chroot') - def register_os_chroot(self): - os_chroot = self.llexternal('chroot', [rffi.CCHARP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - def chroot_llimpl(arg): - result = os_chroot(arg) - if result == -1: - raise OSError(rposix.get_saved_errno(), "os_chroot failed") - - return extdef([str0], None, export_name="ll_os.ll_os_chroot", - llimpl=chroot_llimpl) - - @registering_if(os, 'uname') - def register_os_uname(self): - CHARARRAY = lltype.FixedSizeArray(lltype.Char, 1) - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes = ['sys/utsname.h'] - ) - UTSNAME = platform.Struct('struct utsname', [ - ('sysname', CHARARRAY), - ('nodename', CHARARRAY), - ('release', CHARARRAY), - ('version', CHARARRAY), - ('machine', 
CHARARRAY)]) - config = platform.configure(CConfig) - UTSNAMEP = lltype.Ptr(config['UTSNAME']) - - os_uname = self.llexternal('uname', [UTSNAMEP], rffi.INT, - compilation_info=CConfig._compilation_info_, - save_err=rffi.RFFI_SAVE_ERRNO) - - def uname_llimpl(): - l_utsbuf = lltype.malloc(UTSNAMEP.TO, flavor='raw') - result = os_uname(l_utsbuf) - if result == -1: - raise OSError(rposix.get_saved_errno(), "os_uname failed") - retval = ( - rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_sysname)), - rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_nodename)), - rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_release)), - rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_version)), - rffi.charp2str(rffi.cast(rffi.CCHARP, l_utsbuf.c_machine)), - ) - lltype.free(l_utsbuf, flavor='raw') - return retval - - return extdef([], (str, str, str, str, str), - "ll_os.ll_uname", llimpl=uname_llimpl) - - @registering_if(os, 'sysconf') - def register_os_sysconf(self): - c_sysconf = self.llexternal('sysconf', [rffi.INT], rffi.LONG, - save_err=rffi.RFFI_FULL_ERRNO_ZERO) - - def sysconf_llimpl(i): - res = c_sysconf(i) - if res == -1: - errno = rposix.get_saved_errno() - if errno != 0: - raise OSError(errno, "sysconf failed") - return res - return extdef([int], int, "ll_os.ll_sysconf", llimpl=sysconf_llimpl) - - @registering_if(os, 'fpathconf') - def register_os_fpathconf(self): - c_fpathconf = self.llexternal('fpathconf', - [rffi.INT, rffi.INT], rffi.LONG, - save_err=rffi.RFFI_FULL_ERRNO_ZERO) - - def fpathconf_llimpl(fd, i): - res = c_fpathconf(fd, i) - if res == -1: - errno = rposix.get_saved_errno() - if errno != 0: - raise OSError(errno, "fpathconf failed") - return res - return extdef([int, int], int, "ll_os.ll_fpathconf", - llimpl=fpathconf_llimpl) - - @registering_if(os, 'pathconf') - def register_os_pathconf(self): - c_pathconf = self.llexternal('pathconf', - [rffi.CCHARP, rffi.INT], rffi.LONG, - save_err=rffi.RFFI_FULL_ERRNO_ZERO) - - def pathconf_llimpl(path, i): - res = 
c_pathconf(path, i) - if res == -1: - errno = rposix.get_saved_errno() - if errno != 0: - raise OSError(errno, "pathconf failed") - return res - return extdef([str0, int], int, "ll_os.ll_pathconf", - llimpl=pathconf_llimpl) - - @registering_if(os, 'confstr') - def register_os_confstr(self): - c_confstr = self.llexternal('confstr', [rffi.INT, rffi.CCHARP, - rffi.SIZE_T], rffi.SIZE_T, - save_err=rffi.RFFI_FULL_ERRNO_ZERO) - - def confstr_llimpl(i): - n = c_confstr(i, lltype.nullptr(rffi.CCHARP.TO), 0) - n = rffi.cast(lltype.Signed, n) - if n > 0: - buf = lltype.malloc(rffi.CCHARP.TO, n, flavor='raw') - try: - c_confstr(i, buf, n) - return rffi.charp2strn(buf, n) - finally: - lltype.free(buf, flavor='raw') - else: - errno = rposix.get_saved_errno() - if errno != 0: - raise OSError(errno, "confstr failed") - return None - return extdef([int], SomeString(can_be_None=True), - "ll_os.ll_confstr", llimpl=confstr_llimpl) - - @registering_if(os, 'getuid') - def register_os_getuid(self): - return self.extdef_for_os_function_returning_int('getuid') - - @registering_if(os, 'geteuid') - def register_os_geteuid(self): - return self.extdef_for_os_function_returning_int('geteuid') - - @registering_if(os, 'setuid') - def register_os_setuid(self): - return self.extdef_for_os_function_accepting_int('setuid') - - @registering_if(os, 'seteuid') - def register_os_seteuid(self): - return self.extdef_for_os_function_accepting_int('seteuid') - - @registering_if(os, 'setgid') - def register_os_setgid(self): - return self.extdef_for_os_function_accepting_int('setgid') - - @registering_if(os, 'setegid') - def register_os_setegid(self): - return self.extdef_for_os_function_accepting_int('setegid') - - @registering_if(os, 'getpid') - def register_os_getpid(self): - return self.extdef_for_os_function_returning_int('getpid', releasegil=False) - - @registering_if(os, 'getgid') - def register_os_getgid(self): - return self.extdef_for_os_function_returning_int('getgid') - - @registering_if(os, 
'getegid') - def register_os_getegid(self): - return self.extdef_for_os_function_returning_int('getegid') - - @registering_if(os, 'getgroups') - def register_os_getgroups(self): - GP = rffi.CArrayPtr(rffi.PID_T) - c_getgroups = self.llexternal('getgroups', [rffi.INT, GP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def getgroups_llimpl(): - n = c_getgroups(0, lltype.nullptr(GP.TO)) - if n >= 0: - groups = lltype.malloc(GP.TO, n, flavor='raw') - try: - n = c_getgroups(n, groups) - result = [rffi.cast(lltype.Signed, groups[i]) - for i in range(n)] - finally: - lltype.free(groups, flavor='raw') - if n >= 0: - return result - raise OSError(rposix.get_saved_errno(), "os_getgroups failed") - - return extdef([], [int], llimpl=getgroups_llimpl, - export_name="ll_os.ll_getgroups") - - @registering_if(os, 'setgroups') - def register_os_setgroups(self): - GP = rffi.CArrayPtr(rffi.PID_T) - c_setgroups = self.llexternal('setgroups', [rffi.SIZE_T, GP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def setgroups_llimpl(list): - n = len(list) - groups = lltype.malloc(GP.TO, n, flavor='raw') - try: - for i in range(n): - groups[i] = rffi.cast(rffi.PID_T, list[i]) - n = c_setgroups(rffi.cast(rffi.SIZE_T, n), groups) - finally: - lltype.free(groups, flavor='raw') - if n != 0: - raise OSError(rposix.get_saved_errno(), "os_setgroups failed") - - return extdef([[int]], None, llimpl=setgroups_llimpl, - export_name="ll_os.ll_setgroups") - - @registering_if(os, 'initgroups') - def register_os_initgroups(self): - c_initgroups = self.llexternal('initgroups', - [rffi.CCHARP, rffi.PID_T], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def initgroups_llimpl(user, group): - n = c_initgroups(user, rffi.cast(rffi.PID_T, group)) - if n != 0: - raise OSError(rposix.get_saved_errno(), "os_initgroups failed") - - return extdef([str, int], None, llimpl=initgroups_llimpl, - export_name="ll_os.ll_initgroups") - - @registering_if(os, 'getpgrp') - def register_os_getpgrp(self): - name = 'getpgrp' - if 
self.GETPGRP_HAVE_ARG: - c_func = self.llexternal(name, [rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - def c_func_llimpl(): - res = rffi.cast(rffi.SIGNED, c_func(0)) - if res == -1: - raise OSError(rposix.get_saved_errno(), "%s failed" % name) - return res - - c_func_llimpl.func_name = name + '_llimpl' - - return extdef([], int, llimpl=c_func_llimpl, - export_name='ll_os.ll_os_' + name) - else: - return self.extdef_for_os_function_returning_int('getpgrp') - - @registering_if(os, 'setpgrp') - def register_os_setpgrp(self): - name = 'setpgrp' - if self.SETPGRP_HAVE_ARG: - c_func = self.llexternal(name, [rffi.INT, rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - def c_func_llimpl(): - res = rffi.cast(rffi.SIGNED, c_func(0, 0)) - if res == -1: - raise OSError(rposix.get_saved_errno(), "%s failed" % name) - - c_func_llimpl.func_name = name + '_llimpl' - - return extdef([], None, llimpl=c_func_llimpl, - export_name='ll_os.ll_os_' + name) - else: - return self.extdef_for_os_function_accepting_0int(name) - - @registering_if(os, 'tcgetpgrp') - def register_os_tcgetpgrp(self): - c_tcgetpgrp = self.llexternal('tcgetpgrp', [rffi.INT], rffi.PID_T, - save_err=rffi.RFFI_SAVE_ERRNO) - - def c_tcgetpgrp_llimpl(fd): - res = c_tcgetpgrp(rffi.cast(rffi.INT, fd)) - res = rffi.cast(lltype.Signed, res) - if res == -1: - raise OSError(rposix.get_saved_errno(), "tcgetpgrp failed") - return res - - return extdef([int], int, llimpl=c_tcgetpgrp_llimpl, - export_name='ll_os.ll_os_tcgetpgrp') - - @registering_if(os, 'tcsetpgrp') - def register_os_tcsetpgrp(self): - c_tcsetpgrp = self.llexternal('tcsetpgrp', [rffi.INT, rffi.PID_T], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def c_tcsetpgrp_llimpl(fd, pgrp): - res = c_tcsetpgrp(rffi.cast(rffi.INT, fd), - rffi.cast(rffi.PID_T, pgrp)) - res = rffi.cast(lltype.Signed, res) - if res == -1: - raise OSError(rposix.get_saved_errno(), "tcsetpgrp failed") - - return extdef([int, int], None, llimpl=c_tcsetpgrp_llimpl, - 
export_name='ll_os.ll_os_tcsetpgrp') - - @registering_if(os, 'getppid') - def register_os_getppid(self): - return self.extdef_for_os_function_returning_int('getppid') - - @registering_if(os, 'getpgid') - def register_os_getpgid(self): - return self.extdef_for_os_function_int_to_int('getpgid') - - @registering_if(os, 'setpgid') - def register_os_setpgid(self): - return self.extdef_for_os_function_accepting_2int('setpgid') - - @registering_if(os, 'setreuid') - def register_os_setreuid(self): - return self.extdef_for_os_function_accepting_2int('setreuid') - - @registering_if(os, 'setregid') - def register_os_setregid(self): - return self.extdef_for_os_function_accepting_2int('setregid') - - @registering_if(os, 'getsid') - def register_os_getsid(self): - return self.extdef_for_os_function_int_to_int('getsid') - - @registering_if(os, 'setsid') - def register_os_setsid(self): - return self.extdef_for_os_function_returning_int('setsid') - - @registering_if(os, 'getresuid') - def register_os_getresuid(self): - c_getresuid = self.llexternal('getresuid', [rffi.INTP] * 3, rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def c_getresuid_llimpl(): - out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') - try: - res = c_getresuid(rffi.ptradd(out, 0), - rffi.ptradd(out, 1), - rffi.ptradd(out, 2)) - res = rffi.cast(lltype.Signed, res) - if res == -1: - raise OSError(rposix.get_saved_errno(), "getresuid failed") - return (rffi.cast(lltype.Signed, out[0]), - rffi.cast(lltype.Signed, out[1]), - rffi.cast(lltype.Signed, out[2])) - finally: - lltype.free(out, flavor='raw') - - return extdef([], (int, int, int), llimpl=c_getresuid_llimpl, - export_name='ll_os.ll_os_getresuid') - - @registering_if(os, 'getresgid') - def register_os_getresgid(self): - c_getresgid = self.llexternal('getresgid', [rffi.INTP] * 3, rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def c_getresgid_llimpl(): - out = lltype.malloc(rffi.INTP.TO, 3, flavor='raw') - try: - res = c_getresgid(rffi.ptradd(out, 0), - 
rffi.ptradd(out, 1), - rffi.ptradd(out, 2)) - res = rffi.cast(lltype.Signed, res) - if res == -1: - raise OSError(rposix.get_saved_errno(), "getresgid failed") - return (rffi.cast(lltype.Signed, out[0]), - rffi.cast(lltype.Signed, out[1]), - rffi.cast(lltype.Signed, out[2])) - finally: - lltype.free(out, flavor='raw') - - return extdef([], (int, int, int), llimpl=c_getresgid_llimpl, - export_name='ll_os.ll_os_getresgid') - - @registering_if(os, 'setresuid') - def register_os_setresuid(self): - c_setresuid = self.llexternal('setresuid', [rffi.INT] * 3, rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def c_setresuid_llimpl(ruid, euid, suid): - res = c_setresuid(ruid, euid, suid) - res = rffi.cast(lltype.Signed, res) - if res == -1: - raise OSError(rposix.get_saved_errno(), "setresuid failed") - - return extdef([int, int, int], None, llimpl=c_setresuid_llimpl, - export_name='ll_os.ll_os_setresuid') - - @registering_if(os, 'setresgid') - def register_os_setresgid(self): - c_setresgid = self.llexternal('setresgid', [rffi.INT] * 3, rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def c_setresgid_llimpl(rgid, egid, sgid): - res = c_setresgid(rgid, egid, sgid) - res = rffi.cast(lltype.Signed, res) - if res == -1: - raise OSError(rposix.get_saved_errno(), "setresgid failed") - - return extdef([int, int, int], None, llimpl=c_setresgid_llimpl, - export_name='ll_os.ll_os_setresgid') - - @registering_str_unicode(os.open) - def register_os_open(self, traits): - os_open = self.llexternal(traits.posix_function_name('open'), - [traits.CCHARP, rffi.INT, rffi.MODE_T], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - def os_open_llimpl(path, flags, mode): - result = rffi.cast(lltype.Signed, os_open(path, flags, mode)) - if result == -1: - raise OSError(rposix.get_saved_errno(), "os_open failed") - return result - - return extdef([traits.str0, int, int], int, traits.ll_os_name('open'), - llimpl=os_open_llimpl) - - @registering_if(os, 'getloadavg') - def register_os_getloadavg(self): - AP = 
rffi.CArrayPtr(lltype.Float) - c_getloadavg = self.llexternal('getloadavg', [AP, rffi.INT], rffi.INT) - - def getloadavg_llimpl(): - load = lltype.malloc(AP.TO, 3, flavor='raw') - r = c_getloadavg(load, 3) - result_tuple = load[0], load[1], load[2] - lltype.free(load, flavor='raw') - if r != 3: - raise OSError - return result_tuple - return extdef([], (float, float, float), - "ll_os.ll_getloadavg", llimpl=getloadavg_llimpl) - - @registering_if(os, 'makedev') - def register_os_makedev(self): - c_makedev = self.llexternal('makedev', [rffi.INT, rffi.INT], rffi.INT) - def makedev_llimpl(maj, min): - return c_makedev(maj, min) - return extdef([int, int], int, - "ll_os.ll_makedev", llimpl=makedev_llimpl) - - @registering_if(os, 'major') - def register_os_major(self): - c_major = self.llexternal('major', [rffi.INT], rffi.INT) - def major_llimpl(dev): - return c_major(dev) - return extdef([int], int, - "ll_os.ll_major", llimpl=major_llimpl) - - @registering_if(os, 'minor') - def register_os_minor(self): - c_minor = self.llexternal('minor', [rffi.INT], rffi.INT) - def minor_llimpl(dev): - return c_minor(dev) - return extdef([int], int, - "ll_os.ll_minor", llimpl=minor_llimpl) - -# ------------------------------- os.read ------------------------------- - - @registering(os.read) - def register_os_read(self): - os_read = self.llexternal(UNDERSCORE_ON_WIN32 + 'read', - [rffi.INT, rffi.VOIDP, rffi.SIZE_T], - rffi.SSIZE_T, save_err=rffi.RFFI_SAVE_ERRNO) - - def os_read_llimpl(fd, count): - if count < 0: - raise OSError(errno.EINVAL, None) - rposix.validate_fd(fd) - with rffi.scoped_alloc_buffer(count) as buf: - void_buf = rffi.cast(rffi.VOIDP, buf.raw) - got = rffi.cast(lltype.Signed, os_read(fd, void_buf, count)) - if got < 0: - raise OSError(rposix.get_saved_errno(), "os_read failed") - return buf.str(got) - - return extdef([int, int], SomeString(can_be_None=True), - "ll_os.ll_os_read", llimpl=os_read_llimpl) - - @registering(os.write) - def register_os_write(self): - os_write 
= self.llexternal(UNDERSCORE_ON_WIN32 + 'write', - [rffi.INT, rffi.VOIDP, rffi.SIZE_T], - rffi.SIZE_T, - save_err=rffi.RFFI_SAVE_ERRNO) - - def os_write_llimpl(fd, data): - count = len(data) - rposix.validate_fd(fd) - with rffi.scoped_nonmovingbuffer(data) as buf: - written = rffi.cast(lltype.Signed, os_write( - rffi.cast(rffi.INT, fd), - buf, rffi.cast(rffi.SIZE_T, count))) - if written < 0: - raise OSError(rposix.get_saved_errno(), "os_write failed") - return written - - return extdef([int, str], SomeInteger(nonneg=True), - "ll_os.ll_os_write", llimpl=os_write_llimpl) - - @registering(os.close) - def register_os_close(self): - os_close = self.llexternal(UNDERSCORE_ON_WIN32 + 'close', [rffi.INT], - rffi.INT, releasegil=False, - save_err=rffi.RFFI_SAVE_ERRNO) - - def close_llimpl(fd): - rposix.validate_fd(fd) - error = rffi.cast(lltype.Signed, os_close(rffi.cast(rffi.INT, fd))) - if error == -1: - raise OSError(rposix.get_saved_errno(), "close failed") - - return extdef([int], s_None, llimpl=close_llimpl, - export_name="ll_os.ll_os_close") - - @registering(os.lseek) - def register_os_lseek(self): - if sys.platform.startswith('win'): - funcname = '_lseeki64' - else: - funcname = 'lseek' - if self.SEEK_SET is not None: - SEEK_SET = self.SEEK_SET - SEEK_CUR = self.SEEK_CUR - SEEK_END = self.SEEK_END - else: - SEEK_SET, SEEK_CUR, SEEK_END = 0, 1, 2 - if (SEEK_SET, SEEK_CUR, SEEK_END) != (0, 1, 2): - # Turn 0, 1, 2 into SEEK_{SET,CUR,END} - def fix_seek_arg(n): - if n == 0: return SEEK_SET - if n == 1: return SEEK_CUR - if n == 2: return SEEK_END - return n - else: - def fix_seek_arg(n): - return n - - os_lseek = self.llexternal(funcname, - [rffi.INT, rffi.LONGLONG, rffi.INT], - rffi.LONGLONG, macro=True, - save_err=rffi.RFFI_SAVE_ERRNO) - - def lseek_llimpl(fd, pos, how): - rposix.validate_fd(fd) - how = fix_seek_arg(how) - res = os_lseek(rffi.cast(rffi.INT, fd), - rffi.cast(rffi.LONGLONG, pos), - rffi.cast(rffi.INT, how)) - res = rffi.cast(lltype.SignedLongLong, res) 
- if res < 0: - raise OSError(rposix.get_saved_errno(), "os_lseek failed") - return res - - return extdef([int, r_longlong, int], - r_longlong, - llimpl = lseek_llimpl, - export_name = "ll_os.ll_os_lseek") - - @registering_if(os, 'ftruncate') - def register_os_ftruncate(self): - os_ftruncate = self.llexternal('ftruncate', - [rffi.INT, rffi.LONGLONG], rffi.INT, - macro=True, - save_err=rffi.RFFI_SAVE_ERRNO) - - def ftruncate_llimpl(fd, length): - rposix.validate_fd(fd) - res = rffi.cast(rffi.LONG, - os_ftruncate(rffi.cast(rffi.INT, fd), - rffi.cast(rffi.LONGLONG, length))) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_ftruncate failed") - - return extdef([int, r_longlong], s_None, - llimpl = ftruncate_llimpl, - export_name = "ll_os.ll_os_ftruncate") - - @registering_if(os, 'fsync') - def register_os_fsync(self): - if not _WIN32: - os_fsync = self.llexternal('fsync', [rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - else: - os_fsync = self.llexternal('_commit', [rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def fsync_llimpl(fd): - rposix.validate_fd(fd) - res = rffi.cast(rffi.SIGNED, os_fsync(rffi.cast(rffi.INT, fd))) - if res < 0: - raise OSError(rposix.get_saved_errno(), "fsync failed") - return extdef([int], s_None, - llimpl=fsync_llimpl, - export_name="ll_os.ll_os_fsync") - - @registering_if(os, 'fdatasync') - def register_os_fdatasync(self): - os_fdatasync = self.llexternal('fdatasync', [rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def fdatasync_llimpl(fd): - rposix.validate_fd(fd) - res = rffi.cast(rffi.SIGNED, os_fdatasync(rffi.cast(rffi.INT, fd))) - if res < 0: - raise OSError(rposix.get_saved_errno(), "fdatasync failed") - return extdef([int], s_None, - llimpl=fdatasync_llimpl, - export_name="ll_os.ll_os_fdatasync") - - @registering_if(os, 'fchdir') - def register_os_fchdir(self): - os_fchdir = self.llexternal('fchdir', [rffi.INT], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def fchdir_llimpl(fd): - 
rposix.validate_fd(fd) - res = rffi.cast(rffi.SIGNED, os_fchdir(rffi.cast(rffi.INT, fd))) - if res < 0: - raise OSError(rposix.get_saved_errno(), "fchdir failed") - return extdef([int], s_None, - llimpl=fchdir_llimpl, - export_name="ll_os.ll_os_fchdir") - - @registering_str_unicode(os.access) - def register_os_access(self, traits): - os_access = self.llexternal(traits.posix_function_name('access'), - [traits.CCHARP, rffi.INT], - rffi.INT) - - if sys.platform.startswith('win'): - # All files are executable on Windows - def access_llimpl(path, mode): - mode = mode & ~os.X_OK - error = rffi.cast(lltype.Signed, os_access(path, mode)) - return error == 0 - else: - def access_llimpl(path, mode): - error = rffi.cast(lltype.Signed, os_access(path, mode)) - return error == 0 - - return extdef([traits.str0, int], s_Bool, llimpl=access_llimpl, - export_name=traits.ll_os_name("access")) - - @registering_str_unicode(getattr(posix, '_getfullpathname', None), - condition=sys.platform=='win32') - def register_posix__getfullpathname(self, traits): - # this nt function is not exposed via os, but needed - # to get a correct implementation of os.path.abspath - from rpython.rtyper.module.ll_win32file import make_getfullpathname_impl - getfullpathname_llimpl = make_getfullpathname_impl(traits) - - return extdef([traits.str0], # a single argument which is a str - traits.str0, # returns a string - traits.ll_os_name('_getfullpathname'), - llimpl=getfullpathname_llimpl) - - @registering(os.getcwd) - def register_os_getcwd(self): - os_getcwd = self.llexternal(UNDERSCORE_ON_WIN32 + 'getcwd', - [rffi.CCHARP, rffi.SIZE_T], - rffi.CCHARP, - save_err=rffi.RFFI_SAVE_ERRNO) - - def os_getcwd_llimpl(): - bufsize = 256 - while True: - buf = lltype.malloc(rffi.CCHARP.TO, bufsize, flavor='raw') - res = os_getcwd(buf, rffi.cast(rffi.SIZE_T, bufsize)) - if res: - break # ok - error = rposix.get_saved_errno() - lltype.free(buf, flavor='raw') - if error != errno.ERANGE: - raise OSError(error, "getcwd 
failed") - # else try again with a larger buffer, up to some sane limit - bufsize *= 4 - if bufsize > 1024*1024: # xxx hard-coded upper limit - raise OSError(error, "getcwd result too large") - result = rffi.charp2str(res) - lltype.free(buf, flavor='raw') - return result - - return extdef([], str0, - "ll_os.ll_os_getcwd", llimpl=os_getcwd_llimpl) - - @registering(os.getcwdu, condition=sys.platform=='win32') - def register_os_getcwdu(self): - os_wgetcwd = self.llexternal(UNDERSCORE_ON_WIN32 + 'wgetcwd', - [rffi.CWCHARP, rffi.SIZE_T], - rffi.CWCHARP, - save_err=rffi.RFFI_SAVE_ERRNO) - - def os_getcwd_llimpl(): - bufsize = 256 - while True: - buf = lltype.malloc(rffi.CWCHARP.TO, bufsize, flavor='raw') - res = os_wgetcwd(buf, rffi.cast(rffi.SIZE_T, bufsize)) - if res: - break # ok - error = rposix.get_saved_errno() - lltype.free(buf, flavor='raw') - if error != errno.ERANGE: - raise OSError(error, "getcwd failed") - # else try again with a larger buffer, up to some sane limit - bufsize *= 4 - if bufsize > 1024*1024: # xxx hard-coded upper limit - raise OSError(error, "getcwd result too large") - result = rffi.wcharp2unicode(res) - lltype.free(buf, flavor='raw') - return result - - return extdef([], unicode, - "ll_os.ll_os_wgetcwd", llimpl=os_getcwd_llimpl) - - @registering_str_unicode(os.listdir) - def register_os_listdir(self, traits): - # we need a different approach on Windows and on Posix - if sys.platform.startswith('win'): - from rpython.rtyper.module.ll_win32file import make_listdir_impl - os_listdir_llimpl = make_listdir_impl(traits) - else: - assert traits.str is str - compilation_info = ExternalCompilationInfo( - includes = ['sys/types.h', 'dirent.h'] - ) - class CConfig: - _compilation_info_ = compilation_info - DIRENT = platform.Struct('struct dirent', - [('d_name', lltype.FixedSizeArray(rffi.CHAR, 1))]) - - DIRP = rffi.COpaquePtr('DIR') - config = platform.configure(CConfig) - DIRENT = config['DIRENT'] - DIRENTP = lltype.Ptr(DIRENT) - os_opendir = 
self.llexternal('opendir', [rffi.CCHARP], DIRP, - compilation_info=compilation_info, - save_err=rffi.RFFI_SAVE_ERRNO) - # XXX macro=True is hack to make sure we get the correct kind of - # dirent struct (which depends on defines) - os_readdir = self.llexternal('readdir', [DIRP], DIRENTP, - compilation_info=compilation_info, - save_err=rffi.RFFI_FULL_ERRNO_ZERO, - macro=True) - os_closedir = self.llexternal('closedir', [DIRP], rffi.INT, - compilation_info=compilation_info) - - def os_listdir_llimpl(path): - dirp = os_opendir(path) - if not dirp: - raise OSError(rposix.get_saved_errno(), "os_opendir failed") - result = [] - while True: - direntp = os_readdir(dirp) - if not direntp: - error = rposix.get_saved_errno() - break - namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) - name = rffi.charp2str(namep) - if name != '.' and name != '..': - result.append(name) - os_closedir(dirp) - if error: - raise OSError(error, "os_readdir failed") - return result - - return extdef([traits.str0], # a single argument which is a str - [traits.str0], # returns a list of strings - traits.ll_os_name('listdir'), - llimpl=os_listdir_llimpl) - - @registering(os.pipe) - def register_os_pipe(self): - # we need a different approach on Windows and on Posix - if sys.platform.startswith('win'): - from rpython.rlib import rwin32 - CreatePipe = self.llexternal('CreatePipe', [rwin32.LPHANDLE, - rwin32.LPHANDLE, - rffi.VOIDP, - rwin32.DWORD], - rwin32.BOOL, - save_err=rffi.RFFI_SAVE_LASTERROR) - _open_osfhandle = self.llexternal('_open_osfhandle', [rffi.INTPTR_T, - rffi.INT], - rffi.INT) - null = lltype.nullptr(rffi.VOIDP.TO) - - def os_pipe_llimpl(): - pread = lltype.malloc(rwin32.LPHANDLE.TO, 1, flavor='raw') - pwrite = lltype.malloc(rwin32.LPHANDLE.TO, 1, flavor='raw') - ok = CreatePipe(pread, pwrite, null, 0) - if ok: - error = 0 - else: - error = rwin32.GetLastError_saved() - hread = rffi.cast(rffi.INTPTR_T, pread[0]) - hwrite = rffi.cast(rffi.INTPTR_T, pwrite[0]) - lltype.free(pwrite, 
flavor='raw') - lltype.free(pread, flavor='raw') - if error: - raise WindowsError(error, "os_pipe failed") - fdread = _open_osfhandle(hread, 0) - fdwrite = _open_osfhandle(hwrite, 1) - return (fdread, fdwrite) - - else: - INT_ARRAY_P = rffi.CArrayPtr(rffi.INT) - os_pipe = self.llexternal('pipe', [INT_ARRAY_P], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def os_pipe_llimpl(): - filedes = lltype.malloc(INT_ARRAY_P.TO, 2, flavor='raw') - error = rffi.cast(lltype.Signed, os_pipe(filedes)) - read_fd = filedes[0] - write_fd = filedes[1] - lltype.free(filedes, flavor='raw') - if error != 0: - raise OSError(rposix.get_saved_errno(), "os_pipe failed") - return (rffi.cast(lltype.Signed, read_fd), - rffi.cast(lltype.Signed, write_fd)) - - return extdef([], (int, int), - "ll_os.ll_os_pipe", - llimpl=os_pipe_llimpl) - - @registering_if(os, 'chown') - def register_os_chown(self): - os_chown = self.llexternal('chown', [rffi.CCHARP, rffi.INT, rffi.INT], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def os_chown_llimpl(path, uid, gid): - res = os_chown(path, uid, gid) - if res == -1: - raise OSError(rposix.get_saved_errno(), "os_chown failed") - - return extdef([str0, int, int], None, "ll_os.ll_os_chown", - llimpl=os_chown_llimpl) - - @registering_if(os, 'lchown') - def register_os_lchown(self): - os_lchown = self.llexternal('lchown',[rffi.CCHARP, rffi.INT, rffi.INT], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def os_lchown_llimpl(path, uid, gid): - res = os_lchown(path, uid, gid) - if res == -1: - raise OSError(rposix.get_saved_errno(), "os_lchown failed") - - return extdef([str0, int, int], None, "ll_os.ll_os_lchown", - llimpl=os_lchown_llimpl) - - @registering_if(os, 'fchown') - def register_os_fchown(self): - os_fchown = self.llexternal('fchown',[rffi.INT, rffi.INT, rffi.INT], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def os_fchown_llimpl(fd, uid, gid): - res = os_fchown(fd, uid, gid) - if res == -1: - raise OSError(rposix.get_saved_errno(), "os_fchown 
failed") - - return extdef([int, int, int], None, "ll_os.ll_os_fchown", - llimpl=os_fchown_llimpl) - - @registering_if(os, 'readlink') - def register_os_readlink(self): - os_readlink = self.llexternal('readlink', - [rffi.CCHARP, rffi.CCHARP, rffi.SIZE_T], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - # XXX SSIZE_T in POSIX.1-2001 - - def os_readlink_llimpl(path): - bufsize = 1023 - while True: - l_path = rffi.str2charp(path) - buf = lltype.malloc(rffi.CCHARP.TO, bufsize, - flavor='raw') - res = rffi.cast(lltype.Signed, os_readlink(l_path, buf, bufsize)) - lltype.free(l_path, flavor='raw') - if res < 0: - error = rposix.get_saved_errno() # failed - lltype.free(buf, flavor='raw') - raise OSError(error, "readlink failed") - elif res < bufsize: - break # ok - else: - # buf too small, try again with a larger buffer - lltype.free(buf, flavor='raw') - bufsize *= 4 - # convert the result to a string - result = rffi.charp2strn(buf, res) - lltype.free(buf, flavor='raw') - return result - - return extdef([str0], str0, - "ll_os.ll_os_readlink", - llimpl=os_readlink_llimpl) - - @registering(os.waitpid) - def register_os_waitpid(self): - if sys.platform.startswith('win'): - # emulate waitpid() with the _cwait() of Microsoft's compiler - os__cwait = self.llexternal('_cwait', - [rffi.INTP, rffi.PID_T, rffi.INT], - rffi.PID_T, - save_err=rffi.RFFI_SAVE_ERRNO) - def os_waitpid(pid, status_p, options): - result = os__cwait(status_p, pid, options) - # shift the status left a byte so this is more - # like the POSIX waitpid - tmp = rffi.cast(rffi.SIGNED, status_p[0]) - tmp <<= 8 - status_p[0] = rffi.cast(rffi.INT, tmp) - return result - else: - # Posix - if _CYGWIN: - os_waitpid = self.llexternal('cygwin_waitpid', - [rffi.PID_T, rffi.INTP, rffi.INT], - rffi.PID_T, - save_err=rffi.RFFI_SAVE_ERRNO) - else: - os_waitpid = self.llexternal('waitpid', - [rffi.PID_T, rffi.INTP, rffi.INT], - rffi.PID_T, - save_err=rffi.RFFI_SAVE_ERRNO) - - def os_waitpid_llimpl(pid, options): - status_p = 
lltype.malloc(rffi.INTP.TO, 1, flavor='raw') - status_p[0] = rffi.cast(rffi.INT, 0) - result = os_waitpid(rffi.cast(rffi.PID_T, pid), - status_p, - rffi.cast(rffi.INT, options)) - result = rffi.cast(lltype.Signed, result) - status = status_p[0] - lltype.free(status_p, flavor='raw') - if result == -1: - raise OSError(rposix.get_saved_errno(), "os_waitpid failed") - return (rffi.cast(lltype.Signed, result), - rffi.cast(lltype.Signed, status)) - - return extdef([int, int], (int, int), - "ll_os.ll_os_waitpid", - llimpl=os_waitpid_llimpl) - - @registering(os.isatty) - def register_os_isatty(self): - os_isatty = self.llexternal(UNDERSCORE_ON_WIN32 + 'isatty', - [rffi.INT], rffi.INT) - - def isatty_llimpl(fd): - if not rposix.is_valid_fd(fd): - return False - res = rffi.cast(lltype.Signed, os_isatty(rffi.cast(rffi.INT, fd))) - return res != 0 - - return extdef([int], bool, llimpl=isatty_llimpl, - export_name="ll_os.ll_os_isatty") - - @registering(os.strerror) - def register_os_strerror(self): - os_strerror = self.llexternal('strerror', [rffi.INT], rffi.CCHARP, releasegil=False) - - def strerror_llimpl(errnum): - res = os_strerror(rffi.cast(rffi.INT, errnum)) - if not res: - raise ValueError("os_strerror failed") - return rffi.charp2str(res) - - return extdef([int], str, llimpl=strerror_llimpl, - export_name="ll_os.ll_os_strerror") - - @registering(os.system) - def register_os_system(self): - os_system = self.llexternal('system', [rffi.CCHARP], rffi.INT) - - def system_llimpl(command): - res = os_system(command) - return rffi.cast(lltype.Signed, res) - - return extdef([str0], int, llimpl=system_llimpl, - export_name="ll_os.ll_os_system") - - @registering_str_unicode(os.unlink) - def register_os_unlink(self, traits): - os_unlink = self.llexternal(traits.posix_function_name('unlink'), - [traits.CCHARP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def unlink_llimpl(pathname): - res = rffi.cast(lltype.Signed, os_unlink(pathname)) - if res < 0: - raise 
OSError(rposix.get_saved_errno(), "os_unlink failed") - - if sys.platform == 'win32': - from rpython.rtyper.module.ll_win32file import make_win32_traits - win32traits = make_win32_traits(traits) - - @func_renamer('unlink_llimpl_%s' % traits.str.__name__) - def unlink_llimpl(path): - if not win32traits.DeleteFile(path): - raise rwin32.lastSavedWindowsError() - - return extdef([traits.str0], s_None, llimpl=unlink_llimpl, - export_name=traits.ll_os_name('unlink')) - - @registering_str_unicode(os.chdir) - def register_os_chdir(self, traits): - os_chdir = self.llexternal(traits.posix_function_name('chdir'), - [traits.CCHARP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def os_chdir_llimpl(path): - res = rffi.cast(lltype.Signed, os_chdir(path)) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_chdir failed") - - # On Windows, use an implementation that will produce Win32 errors - if sys.platform == 'win32': - from rpython.rtyper.module.ll_win32file import make_chdir_impl - os_chdir_llimpl = make_chdir_impl(traits) - - return extdef([traits.str0], s_None, llimpl=os_chdir_llimpl, - export_name=traits.ll_os_name('chdir')) - - @registering_str_unicode(os.mkdir) - def register_os_mkdir(self, traits): - os_mkdir = self.llexternal(traits.posix_function_name('mkdir'), - [traits.CCHARP, rffi.MODE_T], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - if sys.platform == 'win32': - from rpython.rtyper.module.ll_win32file import make_win32_traits - win32traits = make_win32_traits(traits) - - @func_renamer('mkdir_llimpl_%s' % traits.str.__name__) - def os_mkdir_llimpl(path, mode): - if not win32traits.CreateDirectory(path, None): - raise rwin32.lastSavedWindowsError() - else: - def os_mkdir_llimpl(pathname, mode): - res = os_mkdir(pathname, mode) - res = rffi.cast(lltype.Signed, res) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_mkdir failed") - - return extdef([traits.str0, int], s_None, llimpl=os_mkdir_llimpl, - export_name=traits.ll_os_name('mkdir')) - - 
@registering_str_unicode(os.rmdir) - def register_os_rmdir(self, traits): - os_rmdir = self.llexternal(traits.posix_function_name('rmdir'), - [traits.CCHARP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def rmdir_llimpl(pathname): - res = rffi.cast(lltype.Signed, os_rmdir(pathname)) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_rmdir failed") - - return extdef([traits.str0], s_None, llimpl=rmdir_llimpl, - export_name=traits.ll_os_name('rmdir')) - - @registering_str_unicode(os.chmod) - def register_os_chmod(self, traits): - os_chmod = self.llexternal(traits.posix_function_name('chmod'), - [traits.CCHARP, rffi.MODE_T], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def chmod_llimpl(path, mode): - res = rffi.cast(lltype.Signed, os_chmod(path, rffi.cast(rffi.MODE_T, mode))) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_chmod failed") - - if sys.platform == 'win32': - from rpython.rtyper.module.ll_win32file import make_chmod_impl - chmod_llimpl = make_chmod_impl(traits) - - return extdef([traits.str0, int], s_None, llimpl=chmod_llimpl, - export_name=traits.ll_os_name('chmod')) - - @registering_if(os, 'fchmod') - def register_os_fchmod(self): - os_fchmod = self.llexternal('fchmod', [rffi.INT, rffi.MODE_T], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def fchmod_llimpl(fd, mode): - mode = rffi.cast(rffi.MODE_T, mode) - res = rffi.cast(lltype.Signed, os_fchmod(fd, mode)) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_fchmod failed") - - return extdef([int, int], s_None, "ll_os.ll_os_fchmod", - llimpl=fchmod_llimpl) - - @registering_str_unicode(os.rename) - def register_os_rename(self, traits): - os_rename = self.llexternal(traits.posix_function_name('rename'), - [traits.CCHARP, traits.CCHARP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def rename_llimpl(oldpath, newpath): - res = rffi.cast(lltype.Signed, os_rename(oldpath, newpath)) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_rename failed") - - if 
sys.platform == 'win32': - from rpython.rtyper.module.ll_win32file import make_win32_traits - win32traits = make_win32_traits(traits) - - @func_renamer('rename_llimpl_%s' % traits.str.__name__) - def rename_llimpl(oldpath, newpath): - if not win32traits.MoveFile(oldpath, newpath): - raise rwin32.lastSavedWindowsError() - - return extdef([traits.str0, traits.str0], s_None, llimpl=rename_llimpl, - export_name=traits.ll_os_name('rename')) - - @registering_str_unicode(getattr(os, 'mkfifo', None)) - def register_os_mkfifo(self, traits): - os_mkfifo = self.llexternal(traits.posix_function_name('mkfifo'), - [traits.CCHARP, rffi.MODE_T], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def mkfifo_llimpl(path, mode): - res = rffi.cast(lltype.Signed, os_mkfifo(path, mode)) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_mkfifo failed") - - return extdef([traits.str0, int], s_None, llimpl=mkfifo_llimpl, - export_name=traits.ll_os_name('mkfifo')) - - @registering_str_unicode(getattr(os, 'mknod', None)) - def register_os_mknod(self, traits): - os_mknod = self.llexternal(traits.posix_function_name('mknod'), - [traits.CCHARP, rffi.MODE_T, rffi.INT], - rffi.INT, # xxx: actually ^^^ dev_t - save_err=rffi.RFFI_SAVE_ERRNO) - - def mknod_llimpl(path, mode, dev): - res = rffi.cast(lltype.Signed, os_mknod(path, mode, dev)) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_mknod failed") - - return extdef([traits.str0, int, int], s_None, llimpl=mknod_llimpl, - export_name=traits.ll_os_name('mknod')) - - @registering(os.umask) - def register_os_umask(self): - os_umask = self.llexternal(UNDERSCORE_ON_WIN32 + 'umask', - [rffi.MODE_T], rffi.MODE_T) - - def umask_llimpl(newmask): - res = os_umask(rffi.cast(rffi.MODE_T, newmask)) - return rffi.cast(lltype.Signed, res) - - return extdef([int], int, llimpl=umask_llimpl, - export_name="ll_os.ll_os_umask") - - @registering_if(os, 'kill', sys.platform != 'win32') - def register_os_kill(self): - os_kill = self.llexternal('kill', 
[rffi.PID_T, rffi.INT], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - def kill_llimpl(pid, sig): - res = rffi.cast(lltype.Signed, os_kill(rffi.cast(rffi.PID_T, pid), - rffi.cast(rffi.INT, sig))) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_kill failed") - return extdef([int, int], s_None, llimpl=kill_llimpl, - export_name="ll_os.ll_os_kill") - - @registering_if(os, 'killpg') - def register_os_killpg(self): - os_killpg = self.llexternal('killpg', [rffi.INT, rffi.INT], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def killpg_llimpl(pid, sig): - res = rffi.cast(lltype.Signed, os_killpg(rffi.cast(rffi.INT, pid), - rffi.cast(rffi.INT, sig))) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_killpg failed") - - return extdef([int, int], s_None, llimpl=killpg_llimpl, - export_name="ll_os.ll_os_killpg") - - @registering_if(os, 'link') - def register_os_link(self): - os_link = self.llexternal('link', [rffi.CCHARP, rffi.CCHARP], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def link_llimpl(oldpath, newpath): - res = rffi.cast(lltype.Signed, os_link(oldpath, newpath)) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_link failed") - - return extdef([str0, str0], s_None, llimpl=link_llimpl, - export_name="ll_os.ll_os_link") - - @registering_if(os, 'symlink') - def register_os_symlink(self): - os_symlink = self.llexternal('symlink', [rffi.CCHARP, rffi.CCHARP], - rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - - def symlink_llimpl(oldpath, newpath): - res = rffi.cast(lltype.Signed, os_symlink(oldpath, newpath)) - if res < 0: - raise OSError(rposix.get_saved_errno(), "os_symlink failed") - - return extdef([str0, str0], s_None, llimpl=symlink_llimpl, - export_name="ll_os.ll_os_symlink") - - @registering_if(os, 'fork') - def register_os_fork(self): - from rpython.rlib import debug, rthread - os_fork = self.llexternal('fork', [], rffi.PID_T, - _nowrapper = True) - - @jit.dont_look_inside - def fork_llimpl(): - # NB. 
keep forkpty() up-to-date, too - ofs = debug.debug_offset() - opaqueaddr = rthread.gc_thread_before_fork() - childpid = rffi.cast(lltype.Signed, os_fork()) - errno = rffi.cast(lltype.Signed, rposix._get_errno()) - rthread.gc_thread_after_fork(childpid, opaqueaddr) - if childpid == -1: - raise OSError(errno, "os_fork failed") - if childpid == 0: - debug.debug_forked(ofs) - return rffi.cast(lltype.Signed, childpid) - - return extdef([], int, llimpl=fork_llimpl, - export_name="ll_os.ll_os_fork") - - @registering_if(os, 'openpty') - def register_os_openpty(self): - os_openpty = self.llexternal( - 'openpty', - [rffi.INTP, rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], - rffi.INT, - compilation_info=ExternalCompilationInfo(libraries=['util']), - save_err=rffi.RFFI_SAVE_ERRNO) - def openpty_llimpl(): - master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') - slave_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') - result = os_openpty(master_p, slave_p, None, None, None) - master_fd = master_p[0] - slave_fd = slave_p[0] - lltype.free(master_p, flavor='raw') - lltype.free(slave_p, flavor='raw') - if result == -1: - raise OSError(rposix.get_saved_errno(), "os_openpty failed") - return (rffi.cast(lltype.Signed, master_fd), - rffi.cast(lltype.Signed, slave_fd)) - - return extdef([], (int, int), "ll_os.ll_os_openpty", - llimpl=openpty_llimpl) - - @registering_if(os, 'forkpty') - def register_os_forkpty(self): - from rpython.rlib import debug, rthread - os_forkpty = self.llexternal( - 'forkpty', - [rffi.INTP, rffi.VOIDP, rffi.VOIDP, rffi.VOIDP], - rffi.PID_T, - compilation_info=ExternalCompilationInfo(libraries=['util']), - save_err=rffi.RFFI_SAVE_ERRNO) - def forkpty_llimpl(): - master_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') - master_p[0] = rffi.cast(rffi.INT, -1) - ofs = debug.debug_offset() - opaqueaddr = rthread.gc_thread_before_fork() - childpid = rffi.cast(lltype.Signed, - os_forkpty(master_p, None, None, None)) - rthread.gc_thread_after_fork(childpid, 
opaqueaddr) - master_fd = master_p[0] - lltype.free(master_p, flavor='raw') - if childpid == -1: - raise OSError(rposix.get_saved_errno(), "os_forkpty failed") - if childpid == 0: - debug.debug_forked(ofs) - return (rffi.cast(lltype.Signed, childpid), - rffi.cast(lltype.Signed, master_fd)) - - return extdef([], (int, int), "ll_os.ll_os_forkpty", - llimpl=forkpty_llimpl) - - @registering(os._exit) - def register_os__exit(self): - from rpython.rlib import debug - os__exit = self.llexternal('_exit', [rffi.INT], lltype.Void) - - def _exit_llimpl(status): - debug.debug_flush() - os__exit(rffi.cast(rffi.INT, status)) - - return extdef([int], s_None, llimpl=_exit_llimpl, - export_name="ll_os.ll_os__exit") - - @registering_if(os, 'nice') - def register_os_nice(self): - os_nice = self.llexternal('nice', [rffi.INT], rffi.INT, - save_err=rffi.RFFI_FULL_ERRNO_ZERO) - - def nice_llimpl(inc): - # Assume that the system provides a standard-compliant version - # of nice() that returns the new priority. Nowadays, FreeBSD - # might be the last major non-compliant system (xxx check me). 
- res = rffi.cast(lltype.Signed, os_nice(inc)) - if res == -1: - err = rposix.get_saved_errno() - if err != 0: - raise OSError(err, "os_nice failed") - return res - - return extdef([int], int, llimpl=nice_llimpl, - export_name="ll_os.ll_os_nice") - - @registering_if(os, 'ctermid') - def register_os_ctermid(self): - os_ctermid = self.llexternal('ctermid', [rffi.CCHARP], rffi.CCHARP) - - def ctermid_llimpl(): - return rffi.charp2str(os_ctermid(lltype.nullptr(rffi.CCHARP.TO))) - - return extdef([], str, llimpl=ctermid_llimpl, - export_name="ll_os.ll_os_ctermid") - - @registering_if(os, 'tmpnam') - def register_os_tmpnam(self): - os_tmpnam = self.llexternal('tmpnam', [rffi.CCHARP], rffi.CCHARP) - - def tmpnam_llimpl(): - return rffi.charp2str(os_tmpnam(lltype.nullptr(rffi.CCHARP.TO))) - - return extdef([], str, llimpl=tmpnam_llimpl, - export_name="ll_os.ll_os_tmpnam") - -# --------------------------- os.stat & variants --------------------------- - - @registering(os.fstat) - def register_os_fstat(self): - from rpython.rtyper.module import ll_os_stat - return ll_os_stat.register_stat_variant('fstat', StringTraits()) - - @registering_str_unicode(os.stat) - def register_os_stat(self, traits): - from rpython.rtyper.module import ll_os_stat - return ll_os_stat.register_stat_variant('stat', traits) - - @registering_str_unicode(os.lstat) - def register_os_lstat(self, traits): - from rpython.rtyper.module import ll_os_stat - return ll_os_stat.register_stat_variant('lstat', traits) - - @registering_if(os, 'fstatvfs') - def register_os_fstatvfs(self): - from rpython.rtyper.module import ll_os_stat - return ll_os_stat.register_statvfs_variant('fstatvfs', StringTraits()) - - if hasattr(os, 'statvfs'): - @registering_str_unicode(os.statvfs) - def register_os_statvfs(self, traits): - from rpython.rtyper.module import ll_os_stat - return ll_os_stat.register_statvfs_variant('statvfs', traits) - - - # ------------------------------- os.W* --------------------------------- - - w_star = 
['WCOREDUMP', 'WIFCONTINUED', 'WIFSTOPPED', - 'WIFSIGNALED', 'WIFEXITED', 'WEXITSTATUS', - 'WSTOPSIG', 'WTERMSIG'] - # last 3 are returning int - w_star_returning_int = dict.fromkeys(w_star[-3:]) - - - - def declare_new_w_star(self, name): - """ stupid workaround for the python late-binding - 'feature' - """ - - def fake(status): - return int(getattr(os, name)(status)) - fake.func_name = 'fake_' + name - - os_c_func = self.llexternal("pypy_macro_wrapper_" + name, - [lltype.Signed], lltype.Signed, - _callable=fake) - - if name in self.w_star_returning_int: - def llimpl(status): - return os_c_func(status) - resulttype = int - else: - def llimpl(status): - return bool(os_c_func(status)) - resulttype = bool - llimpl.func_name = name + '_llimpl' - return extdef([int], resulttype, "ll_os." + name, - llimpl=llimpl) - - for name in w_star: - locals()['register_w_' + name] = registering_if(os, name)( - lambda self, xname=name : self.declare_new_w_star(xname)) - - @registering_if(os, 'ttyname') - def register_os_ttyname(self): - os_ttyname = self.llexternal('ttyname', [lltype.Signed], rffi.CCHARP, - releasegil=False, - save_err=rffi.RFFI_SAVE_ERRNO) - - def ttyname_llimpl(fd): - l_name = os_ttyname(fd) - if not l_name: - raise OSError(rposix.get_saved_errno(), "ttyname raised") - return rffi.charp2str(l_name) - - return extdef([int], str, "ll_os.ttyname", - llimpl=ttyname_llimpl) - -# ____________________________________________________________ -# Support for os.environ - -# XXX only for systems where os.environ is an instance of _Environ, -# which should cover Unix and Windows at least -assert type(os.environ) is not dict - -from rpython.rtyper.controllerentry import ControllerEntryForPrebuilt - -class EnvironExtRegistry(ControllerEntryForPrebuilt): - _about_ = os.environ - - def getcontroller(self): - from rpython.rtyper.module.ll_os_environ import OsEnvironController - return OsEnvironController() - -# ____________________________________________________________ -# 
Support for the WindowsError exception - -if sys.platform == 'win32': - from rpython.rlib import rwin32 - - class RegisterFormatError(BaseLazyRegistering): - def __init__(self): - pass - - @registering(rwin32.FormatError) - def register_rwin32_FormatError(self): - return extdef([lltype.Signed], str, - "rwin32_FormatError", - llimpl=rwin32.llimpl_FormatError) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/ll_os_stat.py pypy-5.0.1+dfsg/rpython/rtyper/module/ll_os_stat.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/ll_os_stat.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/ll_os_stat.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,592 +0,0 @@ -"""Annotation and rtyping support for the result of os.stat(), os.lstat() -and os.fstat(). In RPython like in plain Python the stat result can be -indexed like a tuple but also exposes the st_xxx attributes. -""" - -import os -import sys - -from rpython.annotator import model as annmodel -from rpython.rtyper.llannotation import lltype_to_annotation -from rpython.rlib import rposix -from rpython.rlib.rarithmetic import intmask -from rpython.rtyper import extregistry -from rpython.rtyper.annlowlevel import hlstr -from rpython.rtyper.extfunc import extdef -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.rtuple import TUPLE_TYPE -from rpython.rtyper.tool import rffi_platform as platform -from rpython.tool.pairtype import pairtype -from rpython.tool.sourcetools import func_renamer -from rpython.translator.tool.cbuild import ExternalCompilationInfo - -# Support for float times is here. -# - ALL_STAT_FIELDS contains Float fields if the system can retrieve -# sub-second timestamps. -# - TIMESPEC is defined when the "struct stat" contains st_atim field. 
- -if sys.platform.startswith('linux') or sys.platform.startswith('openbsd'): - TIMESPEC = platform.Struct('struct timespec', - [('tv_sec', rffi.TIME_T), - ('tv_nsec', rffi.LONG)]) -else: - TIMESPEC = None - -# all possible fields - some of them are not available on all platforms -ALL_STAT_FIELDS = [ - ("st_mode", lltype.Signed), - ("st_ino", lltype.SignedLongLong), - ("st_dev", lltype.SignedLongLong), - ("st_nlink", lltype.Signed), - ("st_uid", lltype.Signed), - ("st_gid", lltype.Signed), - ("st_size", lltype.SignedLongLong), - ("st_atime", lltype.Float), - ("st_mtime", lltype.Float), - ("st_ctime", lltype.Float), - ("st_blksize", lltype.Signed), - ("st_blocks", lltype.Signed), - ("st_rdev", lltype.Signed), - ("st_flags", lltype.Signed), - #("st_gen", lltype.Signed), -- new in CPy 2.5, not implemented - #("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented -] -N_INDEXABLE_FIELDS = 10 - -# For OO backends, expose only the portable fields (the first 10). -PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS] - -STATVFS_FIELDS = [ - ("f_bsize", lltype.Signed), - ("f_frsize", lltype.Signed), - ("f_blocks", lltype.Signed), - ("f_bfree", lltype.Signed), - ("f_bavail", lltype.Signed), - ("f_files", lltype.Signed), - ("f_ffree", lltype.Signed), - ("f_favail", lltype.Signed), - ("f_flag", lltype.Signed), - ("f_namemax", lltype.Signed), -] - - -# ____________________________________________________________ -# -# Annotation support - -class SomeStatResult(annmodel.SomeObject): - knowntype = os.stat_result - - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.module import r_os_stat - return r_os_stat.StatResultRepr(rtyper) - - def rtyper_makekey(self): - return self.__class__, - - def getattr(self, s_attr): - assert s_attr.is_constant(), "non-constant attr name in getattr()" - attrname = s_attr.const - TYPE = STAT_FIELD_TYPES[attrname] - return lltype_to_annotation(TYPE) - - def _get_rmarshall_support_(self): # for rlib.rmarshal - # reduce and 
recreate stat_result objects from 10-tuples - # (we ignore the extra values here for simplicity and portability) - def stat_result_reduce(st): - return (st[0], st[1], st[2], st[3], st[4], - st[5], st[6], st[7], st[8], st[9]) - - def stat_result_recreate(tup): - return make_stat_result(tup + extra_zeroes) - s_reduced = annmodel.SomeTuple([lltype_to_annotation(TYPE) - for name, TYPE in PORTABLE_STAT_FIELDS]) - extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS)) - return s_reduced, stat_result_reduce, stat_result_recreate - - -class SomeStatvfsResult(annmodel.SomeObject): - if hasattr(os, 'statvfs_result'): - knowntype = os.statvfs_result - else: - knowntype = None # will not be used - - def rtyper_makerepr(self, rtyper): - from rpython.rtyper.module import r_os_stat - return r_os_stat.StatvfsResultRepr(rtyper) - - def rtyper_makekey(self): - return self.__class__, - - def getattr(self, s_attr): - assert s_attr.is_constant() - TYPE = STATVFS_FIELD_TYPES[s_attr.const] - return lltype_to_annotation(TYPE) - - -class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)): - def getitem((s_sta, s_int)): - assert s_int.is_constant(), "os.stat()[index]: index must be constant" - index = s_int.const - assert 0 <= index < N_INDEXABLE_FIELDS, "os.stat()[index] out of range" - name, TYPE = STAT_FIELDS[index] - return lltype_to_annotation(TYPE) - - -class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)): - def getitem((s_stat, s_int)): - assert s_int.is_constant() - name, TYPE = STATVFS_FIELDS[s_int.const] - return lltype_to_annotation(TYPE) - - -s_StatResult = SomeStatResult() -s_StatvfsResult = SomeStatvfsResult() - - -def make_stat_result(tup): - """Turn a tuple into an os.stat_result object.""" - positional = tup[:N_INDEXABLE_FIELDS] - kwds = {} - for i, name in enumerate(STAT_FIELD_NAMES[N_INDEXABLE_FIELDS:]): - kwds[name] = tup[N_INDEXABLE_FIELDS + i] - return os.stat_result(positional, kwds) - - -def make_statvfs_result(tup): - return 
os.statvfs_result(tup) - - -class MakeStatResultEntry(extregistry.ExtRegistryEntry): - _about_ = make_stat_result - - def compute_result_annotation(self, s_tup): - return s_StatResult - - def specialize_call(self, hop): - from rpython.rtyper.module import r_os_stat - return r_os_stat.specialize_make_stat_result(hop) - - -class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry): - _about_ = make_statvfs_result - - def compute_result_annotation(self, s_tup): - return s_StatvfsResult - - def specialize_call(self, hop): - from rpython.rtyper.module import r_os_stat - return r_os_stat.specialize_make_statvfs_result(hop) - -# ____________________________________________________________ -# -# RFFI support - -if sys.platform.startswith('win'): - _name_struct_stat = '_stati64' - INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h'] -else: - if sys.platform.startswith('linux'): - _name_struct_stat = 'stat64' - else: - _name_struct_stat = 'stat' - INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h'] - -compilation_info = ExternalCompilationInfo( - # This must be set to 64 on some systems to enable large file support. - #pre_include_bits = ['#define _FILE_OFFSET_BITS 64'], - # ^^^ nowadays it's always set in all C files we produce. 
- includes=INCLUDES -) - -if TIMESPEC is not None: - class CConfig_for_timespec: - _compilation_info_ = compilation_info - TIMESPEC = TIMESPEC - TIMESPEC = lltype.Ptr( - platform.configure(CConfig_for_timespec)['TIMESPEC']) - - -def posix_declaration(try_to_add=None): - global STAT_STRUCT, STATVFS_STRUCT - - LL_STAT_FIELDS = STAT_FIELDS[:] - if try_to_add: - LL_STAT_FIELDS.append(try_to_add) - - if TIMESPEC is not None: - - def _expand(lst, originalname, timespecname): - for i, (_name, _TYPE) in enumerate(lst): - if _name == originalname: - # replace the 'st_atime' field of type rffi.DOUBLE - # with a field 'st_atim' of type 'struct timespec' - lst[i] = (timespecname, TIMESPEC.TO) - break - - _expand(LL_STAT_FIELDS, 'st_atime', 'st_atim') - _expand(LL_STAT_FIELDS, 'st_mtime', 'st_mtim') - _expand(LL_STAT_FIELDS, 'st_ctime', 'st_ctim') - - del _expand - else: - # Replace float fields with integers - for name in ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime'): - for i, (_name, _TYPE) in enumerate(LL_STAT_FIELDS): - if _name == name: - LL_STAT_FIELDS[i] = (_name, lltype.Signed) - break - - class CConfig: - _compilation_info_ = compilation_info - STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS) - STATVFS_STRUCT = platform.Struct('struct statvfs', STATVFS_FIELDS) - - try: - config = platform.configure(CConfig, ignore_errors=try_to_add is not None) - except platform.CompilationError: - if try_to_add: - return # failed to add this field, give up - raise - - STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT']) - STATVFS_STRUCT = lltype.Ptr(config['STATVFS_STRUCT']) - if try_to_add: - STAT_FIELDS.append(try_to_add) - - -# This lists only the fields that have been found on the underlying platform. -# Initially only the PORTABLE_STAT_FIELDS, but more may be added by the -# following loop. 
-STAT_FIELDS = PORTABLE_STAT_FIELDS[:] - -if sys.platform != 'win32': - posix_declaration() - for _i in range(len(PORTABLE_STAT_FIELDS), len(ALL_STAT_FIELDS)): - posix_declaration(ALL_STAT_FIELDS[_i]) - del _i - -# these two global vars only list the fields defined in the underlying platform -STAT_FIELD_TYPES = dict(STAT_FIELDS) # {'st_xxx': TYPE} -STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS] -del _name, _TYPE - -STATVFS_FIELD_TYPES = dict(STATVFS_FIELDS) -STATVFS_FIELD_NAMES = [name for name, tp in STATVFS_FIELDS] - - -def build_stat_result(st): - # only for LL backends - if TIMESPEC is not None: - atim = st.c_st_atim; atime = int(atim.c_tv_sec) + 1E-9 * int(atim.c_tv_nsec) - mtim = st.c_st_mtim; mtime = int(mtim.c_tv_sec) + 1E-9 * int(mtim.c_tv_nsec) - ctim = st.c_st_ctim; ctime = int(ctim.c_tv_sec) + 1E-9 * int(ctim.c_tv_nsec) - else: - atime = st.c_st_atime - mtime = st.c_st_mtime - ctime = st.c_st_ctime - - result = (st.c_st_mode, - st.c_st_ino, - st.c_st_dev, - st.c_st_nlink, - st.c_st_uid, - st.c_st_gid, - st.c_st_size, - atime, - mtime, - ctime) - - if "st_blksize" in STAT_FIELD_TYPES: result += (st.c_st_blksize,) - if "st_blocks" in STAT_FIELD_TYPES: result += (st.c_st_blocks,) - if "st_rdev" in STAT_FIELD_TYPES: result += (st.c_st_rdev,) - if "st_flags" in STAT_FIELD_TYPES: result += (st.c_st_flags,) - - return make_stat_result(result) - - -def build_statvfs_result(st): - return make_statvfs_result(( - st.c_f_bsize, - st.c_f_frsize, - st.c_f_blocks, - st.c_f_bfree, - st.c_f_bavail, - st.c_f_files, - st.c_f_ffree, - st.c_f_favail, - st.c_f_flag, - st.c_f_namemax - )) - - -def register_stat_variant(name, traits): - if name != 'fstat': - arg_is_path = True - s_arg = traits.str0 - ARG1 = traits.CCHARP - else: - arg_is_path = False - s_arg = int - ARG1 = rffi.INT - - if sys.platform == 'win32': - # See Win32 implementation below - posix_stat_llimpl = make_win32_stat_impl(name, traits) - - return extdef( - [s_arg], s_StatResult, 
traits.ll_os_name(name), - llimpl=posix_stat_llimpl) - - if sys.platform.startswith('linux'): - # because we always use _FILE_OFFSET_BITS 64 - this helps things work that are not a c compiler - _functions = {'stat': 'stat64', - 'fstat': 'fstat64', - 'lstat': 'lstat64'} - c_func_name = _functions[name] - else: - c_func_name = name - - posix_mystat = rffi.llexternal(c_func_name, - [ARG1, STAT_STRUCT], rffi.INT, - compilation_info=compilation_info, - save_err=rffi.RFFI_SAVE_ERRNO) - - @func_renamer('os_%s_llimpl' % (name,)) - def posix_stat_llimpl(arg): - stresult = lltype.malloc(STAT_STRUCT.TO, flavor='raw') - try: - if arg_is_path: - arg = traits.str2charp(arg) - error = rffi.cast(rffi.LONG, posix_mystat(arg, stresult)) - if arg_is_path: - traits.free_charp(arg) - if error != 0: - raise OSError(rposix.get_saved_errno(), "os_?stat failed") - return build_stat_result(stresult) - finally: - lltype.free(stresult, flavor='raw') - - @func_renamer('os_%s_fake' % (name,)) - def posix_fakeimpl(arg): - if s_arg == traits.str0: - arg = hlstr(arg) - st = getattr(os, name)(arg) - fields = [TYPE for fieldname, TYPE in STAT_FIELDS] - TP = TUPLE_TYPE(fields) - ll_tup = lltype.malloc(TP.TO) - for i, (fieldname, TYPE) in enumerate(STAT_FIELDS): - val = getattr(st, fieldname) - if isinstance(TYPE, lltype.Number): - rffi.setintfield(ll_tup, 'item%d' % i, int(val)) - elif TYPE is lltype.Float: - setattr(ll_tup, 'item%d' % i, float(val)) - else: - setattr(ll_tup, 'item%d' % i, val) - return ll_tup - - return extdef( - [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,), - llimpl=posix_stat_llimpl, llfakeimpl=posix_fakeimpl) - - -def register_statvfs_variant(name, traits): - if name != 'fstatvfs': - arg_is_path = True - s_arg = traits.str0 - ARG1 = traits.CCHARP - else: - arg_is_path = False - s_arg = int - ARG1 = rffi.INT - - posix_mystatvfs = rffi.llexternal(name, - [ARG1, STATVFS_STRUCT], rffi.INT, - compilation_info=compilation_info, - save_err=rffi.RFFI_SAVE_ERRNO) - - 
@func_renamer('os_%s_llimpl' % (name,)) - def posix_statvfs_llimpl(arg): - stresult = lltype.malloc(STATVFS_STRUCT.TO, flavor='raw') - try: - if arg_is_path: - arg = traits.str2charp(arg) - error = rffi.cast(rffi.LONG, posix_mystatvfs(arg, stresult)) - if arg_is_path: - traits.free_charp(arg) - if error != 0: - raise OSError(rposix.get_saved_errno(), "os_?statvfs failed") - return build_statvfs_result(stresult) - finally: - lltype.free(stresult, flavor='raw') - - @func_renamer('os_%s_fake' % (name,)) - def posix_fakeimpl(arg): - if s_arg == traits.str0: - arg = hlstr(arg) - st = getattr(os, name)(arg) - fields = [TYPE for fieldname, TYPE in STATVFS_FIELDS] - TP = TUPLE_TYPE(fields) - ll_tup = lltype.malloc(TP.TO) - for i, (fieldname, TYPE) in enumerate(STATVFS_FIELDS): - val = getattr(st, fieldname) - rffi.setintfield(ll_tup, 'item%d' % i, int(val)) - return ll_tup - - return extdef( - [s_arg], s_StatvfsResult, "ll_os.ll_os_%s" % (name,), - llimpl=posix_statvfs_llimpl, llfakeimpl=posix_fakeimpl - ) - - -def make_win32_stat_impl(name, traits): - from rpython.rlib import rwin32 - from rpython.rtyper.module.ll_win32file import make_win32_traits - win32traits = make_win32_traits(traits) - - # The CRT of Windows has a number of flaws wrt. its stat() implementation: - # - time stamps are restricted to second resolution - # - file modification times suffer from forth-and-back conversions between - # UTC and local time - # Therefore, we implement our own stat, based on the Win32 API directly. 
- from rpython.rtyper.tool import rffi_platform as platform - from rpython.translator.tool.cbuild import ExternalCompilationInfo - from rpython.rlib import rwin32 - - assert len(STAT_FIELDS) == 10 # no extra fields on Windows - - def attributes_to_mode(attributes): - m = 0 - attributes = intmask(attributes) - if attributes & win32traits.FILE_ATTRIBUTE_DIRECTORY: - m |= win32traits._S_IFDIR | 0111 # IFEXEC for user,group,other - else: - m |= win32traits._S_IFREG - if attributes & win32traits.FILE_ATTRIBUTE_READONLY: - m |= 0444 - else: - m |= 0666 - return m - - def attribute_data_to_stat(info): - st_mode = attributes_to_mode(info.c_dwFileAttributes) - st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow) - ctime = FILE_TIME_to_time_t_float(info.c_ftCreationTime) - mtime = FILE_TIME_to_time_t_float(info.c_ftLastWriteTime) - atime = FILE_TIME_to_time_t_float(info.c_ftLastAccessTime) - - result = (st_mode, - 0, 0, 0, 0, 0, - st_size, - atime, mtime, ctime) - - return make_stat_result(result) - - def by_handle_info_to_stat(info): - # similar to the one above - st_mode = attributes_to_mode(info.c_dwFileAttributes) - st_size = make_longlong(info.c_nFileSizeHigh, info.c_nFileSizeLow) - ctime = FILE_TIME_to_time_t_float(info.c_ftCreationTime) - mtime = FILE_TIME_to_time_t_float(info.c_ftLastWriteTime) - atime = FILE_TIME_to_time_t_float(info.c_ftLastAccessTime) - - # specific to fstat() - st_ino = make_longlong(info.c_nFileIndexHigh, info.c_nFileIndexLow) - st_nlink = info.c_nNumberOfLinks - - result = (st_mode, - st_ino, 0, st_nlink, 0, 0, - st_size, - atime, mtime, ctime) - - return make_stat_result(result) - - def attributes_from_dir(l_path, data): - filedata = lltype.malloc(win32traits.WIN32_FIND_DATA, flavor='raw') - try: - hFindFile = win32traits.FindFirstFile(l_path, filedata) - if hFindFile == rwin32.INVALID_HANDLE_VALUE: - return 0 - win32traits.FindClose(hFindFile) - data.c_dwFileAttributes = filedata.c_dwFileAttributes - 
rffi.structcopy(data.c_ftCreationTime, filedata.c_ftCreationTime) - rffi.structcopy(data.c_ftLastAccessTime, filedata.c_ftLastAccessTime) - rffi.structcopy(data.c_ftLastWriteTime, filedata.c_ftLastWriteTime) - data.c_nFileSizeHigh = filedata.c_nFileSizeHigh - data.c_nFileSizeLow = filedata.c_nFileSizeLow - return 1 - finally: - lltype.free(filedata, flavor='raw') - - def win32_stat_llimpl(path): - data = lltype.malloc(win32traits.WIN32_FILE_ATTRIBUTE_DATA, flavor='raw') - try: - l_path = traits.str2charp(path) - res = win32traits.GetFileAttributesEx(l_path, win32traits.GetFileExInfoStandard, data) - errcode = rwin32.GetLastError_saved() - if res == 0: - if errcode == win32traits.ERROR_SHARING_VIOLATION: - res = attributes_from_dir(l_path, data) - errcode = rwin32.GetLastError_saved() - traits.free_charp(l_path) - if res == 0: - raise WindowsError(errcode, "os_stat failed") - return attribute_data_to_stat(data) - finally: - lltype.free(data, flavor='raw') - - def win32_fstat_llimpl(fd): - handle = rwin32.get_osfhandle(fd) - filetype = win32traits.GetFileType(handle) - if filetype == win32traits.FILE_TYPE_CHAR: - # console or LPT device - return make_stat_result((win32traits._S_IFCHR, - 0, 0, 0, 0, 0, - 0, 0, 0, 0)) - elif filetype == win32traits.FILE_TYPE_PIPE: - # socket or named pipe - return make_stat_result((win32traits._S_IFIFO, - 0, 0, 0, 0, 0, - 0, 0, 0, 0)) - elif filetype == win32traits.FILE_TYPE_UNKNOWN: - error = rwin32.GetLastError_saved() - if error != 0: - raise WindowsError(error, "os_fstat failed") - # else: unknown but valid file - - # normal disk file (FILE_TYPE_DISK) - info = lltype.malloc(win32traits.BY_HANDLE_FILE_INFORMATION, - flavor='raw', zero=True) - try: - res = win32traits.GetFileInformationByHandle(handle, info) - if res == 0: - raise WindowsError(rwin32.GetLastError_saved(), - "os_fstat failed") - return by_handle_info_to_stat(info) - finally: - lltype.free(info, flavor='raw') - - if name == 'fstat': - return win32_fstat_llimpl - else: 
- return win32_stat_llimpl - - -#__________________________________________________ -# Helper functions for win32 - -def make_longlong(high, low): - return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low) - -# Seconds between 1.1.1601 and 1.1.1970 -secs_between_epochs = rffi.r_longlong(11644473600) - -def FILE_TIME_to_time_t_float(filetime): - ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime) - # FILETIME is in units of 100 nsec - return float(ft) * (1.0 / 10000000.0) - secs_between_epochs - -def time_t_to_FILE_TIME(time, filetime): - ft = rffi.r_longlong((time + secs_between_epochs) * 10000000) - filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32) - filetime.c_dwLowDateTime = rffi.r_uint(ft) # masking off high bits diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/ll_pdb.py pypy-5.0.1+dfsg/rpython/rtyper/module/ll_pdb.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/ll_pdb.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/ll_pdb.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -""" -Complain if you leave in pdb.set_trace() in the code -""" - -import pdb -from rpython.rtyper.extfunc import ExtFuncEntry - - -class FunEntry(ExtFuncEntry): - _about_ = pdb.set_trace - def compute_result_annotation(self, *args_s): - raise Exception("you left pdb.set_trace() in your interpreter!" - "If you want to attach a gdb instead, call rlib.debug.attach_gdb()") diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/ll_time.py pypy-5.0.1+dfsg/rpython/rtyper/module/ll_time.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/ll_time.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/ll_time.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,239 +0,0 @@ -""" -Low-level implementations for the external functions of the 'time' module. 
-""" - -import time, sys, math -from errno import EINTR -from rpython.rtyper.lltypesystem import rffi -from rpython.rtyper.tool import rffi_platform as platform -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.extfunc import BaseLazyRegistering, registering, extdef -from rpython.rlib import rposix -from rpython.rlib.rarithmetic import intmask, UINT_MAX -from rpython.translator.tool.cbuild import ExternalCompilationInfo - -if sys.platform == 'win32': - TIME_H = 'time.h' - FTIME = '_ftime64' - STRUCT_TIMEB = 'struct __timeb64' - includes = ['winsock2.h', 'windows.h', - TIME_H, 'sys/types.h', 'sys/timeb.h'] - need_rusage = False -else: - TIME_H = 'sys/time.h' - FTIME = 'ftime' - STRUCT_TIMEB = 'struct timeb' - includes = [TIME_H, 'time.h', 'errno.h', 'sys/select.h', - 'sys/types.h', 'unistd.h', - 'sys/time.h', 'sys/resource.h'] - - if not sys.platform.startswith("openbsd"): - includes.append('sys/timeb.h') - - need_rusage = True - - -class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes=includes - ) - TIMEVAL = platform.Struct('struct timeval', [('tv_sec', rffi.INT), - ('tv_usec', rffi.INT)]) - HAVE_GETTIMEOFDAY = platform.Has('gettimeofday') - HAVE_FTIME = platform.Has(FTIME) - if need_rusage: - RUSAGE = platform.Struct('struct rusage', [('ru_utime', TIMEVAL), - ('ru_stime', TIMEVAL)]) - -if sys.platform.startswith('freebsd') or sys.platform.startswith('netbsd'): - libraries = ['compat'] -elif sys.platform == 'linux2': - libraries = ['rt'] -else: - libraries = [] - -class CConfigForFTime: - _compilation_info_ = ExternalCompilationInfo( - includes=[TIME_H, 'sys/timeb.h'], - libraries=libraries - ) - TIMEB = platform.Struct(STRUCT_TIMEB, [('time', rffi.INT), - ('millitm', rffi.INT)]) - -class CConfigForClockGetTime: - _compilation_info_ = ExternalCompilationInfo( - includes=['time.h'], - libraries=libraries - ) - TIMESPEC = platform.Struct('struct timespec', [('tv_sec', rffi.LONG), - ('tv_nsec', rffi.LONG)]) - -constant_names = 
['RUSAGE_SELF', 'EINTR', 'CLOCK_PROCESS_CPUTIME_ID'] -for const in constant_names: - setattr(CConfig, const, platform.DefinedConstantInteger(const)) -defs_names = ['GETTIMEOFDAY_NO_TZ'] -for const in defs_names: - setattr(CConfig, const, platform.Defined(const)) - -def decode_timeval(t): - return (float(rffi.getintfield(t, 'c_tv_sec')) + - float(rffi.getintfield(t, 'c_tv_usec')) * 0.000001) - -class RegisterTime(BaseLazyRegistering): - def __init__(self): - self.configure(CConfig) - self.TIMEVALP = lltype.Ptr(self.TIMEVAL) - - @registering(time.time) - def register_time_time(self): - # Note: time.time() is used by the framework GC during collect(), - # which means that we have to be very careful about not allocating - # GC memory here. This is the reason for the _nowrapper=True. - - # AWFUL - if self.HAVE_GETTIMEOFDAY: - if self.GETTIMEOFDAY_NO_TZ: - c_gettimeofday = self.llexternal('gettimeofday', - [self.TIMEVALP], rffi.INT, - _nowrapper=True, releasegil=False) - else: - c_gettimeofday = self.llexternal('gettimeofday', - [self.TIMEVALP, rffi.VOIDP], rffi.INT, - _nowrapper=True, releasegil=False) - c_ftime = None # We have gettimeofday(2), so force ftime(3) OFF. - else: - c_gettimeofday = None - - # Only look for ftime(3) if gettimeofday(2) was not found. 
- if self.HAVE_FTIME: - self.configure(CConfigForFTime) - c_ftime = self.llexternal(FTIME, [lltype.Ptr(self.TIMEB)], - lltype.Void, - _nowrapper=True, releasegil=False) - else: - c_ftime = None # to not confuse the flow space - - c_time = self.llexternal('time', [rffi.VOIDP], rffi.TIME_T, - _nowrapper=True, releasegil=False) - - def time_time_llimpl(): - void = lltype.nullptr(rffi.VOIDP.TO) - result = -1.0 - if self.HAVE_GETTIMEOFDAY: - t = lltype.malloc(self.TIMEVAL, flavor='raw') - - errcode = -1 - if self.GETTIMEOFDAY_NO_TZ: - errcode = c_gettimeofday(t) - else: - errcode = c_gettimeofday(t, void) - - if rffi.cast(rffi.LONG, errcode) == 0: - result = decode_timeval(t) - lltype.free(t, flavor='raw') - if result != -1: - return result - else: # assume using ftime(3) - t = lltype.malloc(self.TIMEB, flavor='raw') - c_ftime(t) - result = (float(intmask(t.c_time)) + - float(intmask(t.c_millitm)) * 0.001) - lltype.free(t, flavor='raw') - return result - return float(c_time(void)) - - return extdef([], float, llimpl=time_time_llimpl, - export_name='ll_time.ll_time_time') - - @registering(time.clock) - def register_time_clock(self): - if sys.platform == 'win32': - # hacking to avoid LARGE_INTEGER which is a union... 
- A = lltype.FixedSizeArray(lltype.SignedLongLong, 1) - QueryPerformanceCounter = self.llexternal( - 'QueryPerformanceCounter', [lltype.Ptr(A)], lltype.Void, - releasegil=False) - QueryPerformanceFrequency = self.llexternal( - 'QueryPerformanceFrequency', [lltype.Ptr(A)], rffi.INT, - releasegil=False) - class State(object): - pass - state = State() - state.divisor = 0.0 - state.counter_start = 0 - def time_clock_llimpl(): - a = lltype.malloc(A, flavor='raw') - if state.divisor == 0.0: - QueryPerformanceCounter(a) - state.counter_start = a[0] - QueryPerformanceFrequency(a) - state.divisor = float(a[0]) - QueryPerformanceCounter(a) - diff = a[0] - state.counter_start - lltype.free(a, flavor='raw') - return float(diff) / state.divisor - elif self.CLOCK_PROCESS_CPUTIME_ID is not None: - # Linux and other POSIX systems with clock_gettime() - self.configure(CConfigForClockGetTime) - TIMESPEC = self.TIMESPEC - CLOCK_PROCESS_CPUTIME_ID = self.CLOCK_PROCESS_CPUTIME_ID - c_clock_gettime = self.llexternal('clock_gettime', - [lltype.Signed, lltype.Ptr(TIMESPEC)], - rffi.INT, releasegil=False) - def time_clock_llimpl(): - a = lltype.malloc(TIMESPEC, flavor='raw') - c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a) - result = (float(rffi.getintfield(a, 'c_tv_sec')) + - float(rffi.getintfield(a, 'c_tv_nsec')) * 0.000000001) - lltype.free(a, flavor='raw') - return result - else: - RUSAGE = self.RUSAGE - RUSAGE_SELF = self.RUSAGE_SELF or 0 - c_getrusage = self.llexternal('getrusage', - [rffi.INT, lltype.Ptr(RUSAGE)], - lltype.Void, - releasegil=False) - def time_clock_llimpl(): - a = lltype.malloc(RUSAGE, flavor='raw') - c_getrusage(RUSAGE_SELF, a) - result = (decode_timeval(a.c_ru_utime) + - decode_timeval(a.c_ru_stime)) - lltype.free(a, flavor='raw') - return result - - return extdef([], float, llimpl=time_clock_llimpl, - export_name='ll_time.ll_time_clock') - - @registering(time.sleep) - def register_time_sleep(self): - if sys.platform == 'win32': - Sleep = self.llexternal('Sleep', 
[rffi.ULONG], lltype.Void) - def time_sleep_llimpl(secs): - millisecs = secs * 1000.0 - while millisecs > UINT_MAX: - Sleep(UINT_MAX) - millisecs -= UINT_MAX - Sleep(rffi.cast(rffi.ULONG, int(millisecs))) - else: - c_select = self.llexternal('select', [rffi.INT, rffi.VOIDP, - rffi.VOIDP, rffi.VOIDP, - self.TIMEVALP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) - def time_sleep_llimpl(secs): - void = lltype.nullptr(rffi.VOIDP.TO) - t = lltype.malloc(self.TIMEVAL, flavor='raw') - try: - frac = math.fmod(secs, 1.0) - rffi.setintfield(t, 'c_tv_sec', int(secs)) - rffi.setintfield(t, 'c_tv_usec', int(frac*1000000.0)) - - if rffi.cast(rffi.LONG, c_select(0, void, void, void, t)) != 0: - errno = rposix.get_saved_errno() - if errno != EINTR: - raise OSError(errno, "Select failed") - finally: - lltype.free(t, flavor='raw') - - return extdef([float], None, llimpl=time_sleep_llimpl, - export_name='ll_time.ll_time_sleep') diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/ll_win32file.py pypy-5.0.1+dfsg/rpython/rtyper/module/ll_win32file.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/ll_win32file.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/ll_win32file.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,430 +0,0 @@ -""" -The Windows implementation of some posix modules, -based on the Win32 API. 
-""" -from __future__ import with_statement - -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.tool import rffi_platform as platform -from rpython.tool.sourcetools import func_renamer -from rpython.rlib.objectmodel import specialize - -def make_win32_traits(traits): - from rpython.rlib import rwin32 - - if traits.str is unicode: - suffix = 'W' - else: - suffix = 'A' - - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes = ['windows.h', 'winbase.h', 'sys/stat.h'], - ) - WIN32_FIND_DATA = platform.Struct( - 'struct _WIN32_FIND_DATA' + suffix, - # Only interesting fields - [('dwFileAttributes', rwin32.DWORD), - ('nFileSizeHigh', rwin32.DWORD), - ('nFileSizeLow', rwin32.DWORD), - ('ftCreationTime', rwin32.FILETIME), - ('ftLastAccessTime', rwin32.FILETIME), - ('ftLastWriteTime', rwin32.FILETIME), - ('cFileName', lltype.FixedSizeArray(traits.CHAR, 250))]) - ERROR_FILE_NOT_FOUND = platform.ConstantInteger( - 'ERROR_FILE_NOT_FOUND') - ERROR_NO_MORE_FILES = platform.ConstantInteger( - 'ERROR_NO_MORE_FILES') - - GetFileExInfoStandard = platform.ConstantInteger( - 'GetFileExInfoStandard') - FILE_ATTRIBUTE_DIRECTORY = platform.ConstantInteger( - 'FILE_ATTRIBUTE_DIRECTORY') - FILE_ATTRIBUTE_READONLY = platform.ConstantInteger( - 'FILE_ATTRIBUTE_READONLY') - INVALID_FILE_ATTRIBUTES = platform.ConstantInteger( - 'INVALID_FILE_ATTRIBUTES') - ERROR_SHARING_VIOLATION = platform.ConstantInteger( - 'ERROR_SHARING_VIOLATION') - _S_IFDIR = platform.ConstantInteger('_S_IFDIR') - _S_IFREG = platform.ConstantInteger('_S_IFREG') - _S_IFCHR = platform.ConstantInteger('_S_IFCHR') - _S_IFIFO = platform.ConstantInteger('_S_IFIFO') - FILE_TYPE_UNKNOWN = platform.ConstantInteger('FILE_TYPE_UNKNOWN') - FILE_TYPE_CHAR = platform.ConstantInteger('FILE_TYPE_CHAR') - FILE_TYPE_PIPE = platform.ConstantInteger('FILE_TYPE_PIPE') - - FILE_WRITE_ATTRIBUTES = platform.ConstantInteger( - 
'FILE_WRITE_ATTRIBUTES') - OPEN_EXISTING = platform.ConstantInteger( - 'OPEN_EXISTING') - FILE_FLAG_BACKUP_SEMANTICS = platform.ConstantInteger( - 'FILE_FLAG_BACKUP_SEMANTICS') - VOLUME_NAME_DOS = platform.ConstantInteger('VOLUME_NAME_DOS') - VOLUME_NAME_NT = platform.ConstantInteger('VOLUME_NAME_NT') - - WIN32_FILE_ATTRIBUTE_DATA = platform.Struct( - 'WIN32_FILE_ATTRIBUTE_DATA', - [('dwFileAttributes', rwin32.DWORD), - ('nFileSizeHigh', rwin32.DWORD), - ('nFileSizeLow', rwin32.DWORD), - ('ftCreationTime', rwin32.FILETIME), - ('ftLastAccessTime', rwin32.FILETIME), - ('ftLastWriteTime', rwin32.FILETIME)]) - - BY_HANDLE_FILE_INFORMATION = platform.Struct( - 'BY_HANDLE_FILE_INFORMATION', - [('dwFileAttributes', rwin32.DWORD), - ('ftCreationTime', rwin32.FILETIME), - ('ftLastAccessTime', rwin32.FILETIME), - ('ftLastWriteTime', rwin32.FILETIME), - ('dwVolumeSerialNumber', rwin32.DWORD), - ('nFileSizeHigh', rwin32.DWORD), - ('nFileSizeLow', rwin32.DWORD), - ('nNumberOfLinks', rwin32.DWORD), - ('nFileIndexHigh', rwin32.DWORD), - ('nFileIndexLow', rwin32.DWORD)]) - - config = platform.configure(CConfig) - - def external(*args, **kwargs): - kwargs['compilation_info'] = CConfig._compilation_info_ - llfunc = rffi.llexternal(calling_conv='win', *args, **kwargs) - return staticmethod(llfunc) - - class Win32Traits: - apisuffix = suffix - - for name in '''WIN32_FIND_DATA WIN32_FILE_ATTRIBUTE_DATA BY_HANDLE_FILE_INFORMATION - GetFileExInfoStandard - FILE_ATTRIBUTE_DIRECTORY FILE_ATTRIBUTE_READONLY - INVALID_FILE_ATTRIBUTES - _S_IFDIR _S_IFREG _S_IFCHR _S_IFIFO - FILE_TYPE_UNKNOWN FILE_TYPE_CHAR FILE_TYPE_PIPE - FILE_WRITE_ATTRIBUTES OPEN_EXISTING FILE_FLAG_BACKUP_SEMANTICS - VOLUME_NAME_DOS VOLUME_NAME_NT - ERROR_FILE_NOT_FOUND ERROR_NO_MORE_FILES - ERROR_SHARING_VIOLATION - '''.split(): - locals()[name] = config[name] - LPWIN32_FIND_DATA = lltype.Ptr(WIN32_FIND_DATA) - GET_FILEEX_INFO_LEVELS = rffi.ULONG # an enumeration - - FindFirstFile = external('FindFirstFile' + suffix, - 
[traits.CCHARP, LPWIN32_FIND_DATA], - rwin32.HANDLE, - save_err=rffi.RFFI_SAVE_LASTERROR) - FindNextFile = external('FindNextFile' + suffix, - [rwin32.HANDLE, LPWIN32_FIND_DATA], - rwin32.BOOL, - save_err=rffi.RFFI_SAVE_LASTERROR) - FindClose = external('FindClose', - [rwin32.HANDLE], - rwin32.BOOL) - - GetFileAttributes = external( - 'GetFileAttributes' + suffix, - [traits.CCHARP], - rwin32.DWORD, - save_err=rffi.RFFI_SAVE_LASTERROR) - - SetFileAttributes = external( - 'SetFileAttributes' + suffix, - [traits.CCHARP, rwin32.DWORD], - rwin32.BOOL, - save_err=rffi.RFFI_SAVE_LASTERROR) - - GetFileAttributesEx = external( - 'GetFileAttributesEx' + suffix, - [traits.CCHARP, GET_FILEEX_INFO_LEVELS, - lltype.Ptr(WIN32_FILE_ATTRIBUTE_DATA)], - rwin32.BOOL, - save_err=rffi.RFFI_SAVE_LASTERROR) - - GetFileInformationByHandle = external( - 'GetFileInformationByHandle', - [rwin32.HANDLE, lltype.Ptr(BY_HANDLE_FILE_INFORMATION)], - rwin32.BOOL, - save_err=rffi.RFFI_SAVE_LASTERROR) - - GetFileType = external( - 'GetFileType', - [rwin32.HANDLE], - rwin32.DWORD, - save_err=rffi.RFFI_SAVE_LASTERROR) - - LPSTRP = rffi.CArrayPtr(traits.CCHARP) - - GetFullPathName = external( - 'GetFullPathName' + suffix, - [traits.CCHARP, rwin32.DWORD, - traits.CCHARP, LPSTRP], - rwin32.DWORD, - save_err=rffi.RFFI_SAVE_LASTERROR) - - GetCurrentDirectory = external( - 'GetCurrentDirectory' + suffix, - [rwin32.DWORD, traits.CCHARP], - rwin32.DWORD, - save_err=rffi.RFFI_SAVE_LASTERROR) - - SetCurrentDirectory = external( - 'SetCurrentDirectory' + suffix, - [traits.CCHARP], - rwin32.BOOL, - save_err=rffi.RFFI_SAVE_LASTERROR) - - CreateDirectory = external( - 'CreateDirectory' + suffix, - [traits.CCHARP, rffi.VOIDP], - rwin32.BOOL, - save_err=rffi.RFFI_SAVE_LASTERROR) - - SetEnvironmentVariable = external( - 'SetEnvironmentVariable' + suffix, - [traits.CCHARP, traits.CCHARP], - rwin32.BOOL, - save_err=rffi.RFFI_SAVE_LASTERROR) - - CreateFile = external( - 'CreateFile' + apisuffix, - [traits.CCHARP, 
rwin32.DWORD, rwin32.DWORD, - rwin32.LPSECURITY_ATTRIBUTES, rwin32.DWORD, rwin32.DWORD, - rwin32.HANDLE], - rwin32.HANDLE, - save_err=rffi.RFFI_SAVE_LASTERROR) - - DeleteFile = external( - 'DeleteFile' + suffix, - [traits.CCHARP], - rwin32.BOOL, - save_err=rffi.RFFI_SAVE_LASTERROR) - - MoveFile = external( - 'MoveFile' + suffix, - [traits.CCHARP, traits.CCHARP], - rwin32.BOOL, - save_err=rffi.RFFI_SAVE_LASTERROR) - - return Win32Traits - -#_______________________________________________________________ -# listdir - -def make_listdir_impl(traits): - from rpython.rlib import rwin32 - win32traits = make_win32_traits(traits) - - if traits.str is unicode: - def make_listdir_mask(path): - if path and path[-1] not in (u'/', u'\\', u':'): - path += u'/' - return path + u'*.*' - - def skip_listdir(name): - return name == u"." or name == u".." - else: - def make_listdir_mask(path): - if path and path[-1] not in ('/', '\\', ':'): - path += '/' - return path + '*.*' - - def skip_listdir(name): - return name == "." or name == ".." 
- - @func_renamer('listdir_llimpl_%s' % traits.str.__name__) - def listdir_llimpl(path): - mask = make_listdir_mask(path) - filedata = lltype.malloc(win32traits.WIN32_FIND_DATA, flavor='raw') - try: - result = [] - hFindFile = win32traits.FindFirstFile(mask, filedata) - if hFindFile == rwin32.INVALID_HANDLE_VALUE: - error = rwin32.GetLastError_saved() - if error == win32traits.ERROR_FILE_NOT_FOUND: - return result - else: - raise WindowsError(error, "FindFirstFile failed") - while True: - name = traits.charp2str(rffi.cast(traits.CCHARP, - filedata.c_cFileName)) - if not skip_listdir(name): - result.append(name) - if not win32traits.FindNextFile(hFindFile, filedata): - break - # FindNextFile sets error to ERROR_NO_MORE_FILES if - # it got to the end of the directory - error = rwin32.GetLastError_saved() - win32traits.FindClose(hFindFile) - if error == win32traits.ERROR_NO_MORE_FILES: - return result - else: - raise WindowsError(error, "FindNextFile failed") - finally: - lltype.free(filedata, flavor='raw') - - return listdir_llimpl - -#_______________________________________________________________ -# chdir - -def make_chdir_impl(traits): - from rpython.rlib import rwin32 - win32traits = make_win32_traits(traits) - - if traits.str is unicode: - def isUNC(path): - return path[0] == u'\\' or path[0] == u'/' - def magic_envvar(path): - return u'=' + path[0] + u':' - else: - def isUNC(path): - return path[0] == '\\' or path[0] == '/' - def magic_envvar(path): - return '=' + path[0] + ':' - - @func_renamer('chdir_llimpl_%s' % traits.str.__name__) - def chdir_llimpl(path): - """This is a reimplementation of the C library's chdir function, - but one that produces Win32 errors instead of DOS error codes. 
- chdir is essentially a wrapper around SetCurrentDirectory; however, - it also needs to set "magic" environment variables indicating - the per-drive current directory, which are of the form =: - """ - if not win32traits.SetCurrentDirectory(path): - raise rwin32.lastSavedWindowsError() - MAX_PATH = rwin32.MAX_PATH - assert MAX_PATH > 0 - - with traits.scoped_alloc_buffer(MAX_PATH) as path: - res = win32traits.GetCurrentDirectory(MAX_PATH + 1, path.raw) - if not res: - raise rwin32.lastSavedWindowsError() - res = rffi.cast(lltype.Signed, res) - assert res > 0 - if res <= MAX_PATH + 1: - new_path = path.str(res) - else: - with traits.scoped_alloc_buffer(res) as path: - res = win32traits.GetCurrentDirectory(res, path.raw) - if not res: - raise rwin32.lastSavedWindowsError() - res = rffi.cast(lltype.Signed, res) - assert res > 0 - new_path = path.str(res) - if isUNC(new_path): - return - if not win32traits.SetEnvironmentVariable(magic_envvar(new_path), new_path): - raise rwin32.lastSavedWindowsError() - - return chdir_llimpl - -#_______________________________________________________________ -# chmod - -def make_chmod_impl(traits): - from rpython.rlib import rwin32 - win32traits = make_win32_traits(traits) - - @func_renamer('chmod_llimpl_%s' % traits.str.__name__) - def chmod_llimpl(path, mode): - attr = win32traits.GetFileAttributes(path) - if attr == win32traits.INVALID_FILE_ATTRIBUTES: - raise rwin32.lastSavedWindowsError() - if mode & 0200: # _S_IWRITE - attr &= ~win32traits.FILE_ATTRIBUTE_READONLY - else: - attr |= win32traits.FILE_ATTRIBUTE_READONLY - if not win32traits.SetFileAttributes(path, attr): - raise rwin32.lastSavedWindowsError() - - return chmod_llimpl - -#_______________________________________________________________ -# getfullpathname - -def make_getfullpathname_impl(traits): - from rpython.rlib import rwin32 - win32traits = make_win32_traits(traits) - - @func_renamer('getfullpathname_llimpl_%s' % traits.str.__name__) - def 
getfullpathname_llimpl(path): - nBufferLength = rwin32.MAX_PATH + 1 - lpBuffer = lltype.malloc(traits.CCHARP.TO, nBufferLength, flavor='raw') - try: - res = win32traits.GetFullPathName( - path, rffi.cast(rwin32.DWORD, nBufferLength), - lpBuffer, lltype.nullptr(win32traits.LPSTRP.TO)) - if res == 0: - raise rwin32.lastSavedWindowsError("_getfullpathname failed") - result = traits.charp2str(lpBuffer) - return result - finally: - lltype.free(lpBuffer, flavor='raw') - - return getfullpathname_llimpl - -def make_utime_impl(traits): - from rpython.rlib import rwin32 - win32traits = make_win32_traits(traits) - from rpython.rtyper.module.ll_os_stat import time_t_to_FILE_TIME - - GetSystemTime = rffi.llexternal( - 'GetSystemTime', - [lltype.Ptr(rwin32.SYSTEMTIME)], - lltype.Void, - calling_conv='win', - save_err=rffi.RFFI_SAVE_LASTERROR) - - SystemTimeToFileTime = rffi.llexternal( - 'SystemTimeToFileTime', - [lltype.Ptr(rwin32.SYSTEMTIME), - lltype.Ptr(rwin32.FILETIME)], - rwin32.BOOL, - calling_conv='win') - - SetFileTime = rffi.llexternal( - 'SetFileTime', - [rwin32.HANDLE, - lltype.Ptr(rwin32.FILETIME), - lltype.Ptr(rwin32.FILETIME), - lltype.Ptr(rwin32.FILETIME)], - rwin32.BOOL, - calling_conv = 'win', - save_err=rffi.RFFI_SAVE_LASTERROR) - - @specialize.argtype(1) - def os_utime_llimpl(path, tp): - hFile = win32traits.CreateFile(path, - win32traits.FILE_WRITE_ATTRIBUTES, 0, - None, win32traits.OPEN_EXISTING, - win32traits.FILE_FLAG_BACKUP_SEMANTICS, - rwin32.NULL_HANDLE) - if hFile == rwin32.INVALID_HANDLE_VALUE: - raise rwin32.lastSavedWindowsError() - ctime = lltype.nullptr(rwin32.FILETIME) - atime = lltype.malloc(rwin32.FILETIME, flavor='raw') - mtime = lltype.malloc(rwin32.FILETIME, flavor='raw') - try: - if tp is None: - now = lltype.malloc(rwin32.SYSTEMTIME, flavor='raw') - try: - GetSystemTime(now) - if (not SystemTimeToFileTime(now, atime) or - not SystemTimeToFileTime(now, mtime)): - raise rwin32.lastSavedWindowsError() - finally: - lltype.free(now, 
flavor='raw') - else: - actime, modtime = tp - time_t_to_FILE_TIME(actime, atime) - time_t_to_FILE_TIME(modtime, mtime) - if not SetFileTime(hFile, ctime, atime, mtime): - raise rwin32.lastSavedWindowsError() - finally: - rwin32.CloseHandle(hFile) - lltype.free(atime, flavor='raw') - lltype.free(mtime, flavor='raw') - - return os_utime_llimpl diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/r_os_stat.py pypy-5.0.1+dfsg/rpython/rtyper/module/r_os_stat.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/r_os_stat.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/r_os_stat.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,122 +0,0 @@ -""" -RTyping support for os.stat_result objects. -They are rtyped just like a tuple of the correct length supporting -only indexing and the st_xxx attributes. We need a custom StatResultRepr -because when rtyping for LL backends we have extra platform-dependent -items at the end of the tuple, but for OO backends we only want the -portable items. This allows the OO backends to assume a fixed shape for -the tuples returned by os.stat(). 
-""" -from rpython.annotator import model as annmodel -from rpython.rtyper.llannotation import lltype_to_annotation -from rpython.flowspace.model import Constant -from rpython.flowspace.operation import op -from rpython.tool.pairtype import pairtype -from rpython.rtyper.rmodel import Repr -from rpython.rtyper.rint import IntegerRepr -from rpython.rtyper.error import TyperError -from rpython.rtyper.module import ll_os_stat - - -class StatResultRepr(Repr): - - def __init__(self, rtyper): - self.rtyper = rtyper - self.stat_fields = ll_os_stat.STAT_FIELDS - - self.stat_field_indexes = {} - for i, (name, TYPE) in enumerate(self.stat_fields): - self.stat_field_indexes[name] = i - - self.s_tuple = annmodel.SomeTuple([lltype_to_annotation(TYPE) - for name, TYPE in self.stat_fields]) - self.r_tuple = rtyper.getrepr(self.s_tuple) - self.lowleveltype = self.r_tuple.lowleveltype - - def redispatch_getfield(self, hop, index): - rtyper = self.rtyper - s_index = rtyper.annotator.bookkeeper.immutablevalue(index) - hop2 = hop.copy() - spaceop = op.getitem(hop.args_v[0], Constant(index)) - spaceop.result = hop.spaceop.result - hop2.spaceop = spaceop - hop2.args_v = spaceop.args - hop2.args_s = [self.s_tuple, s_index] - hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] - return hop2.dispatch() - - def rtype_getattr(self, hop): - s_attr = hop.args_s[1] - attr = s_attr.const - try: - index = self.stat_field_indexes[attr] - except KeyError: - raise TyperError("os.stat().%s: field not available" % (attr,)) - return self.redispatch_getfield(hop, index) - - -class __extend__(pairtype(StatResultRepr, IntegerRepr)): - - def rtype_getitem((r_sta, r_int), hop): - s_int = hop.args_s[1] - index = s_int.const - return r_sta.redispatch_getfield(hop, index) - - -def specialize_make_stat_result(hop): - r_StatResult = hop.rtyper.getrepr(ll_os_stat.s_StatResult) - [v_result] = hop.inputargs(r_StatResult.r_tuple) - # no-op conversion from r_StatResult.r_tuple to r_StatResult - 
hop.exception_cannot_occur() - return v_result - - -class StatvfsResultRepr(Repr): - - def __init__(self, rtyper): - self.rtyper = rtyper - self.statvfs_fields = ll_os_stat.STATVFS_FIELDS - - self.statvfs_field_indexes = {} - for i, (name, TYPE) in enumerate(self.statvfs_fields): - self.statvfs_field_indexes[name] = i - - self.s_tuple = annmodel.SomeTuple([lltype_to_annotation(TYPE) - for name, TYPE in self.statvfs_fields]) - self.r_tuple = rtyper.getrepr(self.s_tuple) - self.lowleveltype = self.r_tuple.lowleveltype - - def redispatch_getfield(self, hop, index): - rtyper = self.rtyper - s_index = rtyper.annotator.bookkeeper.immutablevalue(index) - hop2 = hop.copy() - spaceop = op.getitem(hop.args_v[0], Constant(index)) - spaceop.result = hop.spaceop.result - hop2.spaceop = spaceop - hop2.args_v = spaceop.args - hop2.args_s = [self.s_tuple, s_index] - hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] - return hop2.dispatch() - - def rtype_getattr(self, hop): - s_attr = hop.args_s[1] - attr = s_attr.const - try: - index = self.statvfs_field_indexes[attr] - except KeyError: - raise TyperError("os.statvfs().%s: field not available" % (attr,)) - return self.redispatch_getfield(hop, index) - - -class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)): - def rtype_getitem((r_sta, r_int), hop): - s_int = hop.args_s[1] - index = s_int.const - return r_sta.redispatch_getfield(hop, index) - - -def specialize_make_statvfs_result(hop): - r_StatvfsResult = hop.rtyper.getrepr(ll_os_stat.s_StatvfsResult) - [v_result] = hop.inputargs(r_StatvfsResult.r_tuple) - hop.exception_cannot_occur() - return v_result diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/support.py pypy-5.0.1+dfsg/rpython/rtyper/module/support.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/support.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/support.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -import sys - -from rpython.annotator import model as annmodel -from 
rpython.rtyper.lltypesystem import lltype, rffi - -_WIN32 = sys.platform.startswith('win') -UNDERSCORE_ON_WIN32 = '_' if _WIN32 else '' - -# utility conversion functions -class LLSupport: - _mixin_ = True - - def to_rstr(s): - from rpython.rtyper.lltypesystem.rstr import STR, mallocstr - if s is None: - return lltype.nullptr(STR) - p = mallocstr(len(s)) - for i in range(len(s)): - p.chars[i] = s[i] - return p - to_rstr = staticmethod(to_rstr) - - def to_runicode(s): - from rpython.rtyper.lltypesystem.rstr import UNICODE, mallocunicode - if s is None: - return lltype.nullptr(UNICODE) - p = mallocunicode(len(s)) - for i in range(len(s)): - p.chars[i] = s[i] - return p - to_runicode = staticmethod(to_runicode) - - def from_rstr(rs): - if not rs: # null pointer - return None - else: - return ''.join([rs.chars[i] for i in range(len(rs.chars))]) - from_rstr = staticmethod(from_rstr) - - def from_rstr_nonnull(rs): - assert rs - return ''.join([rs.chars[i] for i in range(len(rs.chars))]) - from_rstr_nonnull = staticmethod(from_rstr_nonnull) - - -class StringTraits: - str = str - str0 = annmodel.s_Str0 - CHAR = rffi.CHAR - CCHARP = rffi.CCHARP - charp2str = staticmethod(rffi.charp2str) - charpsize2str = staticmethod(rffi.charpsize2str) - scoped_str2charp = staticmethod(rffi.scoped_str2charp) - str2charp = staticmethod(rffi.str2charp) - free_charp = staticmethod(rffi.free_charp) - scoped_alloc_buffer = staticmethod(rffi.scoped_alloc_buffer) - - @staticmethod - def posix_function_name(name): - return UNDERSCORE_ON_WIN32 + name - - @staticmethod - def ll_os_name(name): - return 'll_os.ll_os_' + name - -class UnicodeTraits: - str = unicode - str0 = annmodel.s_Unicode0 - CHAR = rffi.WCHAR_T - CCHARP = rffi.CWCHARP - charp2str = staticmethod(rffi.wcharp2unicode) - charpsize2str = staticmethod(rffi.wcharpsize2unicode) - str2charp = staticmethod(rffi.unicode2wcharp) - scoped_str2charp = staticmethod(rffi.scoped_unicode2wcharp) - free_charp = staticmethod(rffi.free_wcharp) - 
scoped_alloc_buffer = staticmethod(rffi.scoped_alloc_unicodebuffer) - - @staticmethod - def posix_function_name(name): - return UNDERSCORE_ON_WIN32 + 'w' + name - - @staticmethod - def ll_os_name(name): - return 'll_os.ll_os_w' + name - - -def ll_strcpy(dst_s, src_s, n): - dstchars = dst_s.chars - srcchars = src_s.chars - i = 0 - while i < n: - dstchars[i] = srcchars[i] - i += 1 - -def _ll_strfill(dst_s, srcchars, n): - dstchars = dst_s.chars - i = 0 - while i < n: - dstchars[i] = srcchars[i] - i += 1 diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/test/__init__.py pypy-5.0.1+dfsg/rpython/rtyper/module/test/__init__.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/test/__init__.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/test/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -# diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_environ.py pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_environ.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_environ.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_environ.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -from rpython.translator.c.test.test_genc import compile -import os - -def test_environ_items(): - def foo(x): - if x: - return len(os.environ.items()) - else: - return 0 - - f = compile(foo, [int], backendopt=False) - assert f(1) > 0 - -def test_unset_error(): - import sys - def foo(x): - if x: - os.environ['TEST'] = 'STRING' - assert os.environ['TEST'] == 'STRING' - del os.environ['TEST'] - try: - del os.environ['key='] - except (KeyError, OSError): - return 1 - return 2 - else: - return 0 - - f = compile(foo, [int], backendopt=False) - if sys.platform.startswith('win'): - # Do not open error dialog box - import ctypes - SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN - old_err_mode = ctypes.windll.kernel32.GetErrorMode() - new_err_mode = old_err_mode | SEM_NOGPFAULTERRORBOX - 
ctypes.windll.kernel32.SetErrorMode(new_err_mode) - assert f(1) == 1 - if sys.platform.startswith('win'): - ctypes.windll.kernel32.SetErrorMode(old_err_mode) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_path.py pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_path.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_path.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_path.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -import py - -import sys, os - -from rpython.rtyper.lltypesystem.module.ll_os_path import Implementation as impl -from rpython.rtyper.test.test_llinterp import interpret -from rpython.tool.udir import udir - - -def test_exists(): - filename = impl.to_rstr(str(py.path.local(__file__))) - assert impl.ll_os_path_exists(filename) == True - assert not impl.ll_os_path_exists(impl.to_rstr( - "strange_filename_that_looks_improbable.sde")) - -def test_posixpath(): - import posixpath - def f(): - assert posixpath.join("/foo", "bar") == "/foo/bar" - assert posixpath.join("/foo", "spam/egg") == "/foo/spam/egg" - assert posixpath.join("/foo", "/bar") == "/bar" - interpret(f, []) - -def test_ntpath(): - import ntpath - def f(): - assert ntpath.join("\\foo", "bar") == "\\foo\\bar" - assert ntpath.join("c:\\foo", "spam\\egg") == "c:\\foo\\spam\\egg" - assert ntpath.join("c:\\foo", "d:\\bar") == "d:\\bar" - interpret(f, []) - -def test_isdir(): - if sys.platform != 'win32': - py.test.skip("XXX cannot run os.stat() on the llinterp yet") - - s = str(udir.join('test_isdir')) - def f(): - return os.path.isdir(s) - res = interpret(f, []) - assert res == os.path.isdir(s) - os.mkdir(s) - res = interpret(f, []) - assert res is True - - # On Windows, the libc stat() is flawed: - # stat('c:/temp') works - # but stat('c:/temp/') does not find the directory... - # This test passes with our own stat() implementation. 
- s += os.path.sep - def f(): - return os.path.isdir(s) - res = interpret(f, []) - assert res is True diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_os.py pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_os.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_os.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_os.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,324 +0,0 @@ -import os - -from rpython.tool.udir import udir -from rpython.translator.c.test.test_genc import compile -from rpython.rtyper.module import ll_os -#has side effect of registering functions -from rpython.tool.pytest.expecttest import ExpectTest - -from rpython.rtyper import extregistry -import errno -import sys -import py - -def getllimpl(fn): - return extregistry.lookup(fn).lltypeimpl - -def test_access(): - filename = str(udir.join('test_access.txt')) - fd = file(filename, 'w') - fd.close() - - for mode in os.R_OK, os.W_OK, os.X_OK, os.R_OK | os.W_OK | os.X_OK: - result = getllimpl(os.access)(filename, mode) - assert result == os.access(filename, mode) - - -def test_times(): - """ - posix.times should compile as an RPython function and should return a - five-tuple giving float-representations (seconds, effectively) of the four - fields from the underlying struct tms and the return value. 
- """ - times = eval(compile(lambda: str(os.times()), ())()) - assert isinstance(times, tuple) - assert len(times) == 5 - for value in times: - assert isinstance(value, float) - -def test_getlogin(): - if not hasattr(os, 'getlogin'): - py.test.skip('posix specific function') - try: - expected = os.getlogin() - except OSError, e: - py.test.skip("the underlying os.getlogin() failed: %s" % e) - data = getllimpl(os.getlogin)() - assert data == expected - -def test_statvfs(): - if not hasattr(os, 'statvfs'): - py.test.skip('posix specific function') - try: - os.statvfs('.') - except OSError, e: - py.test.skip("the underlying os.statvfs() failed: %s" % e) - getllimpl(os.statvfs)('.') - -def test_fstatvfs(): - if not hasattr(os, 'fstatvfs'): - py.test.skip('posix specific function') - try: - os.fstatvfs(0) - except OSError, e: - py.test.skip("the underlying os.fstatvfs() failed: %s" % e) - getllimpl(os.fstatvfs)(0) - -def test_utimes(): - if os.name != 'nt': - py.test.skip('Windows specific feature') - # Windows support centiseconds - def f(fname, t1): - os.utime(fname, (t1, t1)) - - fname = udir.join('test_utimes.txt') - fname.ensure() - t1 = 1159195039.25 - compile(f, (str, float))(str(fname), t1) - assert t1 == os.stat(str(fname)).st_mtime - if sys.version_info < (2, 7): - py.test.skip('requires Python 2.7') - t1 = 5000000000.0 - compile(f, (str, float))(str(fname), t1) - assert t1 == os.stat(str(fname)).st_mtime - -def test__getfullpathname(): - if os.name != 'nt': - py.test.skip('nt specific function') - posix = __import__(os.name) - sysdrv = os.getenv('SystemDrive', 'C:') - stuff = sysdrv + 'stuff' - data = getllimpl(posix._getfullpathname)(stuff) - assert data == posix._getfullpathname(stuff) - # the most intriguing failure of ntpath.py should not repeat, here: - assert not data.endswith(stuff) - -def test_getcwd(): - data = getllimpl(os.getcwd)() - assert data == os.getcwd() - -def test_chdir(): - def check_special_envvar(): - if sys.platform != 'win32': - return 
- pwd = os.getcwd() - import ctypes - buf = ctypes.create_string_buffer(1000) - len = ctypes.windll.kernel32.GetEnvironmentVariableA('=%c:' % pwd[0], buf, 1000) - if (len == 0) and "WINGDB_PYTHON" in os.environ: - # the ctypes call seems not to work in the Wing debugger - return - assert str(buf.value).lower() == pwd.lower() - # ctypes returns the drive letter in uppercase, - # os.getcwd does not, - # but there may be uppercase in os.getcwd path - - pwd = os.getcwd() - try: - check_special_envvar() - getllimpl(os.chdir)('..') - assert os.getcwd() == os.path.dirname(pwd) - check_special_envvar() - finally: - os.chdir(pwd) - -def test_mkdir(): - filename = str(udir.join('test_mkdir.dir')) - getllimpl(os.mkdir)(filename, 0) - exc = py.test.raises(OSError, getllimpl(os.mkdir), filename, 0) - assert exc.value.errno == errno.EEXIST - if sys.platform == 'win32': - assert exc.type is WindowsError - -def test_strerror(): - data = getllimpl(os.strerror)(2) - assert data == os.strerror(2) - -def test_system(): - filename = str(udir.join('test_system.txt')) - arg = '%s -c "print 1+1" > %s' % (sys.executable, filename) - data = getllimpl(os.system)(arg) - assert data == 0 - assert file(filename).read().strip() == '2' - os.unlink(filename) - - -EXECVE_ENV = {"foo": "bar", "baz": "quux"} - -def test_execve(): - if os.name != 'posix': - py.test.skip('posix specific function') - - ll_execve = getllimpl(os.execve) - - def run_execve(program, args=None, env=None, do_path_lookup=False): - if args is None: - args = [program] - else: - args = [program] + args - if env is None: - env = {} - # we cannot directly call ll_execve() because it replaces the - # current process. 
- fd_read, fd_write = os.pipe() - childpid = os.fork() - if childpid == 0: - # in the child - os.close(fd_read) - os.dup2(fd_write, 1) # stdout - os.close(fd_write) - if do_path_lookup: - os.execvp(program, args) - else: - ll_execve(program, args, env) - assert 0, "should not arrive here" - else: - # in the parent - os.close(fd_write) - child_stdout = [] - while True: - data = os.read(fd_read, 4096) - if not data: break # closed - child_stdout.append(data) - pid, status = os.waitpid(childpid, 0) - os.close(fd_read) - return status, ''.join(child_stdout) - - # Test exit status and code - result, child_stdout = run_execve("/usr/bin/which", ["true"], do_path_lookup=True) - result, child_stdout = run_execve(child_stdout.strip()) # /bin/true or /usr/bin/true - assert os.WIFEXITED(result) - assert os.WEXITSTATUS(result) == 0 - result, child_stdout = run_execve("/usr/bin/which", ["false"], do_path_lookup=True) - result, child_stdout = run_execve(child_stdout.strip()) # /bin/false or /usr/bin/false - assert os.WIFEXITED(result) - assert os.WEXITSTATUS(result) == 1 - - # Test environment - result, child_stdout = run_execve("/usr/bin/env", env=EXECVE_ENV) - assert os.WIFEXITED(result) - assert os.WEXITSTATUS(result) == 0 - assert dict([line.split('=') for line in child_stdout.splitlines()]) == EXECVE_ENV - - # The following won't actually execute anything, so they don't need - # a child process helper. 
- - # If the target does not exist, an OSError should result - info = py.test.raises( - OSError, ll_execve, "this/file/is/non/existent", [], {}) - assert info.value.errno == errno.ENOENT - - # If the target is not executable, an OSError should result - info = py.test.raises( - OSError, ll_execve, "/etc/passwd", [], {}) - assert info.value.errno == errno.EACCES - -def test_os_write(): - #Same as test in rpython/test/test_rbuiltin - fname = str(udir.join('os_test.txt')) - fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777) - assert fd >= 0 - f = getllimpl(os.write) - f(fd, 'Hello world') - os.close(fd) - with open(fname) as fid: - assert fid.read() == "Hello world" - fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777) - os.close(fd) - py.test.raises(OSError, f, fd, 'Hello world') - -def test_os_close(): - fname = str(udir.join('os_test.txt')) - fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777) - assert fd >= 0 - os.write(fd, 'Hello world') - f = getllimpl(os.close) - f(fd) - py.test.raises(OSError, f, fd) - -def test_os_lseek(): - fname = str(udir.join('os_test.txt')) - fd = os.open(fname, os.O_RDWR|os.O_CREAT, 0777) - assert fd >= 0 - os.write(fd, 'Hello world') - f = getllimpl(os.lseek) - f(fd,0,0) - assert os.read(fd, 11) == 'Hello world' - os.close(fd) - py.test.raises(OSError, f, fd, 0, 0) - -def test_os_fsync(): - fname = str(udir.join('os_test.txt')) - fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777) - assert fd >= 0 - os.write(fd, 'Hello world') - f = getllimpl(os.fsync) - f(fd) - os.close(fd) - fid = open(fname) - assert fid.read() == 'Hello world' - fid.close() - py.test.raises(OSError, f, fd) - -def test_os_fdatasync(): - try: - f = getllimpl(os.fdatasync) - except: - py.test.skip('No fdatasync in os') - fname = str(udir.join('os_test.txt')) - fd = os.open(fname, os.O_WRONLY|os.O_CREAT, 0777) - assert fd >= 0 - os.write(fd, 'Hello world') - f(fd) - fid = open(fname) - assert fid.read() == 'Hello world' - os.close(fd) - py.test.raises(OSError, f, fd) - - -def 
test_os_kill(): - if not hasattr(os,'kill') or sys.platform == 'win32': - py.test.skip('No kill in os') - f = getllimpl(os.kill) - import subprocess - import signal - proc = subprocess.Popen([sys.executable, "-c", - "import time;" - "time.sleep(10)", - ], - ) - f(proc.pid, signal.SIGTERM) - expected = -signal.SIGTERM - assert proc.wait() == expected - -def test_isatty(): - try: - f = getllimpl(os.isatty) - except: - py.test.skip('No isatty in os') - assert f(-1) == False - - -class TestOsExpect(ExpectTest): - def setup_class(cls): - if not hasattr(os, 'ttyname'): - py.test.skip("no ttyname") - - def test_ttyname(self): - def f(): - import os - from rpython.rtyper.test.test_llinterp import interpret - - def ll_to_string(s): - return ''.join(s.chars) - - def f(num): - try: - return os.ttyname(num) - except OSError: - return '' - - assert ll_to_string(interpret(f, [0])) == f(0) - assert ll_to_string(interpret(f, [338])) == '' - - self.run_test(f) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_stat.py pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_stat.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_stat.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_os_stat.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -from rpython.rtyper.module import ll_os_stat, ll_os -from rpython.tool.udir import udir -import sys, os -import py - - -class TestLinuxImplementation: - def setup_class(cls): - if not sys.platform.startswith('linux'): - py.test.skip("linux specific tests") - - def test_has_all_fields(self): - assert ll_os_stat.STAT_FIELDS == ll_os_stat.ALL_STAT_FIELDS[:13] - - -class TestWin32Implementation: - def setup_class(cls): - if sys.platform != 'win32': - py.test.skip("win32 specific tests") - - def test_stat(self): - stat = ll_os_stat.make_win32_stat_impl('stat', ll_os.StringTraits()) - wstat = ll_os_stat.make_win32_stat_impl('stat', ll_os.UnicodeTraits()) - def check(f): - # msec 
resolution, +- rounding error - expected = int(os.stat(f).st_mtime*1000) - assert abs(int(stat(f).st_mtime*1000) - expected) < 2 - assert abs(int(wstat(unicode(f)).st_mtime*1000) - expected) < 2 - - check('c:/') - check(os.environ['TEMP']) - check(sys.executable) - - def test_fstat(self): - fstat = ll_os_stat.make_win32_stat_impl('fstat', ll_os.StringTraits()) - stat = fstat(0) # stdout - assert stat.st_mode != 0 - - def test_stat_large_number(self): - if sys.version_info < (2, 7): - py.test.skip('requires Python 2.7') - fname = udir.join('test_stat_large_number.txt') - fname.ensure() - t1 = 5000000000.0 - os.utime(str(fname), (t1, t1)) - stat = ll_os_stat.make_win32_stat_impl('stat', ll_os.StringTraits()) - assert stat(str(fname)).st_mtime == t1 diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_strtod.py pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_strtod.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_strtod.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_strtod.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -import py - -from rpython.rtyper.test.tool import BaseRtypingTest -from rpython.rlib import rfloat - -class TestStrtod(BaseRtypingTest): - def test_formatd(self): - for flags in [0, - rfloat.DTSF_ADD_DOT_0]: - def f(y): - return rfloat.formatd(y, 'g', 2, flags) - - assert self.ll_to_string(self.interpret(f, [3.0])) == f(3.0) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_time.py pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_time.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_ll_time.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_ll_time.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ - -from rpython.rtyper.test.tool import BaseRtypingTest -#from rpython.translator.c.test.test_genc import compile - -import time, sys - -class TestTime(BaseRtypingTest): - def test_time_time(self): - def fn(): - return 
time.time() - - t0 = time.time() - res0 = self.interpret(fn, []) - t1 = time.time() - res1 = self.interpret(fn, []) - assert t0 <= res0 <= t1 <= res1 - - def test_time_clock(self): - def sleep(t): - # a version of time.sleep() that consumes actual CPU time - start = time.clock() - while abs(time.clock() - start) <= t: - pass - def f(): - return time.clock() - t0 = time.clock() - sleep(0.011) - t1 = self.interpret(f, []) - sleep(0.011) - t2 = time.clock() - sleep(0.011) - t3 = self.interpret(f, []) - sleep(0.011) - t4 = time.clock() - sleep(0.011) - t5 = self.interpret(f, []) - sleep(0.011) - t6 = time.clock() - # time.clock() and t1() might have a different notion of zero, so - # we can only subtract two numbers returned by the same function. - # Moreover they might have different precisions, but it should - # be at least 0.01 seconds, hence the "sleeps". - assert 0.0099 <= t2-t0 <= 9.0 - assert 0.0099 <= t3-t1 <= t4-t0 <= 9.0 - assert 0.0099 <= t4-t2 <= t5-t1 <= t6-t0 <= 9.0 - assert 0.0099 <= t5-t3 <= t6-t2 <= 9.0 - assert 0.0099 <= t6-t4 <= 9.0 - - def test_time_sleep(self): - def does_nothing(): - time.sleep(0.19) - t0 = time.time() - self.interpret(does_nothing, []) - t1 = time.time() - assert t0 <= t1 - assert t1 - t0 >= 0.15 diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_posix.py pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_posix.py --- pypy-4.0.1+dfsg/rpython/rtyper/module/test/test_posix.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/module/test/test_posix.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,304 +0,0 @@ -import py -from rpython.rtyper.test.tool import BaseRtypingTest -from rpython.rtyper.annlowlevel import hlstr -from rpython.tool.udir import udir -from rpython.rlib.rarithmetic import is_valid_int - -import os -exec 'import %s as posix' % os.name - -def setup_module(module): - testf = udir.join('test.txt') - module.path = testf.strpath - -class TestPosix(BaseRtypingTest): - - def setup_method(self, meth): - 
# prepare/restore the file before each test - testfile = open(path, 'wb') - testfile.write('This is a test') - testfile.close() - - def test_open(self): - def f(): - ff = posix.open(path, posix.O_RDONLY, 0777) - return ff - func = self.interpret(f, []) - assert is_valid_int(func) - - def test_fstat(self): - def fo(fi): - g = posix.fstat(fi) - return g - fi = os.open(path,os.O_RDONLY,0777) - func = self.interpret(fo,[fi]) - stat = os.fstat(fi) - for i in range(len(stat)): - assert long(getattr(func, 'item%d' % i)) == stat[i] - - - def test_stat(self): - def fo(): - g = posix.stat(path) - return g - func = self.interpret(fo,[]) - stat = os.stat(path) - for i in range(len(stat)): - assert long(getattr(func, 'item%d' % i)) == stat[i] - - def test_stat_exception(self): - def fo(): - try: - posix.stat('I/do/not/exist') - except OSError: - return True - else: - return False - res = self.interpret(fo,[]) - assert res - - def test_times(self): - import py; py.test.skip("llinterp does not like tuple returns") - from rpython.rtyper.test.test_llinterp import interpret - times = interpret(lambda: posix.times(), ()) - assert isinstance(times, tuple) - assert len(times) == 5 - for value in times: - assert is_valid_int(value) - - - def test_lseek(self): - def f(fi, pos): - posix.lseek(fi, pos, 0) - fi = os.open(path, os.O_RDONLY, 0777) - func = self.interpret(f, [fi, 5]) - res = os.read(fi, 2) - assert res =='is' - - def test_isatty(self): - def f(fi): - posix.isatty(fi) - fi = os.open(path, os.O_RDONLY, 0777) - func = self.interpret(f, [fi]) - assert not func - os.close(fi) - func = self.interpret(f, [fi]) - assert not func - - def test_getcwd(self): - def f(): - return posix.getcwd() - res = self.interpret(f,[]) - cwd = os.getcwd() - #print res.chars,cwd - assert self.ll_to_string(res) == cwd - - def test_write(self): - def f(fi): - if fi > 0: - text = 'This is a test' - else: - text = '333' - return posix.write(fi,text) - fi = os.open(path,os.O_WRONLY,0777) - text = 'This is a 
test' - func = self.interpret(f,[fi]) - os.close(fi) - fi = os.open(path,os.O_RDONLY,0777) - res = os.read(fi,20) - assert res == text - - def test_read(self): - def f(fi,len): - return posix.read(fi,len) - fi = os.open(path,os.O_WRONLY,0777) - text = 'This is a test' - os.write(fi,text) - os.close(fi) - fi = os.open(path,os.O_RDONLY,0777) - res = self.interpret(f,[fi,20]) - assert self.ll_to_string(res) == text - - if hasattr(os, 'chown'): - def test_chown(self): - f = open(path, "w") - f.write("xyz") - f.close() - def f(): - try: - posix.chown(path, os.getuid(), os.getgid()) - return 1 - except OSError: - return 2 - - assert self.interpret(f, []) == 1 - os.unlink(path) - assert self.interpret(f, []) == 2 - - def test_close(self): - def f(fi): - return posix.close(fi) - fi = os.open(path,os.O_WRONLY,0777) - text = 'This is a test' - os.write(fi,text) - res = self.interpret(f,[fi]) - py.test.raises( OSError, os.fstat, fi) - - if hasattr(os, 'ftruncate'): - def test_ftruncate(self): - def f(fi,len): - os.ftruncate(fi,len) - fi = os.open(path,os.O_RDWR,0777) - func = self.interpret(f,[fi,6]) - assert os.fstat(fi).st_size == 6 - - if hasattr(os, 'getuid'): - def test_getuid(self): - def f(): - return os.getuid() - assert self.interpret(f, []) == f() - - if hasattr(os, 'getgid'): - def test_getgid(self): - def f(): - return os.getgid() - assert self.interpret(f, []) == f() - - if hasattr(os, 'setuid'): - def test_os_setuid(self): - def f(): - os.setuid(os.getuid()) - return os.getuid() - assert self.interpret(f, []) == f() - - if hasattr(os, 'sysconf'): - def test_os_sysconf(self): - def f(i): - return os.sysconf(i) - assert self.interpret(f, [13]) == f(13) - - if hasattr(os, 'confstr'): - def test_os_confstr(self): - def f(i): - try: - return os.confstr(i) - except OSError: - return "oooops!!" 
- some_value = os.confstr_names.values()[-1] - res = self.interpret(f, [some_value]) - assert hlstr(res) == f(some_value) - res = self.interpret(f, [94781413]) - assert hlstr(res) == "oooops!!" - - if hasattr(os, 'pathconf'): - def test_os_pathconf(self): - def f(i): - return os.pathconf("/tmp", i) - i = os.pathconf_names["PC_NAME_MAX"] - some_value = self.interpret(f, [i]) - assert some_value >= 31 - - if hasattr(os, 'chroot'): - def test_os_chroot(self): - def f(): - try: - os.chroot('!@$#!#%$#^#@!#!$$#^') - except OSError: - return 1 - return 0 - - assert self.interpret(f, []) == 1 - - def test_os_wstar(self): - from rpython.rtyper.module.ll_os import RegisterOs - for name in RegisterOs.w_star: - if not hasattr(os, name): - continue - def fun(s): - return getattr(os, name)(s) - - for value in [0, 1, 127, 128, 255]: - res = self.interpret(fun, [value]) - assert res == fun(value) - - if hasattr(os, 'getgroups'): - def test_getgroups(self): - def f(): - return os.getgroups() - ll_a = self.interpret(f, []) - assert self.ll_to_list(ll_a) == f() - - if hasattr(os, 'setgroups'): - def test_setgroups(self): - def f(): - try: - os.setgroups(os.getgroups()) - except OSError: - pass - self.interpret(f, []) - - if hasattr(os, 'initgroups'): - def test_initgroups(self): - def f(): - try: - os.initgroups('sUJJeumz', 4321) - except OSError: - return 1 - return 0 - res = self.interpret(f, []) - assert res == 1 - - if hasattr(os, 'tcgetpgrp'): - def test_tcgetpgrp(self): - def f(fd): - try: - return os.tcgetpgrp(fd) - except OSError: - return 42 - res = self.interpret(f, [9999]) - assert res == 42 - - if hasattr(os, 'tcsetpgrp'): - def test_tcsetpgrp(self): - def f(fd, pgrp): - try: - os.tcsetpgrp(fd, pgrp) - except OSError: - return 1 - return 0 - res = self.interpret(f, [9999, 1]) - assert res == 1 - - if hasattr(os, 'getresuid'): - def test_getresuid(self): - def f(): - a, b, c = os.getresuid() - return a + b * 37 + c * 1291 - res = self.interpret(f, []) - a, b, c = 
os.getresuid() - assert res == a + b * 37 + c * 1291 - - if hasattr(os, 'getresgid'): - def test_getresgid(self): - def f(): - a, b, c = os.getresgid() - return a + b * 37 + c * 1291 - res = self.interpret(f, []) - a, b, c = os.getresgid() - assert res == a + b * 37 + c * 1291 - - if hasattr(os, 'setresuid'): - def test_setresuid(self): - def f(): - a, b, c = os.getresuid() - a = (a + 1) - 1 - os.setresuid(a, b, c) - self.interpret(f, []) - - if hasattr(os, 'setresgid'): - def test_setresgid(self): - def f(): - a, b, c = os.getresgid() - a = (a + 1) - 1 - os.setresgid(a, b, c) - self.interpret(f, []) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/rbuiltin.py pypy-5.0.1+dfsg/rpython/rtyper/rbuiltin.py --- pypy-4.0.1+dfsg/rpython/rtyper/rbuiltin.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/rbuiltin.py 2016-03-19 16:40:12.000000000 +0000 @@ -676,24 +676,6 @@ resulttype=llmemory.Address) -@typer_for(isinstance) -def rtype_builtin_isinstance(hop): - hop.exception_cannot_occur() - if hop.s_result.is_constant(): - return hop.inputconst(lltype.Bool, hop.s_result.const) - - if hop.args_s[1].is_constant() and hop.args_s[1].const in (str, list, unicode): - if hop.args_s[0].knowntype not in (str, list, unicode): - raise TyperError("isinstance(x, str/list/unicode) expects x to be known" - " statically to be a str/list/unicode or None") - rstrlist = hop.args_r[0] - vstrlist = hop.inputarg(rstrlist, arg=0) - cnone = hop.inputconst(rstrlist, None) - return hop.genop('ptr_ne', [vstrlist, cnone], resulttype=lltype.Bool) - - assert isinstance(hop.args_r[0], rclass.InstanceRepr) - return hop.args_r[0].rtype_isinstance(hop) - @typer_for(objectmodel.instantiate) def rtype_instantiate(hop, i_nonmovable=None): hop.exception_cannot_occur() diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/rclass.py pypy-5.0.1+dfsg/rpython/rtyper/rclass.py --- pypy-4.0.1+dfsg/rpython/rtyper/rclass.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/rclass.py 2016-03-19 
16:40:12.000000000 +0000 @@ -13,8 +13,9 @@ from rpython.rtyper.lltypesystem.lltype import ( Ptr, Struct, GcStruct, malloc, cast_pointer, castable, nullptr, RuntimeTypeInfo, getRuntimeTypeInfo, typeOf, Void, FuncType, Bool, Signed, - functionptr) + functionptr, attachRuntimeTypeInfo) from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import rstr from rpython.rtyper.rmodel import ( Repr, getgcflavor, inputconst, warning, mangle) @@ -445,6 +446,13 @@ def rtyper_makekey(self): return self.__class__, self.classdef +class __extend__(annmodel.SomeException): + def rtyper_makerepr(self, rtyper): + return self.as_SomeInstance().rtyper_makerepr(rtyper) + + def rtyper_makekey(self): + return self.__class__, frozenset(self.classdefs) + class __extend__(annmodel.SomeType): def rtyper_makerepr(self, rtyper): return get_type_repr(rtyper) @@ -583,10 +591,17 @@ _callable=graph.func) else: destrptr = None - OBJECT = OBJECT_BY_FLAVOR[LLFLAVOR[self.gcflavor]] - self.rtyper.attachRuntimeTypeInfoFunc(self.object_type, - ll_runtime_type_info, - OBJECT, destrptr) + self.rtyper.call_all_setups() # compute ForwardReferences now + args_s = [SomePtr(Ptr(OBJECT))] + graph = self.rtyper.annotate_helper(ll_runtime_type_info, args_s) + s = self.rtyper.annotation(graph.getreturnvar()) + if (not isinstance(s, SomePtr) or + s.ll_ptrtype != Ptr(RuntimeTypeInfo)): + raise TyperError("runtime type info function returns %r, " + "expected Ptr(RuntimeTypeInfo)" % (s)) + funcptr = self.rtyper.getcallable(graph) + attachRuntimeTypeInfo(self.object_type, funcptr, destrptr) + vtable = self.rclass.getvtable() self.rtyper.set_type_for_typeptr(vtable, self.lowleveltype.TO) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/rlist.py pypy-5.0.1+dfsg/rpython/rtyper/rlist.py --- pypy-4.0.1+dfsg/rpython/rtyper/rlist.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/rlist.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 
+1,7 @@ from rpython.annotator import model as annmodel from rpython.flowspace.model import Constant from rpython.rlib import rgc, jit, types -from rpython.rlib.debug import ll_assert +from rpython.rtyper.debug import ll_assert from rpython.rlib.objectmodel import malloc_zero_filled, enforceargs, specialize from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import ovfcheck, widen, r_uint, intmask @@ -965,14 +965,25 @@ ll_listdelslice_startstop.oopspec = 'list.delslice_startstop(l, start, stop)' def ll_listsetslice(l1, start, stop, l2): - count = l2.ll_length() + len1 = l1.ll_length() + len2 = l2.ll_length() ll_assert(start >= 0, "l[start:x] = l with unexpectedly negative start") - ll_assert(start <= l1.ll_length(), "l[start:x] = l with start > len(l)") - ll_assert(stop <= l1.ll_length(), "stop cannot be past the end of l1") - ll_assert(count == stop - start, - "setslice cannot resize lists in RPython") - # XXX ...but it would be easy enough to support if really needed - ll_arraycopy(l2, l1, 0, start, count) + ll_assert(start <= len1, "l[start:x] = l with start > len(l)") + ll_assert(stop <= len1, "stop cannot be past the end of l1") + if len2 == stop - start: + ll_arraycopy(l2, l1, 0, start, len2) + elif len2 < stop - start: + ll_arraycopy(l2, l1, 0, start, len2) + ll_arraycopy(l1, l1, stop, start + len2, len1 - stop) + l1._ll_resize_le(len1 + len2 - (stop - start)) + else: # len2 > stop - start: + try: + newlength = ovfcheck(len1 + len2) + except OverflowError: + raise MemoryError + l1._ll_resize_ge(newlength) + ll_arraycopy(l1, l1, stop, start + len2, len1 - stop) + ll_arraycopy(l2, l1, 0, start, len2) # ____________________________________________________________ diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/rmodel.py pypy-5.0.1+dfsg/rpython/rtyper/rmodel.py --- pypy-4.0.1+dfsg/rpython/rtyper/rmodel.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/rmodel.py 2016-03-19 16:40:15.000000000 +0000 @@ -204,6 +204,21 @@ else: 
return hop.genop('int_is_true', [vlen], resulttype=Bool) + def rtype_isinstance(self, hop): + hop.exception_cannot_occur() + if hop.s_result.is_constant(): + return hop.inputconst(lltype.Bool, hop.s_result.const) + + if hop.args_s[1].is_constant() and hop.args_s[1].const in (str, list, unicode): + if hop.args_s[0].knowntype not in (str, list, unicode): + raise TyperError("isinstance(x, str/list/unicode) expects x to be known" + " statically to be a str/list/unicode or None") + rstrlist = hop.args_r[0] + vstrlist = hop.inputarg(rstrlist, arg=0) + cnone = hop.inputconst(rstrlist, None) + return hop.genop('ptr_ne', [vstrlist, cnone], resulttype=lltype.Bool) + raise TyperError + def rtype_hash(self, hop): ll_hash = self.get_ll_hash_function() v, = hop.inputargs(self) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/rpbc.py pypy-5.0.1+dfsg/rpython/rtyper/rpbc.py --- pypy-4.0.1+dfsg/rpython/rtyper/rpbc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/rpbc.py 2016-03-19 16:40:12.000000000 +0000 @@ -7,7 +7,7 @@ from rpython.annotator.classdesc import ClassDesc from rpython.flowspace.model import Constant from rpython.annotator.argument import simple_args -from rpython.rlib.debug import ll_assert +from rpython.rtyper.debug import ll_assert from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper import rclass, callparse from rpython.rtyper.rclass import CLASSTYPE, OBJECT_VTABLE, OBJECTPTR diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/rptr.py pypy-5.0.1+dfsg/rpython/rtyper/rptr.py --- pypy-4.0.1+dfsg/rpython/rtyper/rptr.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/rptr.py 2016-03-19 16:40:12.000000000 +0000 @@ -11,7 +11,7 @@ class __extend__(SomePtr): def rtyper_makerepr(self, rtyper): - return PtrRepr(self.ll_ptrtype) + return PtrRepr(self.ll_ptrtype, rtyper) def rtyper_makekey(self): return self.__class__, self.ll_ptrtype @@ -26,9 +26,11 @@ class PtrRepr(Repr): - def __init__(self, ptrtype): + def __init__(self, 
ptrtype, rtyper=None): assert isinstance(ptrtype, lltype.Ptr) self.lowleveltype = ptrtype + if rtyper is not None: + self.rtyper = rtyper # only for _convert_const_ptr() def ll_str(self, p): from rpython.rtyper.lltypesystem.rstr import ll_str @@ -108,6 +110,13 @@ def rtype_call_args(self, hop): raise TyperError("kwds args not supported") + def convert_const(self, value): + if hasattr(value, '_convert_const_ptr'): + assert hasattr(self, 'rtyper') + return value._convert_const_ptr(self) + return Repr.convert_const(self, value) + + class __extend__(pairtype(PtrRepr, PtrRepr)): def convert_from_to((r_ptr1, r_ptr2), v, llop): if r_ptr1.lowleveltype == r_ptr2.lowleveltype: diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/rtyper.py pypy-5.0.1+dfsg/rpython/rtyper/rtyper.py --- pypy-4.0.1+dfsg/rpython/rtyper/rtyper.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/rtyper.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,14 +1,14 @@ """ RTyper: converts high-level operations into low-level operations in flow graphs. -The main class, with code to walk blocks and dispatch individual operations -to the care of the rtype_*() methods implemented in the other r* modules. -For each high-level operation 'hop', the rtype_*() methods produce low-level -operations that are collected in the 'llops' list defined here. When necessary, -conversions are inserted. +The main class, with code to walk blocks and dispatch individual operations to +the care of the rtype_*() methods implemented in the other r* modules. For +each high-level operation 'hop', the rtype_*() methods produce low-level +operations that are collected in the 'llops' list defined here. When +necessary, conversions are inserted. -This logic borrows a bit from rpython.annotator.annrpython, without the fixpoint -computation part. +This logic borrows a bit from rpython.annotator.annrpython, without the +fixpoint computation part. 
""" import os @@ -16,26 +16,40 @@ import py, math from rpython.annotator import model as annmodel, unaryop, binaryop -from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.flowspace.model import Variable, Constant, SpaceOperation -from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy +from rpython.rtyper.annlowlevel import ( + annotate_lowlevel_helper, LowLevelAnnotatorPolicy) from rpython.rtyper.error import TyperError from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, - Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, - attachRuntimeTypeInfo, Primitive, getfunctionptr) -from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError + ContainerType, typeOf, Primitive, getfunctionptr) +from rpython.rtyper.rmodel import Repr, inputconst from rpython.rtyper import rclass from rpython.rtyper.rclass import RootClassRepr from rpython.tool.pairtype import pair from rpython.translator.unsimplify import insert_empty_block +from rpython.translator.sandbox.rsandbox import make_sandbox_trampoline + + +class RTyperBackend(object): + pass + +class GenCBackend(RTyperBackend): + pass +genc_backend = GenCBackend() + +class LLInterpBackend(RTyperBackend): + pass +llinterp_backend = LLInterpBackend() class RPythonTyper(object): from rpython.rtyper.rmodel import log - def __init__(self, annotator): + def __init__(self, annotator, backend=genc_backend): self.annotator = annotator + self.backend = backend self.lowlevel_ann_policy = LowLevelAnnotatorPolicy(self) self.reprs = {} self._reprs_must_call_setup = [] @@ -561,6 +575,17 @@ def getcallable(self, graph): def getconcretetype(v): return self.bindingrepr(v).lowleveltype + if self.annotator.translator.config.translation.sandbox: + try: + name = graph.func._sandbox_external_name + except AttributeError: + 
pass + else: + args_s = [v.annotation for v in graph.getargs()] + s_result = graph.getreturnvar().annotation + sandboxed = make_sandbox_trampoline(name, args_s, s_result) + return self.getannmixlevel().delayedfunction( + sandboxed, args_s, s_result) return getfunctionptr(graph, getconcretetype) @@ -590,21 +615,6 @@ graph = self.annotate_helper(ll_function, argtypes) return self.getcallable(graph) - def attachRuntimeTypeInfoFunc(self, GCSTRUCT, func, ARG_GCSTRUCT=None, - destrptr=None): - self.call_all_setups() # compute ForwardReferences now - if ARG_GCSTRUCT is None: - ARG_GCSTRUCT = GCSTRUCT - args_s = [SomePtr(Ptr(ARG_GCSTRUCT))] - graph = self.annotate_helper(func, args_s) - s = self.annotation(graph.getreturnvar()) - if (not isinstance(s, SomePtr) or - s.ll_ptrtype != Ptr(RuntimeTypeInfo)): - raise TyperError("runtime type info function %r returns %r, " - "excepted Ptr(RuntimeTypeInfo)" % (func, s)) - funcptr = self.getcallable(graph) - attachRuntimeTypeInfo(GCSTRUCT, funcptr, destrptr) - # register operations from annotation model RPythonTyper._registeroperations(unaryop.UNARY_OPERATIONS, binaryop.BINARY_OPERATIONS) @@ -876,18 +886,6 @@ return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) - def genexternalcall(self, fnname, args_v, resulttype=None, **flags): - if isinstance(resulttype, Repr): - resulttype = resulttype.lowleveltype - argtypes = [v.concretetype for v in args_v] - FUNCTYPE = FuncType(argtypes, resulttype or Void) - f = functionptr(FUNCTYPE, fnname, **flags) - cf = inputconst(typeOf(f), f) - return self.genop('direct_call', [cf]+list(args_v), resulttype) - - def gencapicall(self, cfnname, args_v, resulttype=None, **flags): - return self.genexternalcall(cfnname, args_v, resulttype=resulttype, external="CPython", **flags) - def genconst(self, ll_value): return inputconst(typeOf(ll_value), ll_value) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_extfunc.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_extfunc.py --- 
pypy-4.0.1+dfsg/rpython/rtyper/test/test_extfunc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_extfunc.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,6 @@ import py -from rpython.rtyper.extfunc import ExtFuncEntry, register_external,\ - is_external, lazy_register +from rpython.rtyper.extfunc import register_external from rpython.annotator.model import SomeInteger, SomeString, AnnotatorError from rpython.annotator.annrpython import RPythonAnnotator from rpython.annotator.policy import AnnotatorPolicy @@ -19,11 +18,7 @@ "NOT_RPYTHON" return eval("x+40") - class BTestFuncEntry(ExtFuncEntry): - _about_ = b - name = 'b' - signature_args = [SomeInteger()] - signature_result = SomeInteger() + register_external(b, [int], result=int) def f(): return b(2) @@ -43,15 +38,11 @@ def c(y, x): yyy - class CTestFuncEntry(ExtFuncEntry): - _about_ = c - name = 'ccc' - signature_args = [SomeInteger()] * 2 - signature_result = SomeInteger() - - def lltypeimpl(y, x): - return y + x - lltypeimpl = staticmethod(lltypeimpl) + def llimpl(y, x): + return y + x + + register_external(c, [int, int], result=int, llimpl=llimpl, + export_name='ccc') def f(): return c(3, 4) @@ -59,22 +50,6 @@ res = interpret(f, []) assert res == 7 - def test_register_external_signature(self): - """ - Test the standard interface for external functions. - """ - def dd(): - pass - register_external(dd, [int], int) - - def f(): - return dd(3) - - policy = AnnotatorPolicy() - a = RPythonAnnotator(policy=policy) - s = a.build_types(f, []) - assert isinstance(s, SomeInteger) - def test_register_external_tuple_args(self): """ Verify the annotation of a registered external function which takes a @@ -121,23 +96,6 @@ s = a.build_types(f, []) assert isinstance(s, SomeInteger) - def test_register_external_specialcase(self): - """ - When args=None, the external function accepts any arguments unmodified. 
- """ - def function_withspecialcase(arg): - return repr(arg) - register_external(function_withspecialcase, args=None, result=str) - - def f(): - x = function_withspecialcase - return x(33) + x("aaa") + x([]) + "\n" - - policy = AnnotatorPolicy() - a = RPythonAnnotator(policy=policy) - s = a.build_types(f, []) - assert isinstance(s, SomeString) - def test_str0(self): str0 = SomeString(no_nul=True) def os_open(s): @@ -182,3 +140,22 @@ # fails with TooLateForChange a.build_types(g, [[str]]) a.build_types(g, [[str0]]) # Does not raise + + def test_register_external_llfakeimpl(self): + def a(i): + return i + def a_llimpl(i): + return i * 2 + def a_llfakeimpl(i): + return i * 3 + register_external(a, [int], int, llimpl=a_llimpl, + llfakeimpl=a_llfakeimpl) + def f(i): + return a(i) + + res = interpret(f, [7]) + assert res == 21 + + from rpython.translator.c.test.test_genc import compile + fc = compile(f, [int]) + assert fc(7) == 14 diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_extfuncregister.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_extfuncregister.py --- pypy-4.0.1+dfsg/rpython/rtyper/test/test_extfuncregister.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_extfuncregister.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,113 +0,0 @@ - -""" A small test suite for discovering whether lazy registration -of register_external functions work as intendet -""" - -import py -from rpython.rtyper.extfunc import lazy_register, BaseLazyRegistering, \ - registering, registering_if, extdef -from rpython.rtyper.test.test_llinterp import interpret - -def test_lazy_register(): - def f(): - return 3 - - def g(): - return f() - - def reg_func(): - 1/0 - - lazy_register(f, reg_func) - - py.test.raises(ZeroDivisionError, interpret, g, []) - -def test_lazy_register_class_raising(): - def f(): - return 3 - - def g(): - return 3 - - class LazyRegister(BaseLazyRegistering): - def __init__(self): - self.stuff = 8 - self.x = [] - - @registering(f) - def 
register_f(self): - self.x.append(1) - 1/0 - - @registering(g) - def register_g(self): - self.x.append(2) - self.register(g, [], int, llimpl=lambda : self.stuff) - - py.test.raises(TypeError, "LazyRegister()") - assert LazyRegister.instance.x == [1, 2] - py.test.raises(ZeroDivisionError, interpret, lambda : f(), []) - assert interpret(lambda : g(), []) == 8 - -def test_lazy_register_extdef(): - def g(): - return 3 - - x = [] - - def register_g(): - x.append('g') - return extdef([], int, llimpl=lambda : 21) - - nothing = lazy_register(g, register_g) - - assert x == ['g'] - assert nothing is None - assert interpret(lambda : g(), []) == 21 - -def test_lazy_register_raising_init(): - def f(): - return 3 - - def g(): - return 3 - - class LazyRegister(BaseLazyRegistering): - def __init__(self): - 1/0 - - @registering(f) - def register_f(self): - pass - - @registering(g) - def register_g(self): - pass - - py.test.raises(ZeroDivisionError, interpret, lambda : f(), []) - py.test.raises(ZeroDivisionError, interpret, lambda : g(), []) - -def test_registering_if(): - class A: - @staticmethod - def f(): - pass - - @registering_if(A, 'f') - def foo(): - pass - - assert foo._registering_func is A.f - - @registering_if(A, 'g') - def bar(): - pass - - assert bar is None - - @registering_if(A, 'f', False) - def baz(): - pass - - assert baz is None - diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_llannotation.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_llannotation.py --- pypy-4.0.1+dfsg/rpython/rtyper/test/test_llannotation.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_llannotation.py 2016-03-19 16:40:12.000000000 +0000 @@ -15,6 +15,7 @@ def __init__(self, cls=C): self.cls = cls self.name = cls.__name__ + self.classdesc = cls def test_ll_to_annotation(): s_z = ll_to_annotation(lltype.Signed._defl()) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_llinterp.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_llinterp.py --- 
pypy-4.0.1+dfsg/rpython/rtyper/test/test_llinterp.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_llinterp.py 2016-03-19 16:40:15.000000000 +0000 @@ -13,7 +13,7 @@ from rpython.rlib.rarithmetic import r_uint, ovfcheck from rpython.tool import leakfinder from rpython.conftest import option - +from rpython.rtyper.rtyper import llinterp_backend # switch on logging of interp to show more info on failing tests @@ -39,6 +39,7 @@ t.view() global typer # we need it for find_exception typer = t.buildrtyper() + typer.backend = llinterp_backend typer.specialize() #t.view() t.checkgraphs() diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_nongc.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_nongc.py --- pypy-4.0.1+dfsg/rpython/rtyper/test/test_nongc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_nongc.py 2016-03-19 16:40:12.000000000 +0000 @@ -40,7 +40,7 @@ #does not raise: s = a.build_types(f, []) Adef = a.bookkeeper.getuniqueclassdef(A) - assert s.knowntype == Adef + assert s.classdef == Adef rtyper = RPythonTyper(a) rtyper.specialize() assert (Adef, 'raw') in rtyper.instance_reprs @@ -59,7 +59,7 @@ s = a.build_types(f, []) Adef = a.bookkeeper.getuniqueclassdef(A) Bdef = a.bookkeeper.getuniqueclassdef(B) - assert s.knowntype == Bdef + assert s.classdef == Bdef rtyper = RPythonTyper(a) rtyper.specialize() assert (Adef, 'raw') in rtyper.instance_reprs diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_rbuiltin.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_rbuiltin.py --- pypy-4.0.1+dfsg/rpython/rtyper/test/test_rbuiltin.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_rbuiltin.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,8 +3,7 @@ import py -from rpython.rlib.debug import llinterpcall -from rpython.rlib.objectmodel import instantiate, running_on_llinterp, compute_unique_id, current_object_addr_as_int +from rpython.rlib.objectmodel import instantiate, compute_unique_id, 
current_object_addr_as_int from rpython.rlib.rarithmetic import (intmask, longlongmask, r_int64, is_valid_int, r_int, r_uint, r_longlong, r_ulonglong) from rpython.rlib.rstring import StringBuilder, UnicodeBuilder @@ -284,7 +283,7 @@ count = 0 for dir_call in enum_direct_calls(test_llinterp.typer.annotator.translator, wr_open): cfptr = dir_call.args[0] - assert self.get_callable(cfptr.value).__name__.startswith('os_open') + assert self.get_callable(cfptr.value).__name__ == 'open' count += 1 assert count == 1 @@ -456,26 +455,6 @@ res = self.interpret(fn, [3.25]) assert res == 7.25 - def test_debug_llinterpcall(self): - S = lltype.Struct('S', ('m', lltype.Signed)) - SPTR = lltype.Ptr(S) - def foo(n): - "NOT_RPYTHON" - s = lltype.malloc(S, immortal=True) - s.m = eval("n*6", locals()) - return s - def fn(n): - if running_on_llinterp: - return llinterpcall(SPTR, foo, n).m - else: - return 321 - res = self.interpret(fn, [7]) - assert res == 42 - from rpython.translator.c.test.test_genc import compile - f = compile(fn, [int]) - res = f(7) - assert res == 321 - def test_id(self): class A: pass diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_rdict.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_rdict.py --- pypy-4.0.1+dfsg/rpython/rtyper/test/test_rdict.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_rdict.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,25 +1,62 @@ +import sys +from contextlib import contextmanager +import signal + from rpython.translator.translator import TranslationContext +from rpython.annotator.model import ( + SomeInteger, SomeString, SomeChar, SomeUnicodeString, SomeUnicodeCodePoint) +from rpython.annotator.dictdef import DictKey, DictValue from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rtyper import rint -from rpython.rtyper.lltypesystem import rdict, rstr +from rpython.rtyper.lltypesystem import rdict from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rlib.objectmodel import r_dict 
from rpython.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong import py -py.log.setconsumer("rtyper", py.log.STDOUT) - -def not_really_random(): - """A random-ish generator, which also generates nice patterns from time to time. - Could be useful to detect problems associated with specific usage patterns.""" - import random - x = random.random() - print 'random seed: %r' % (x,) - for i in range(12000): - r = 3.4 + i/20000.0 - x = r*x - x*x - assert 0 <= x < 4 - yield x +from hypothesis import settings +from hypothesis.strategies import ( + builds, sampled_from, binary, just, integers, text, characters, tuples) +from hypothesis.stateful import GenericStateMachine, run_state_machine_as_test + +def ann2strategy(s_value): + if isinstance(s_value, SomeChar): + return builds(chr, integers(min_value=0, max_value=255)) + elif isinstance(s_value, SomeString): + if s_value.can_be_None: + return binary() | just(None) + else: + return binary() + elif isinstance(s_value, SomeUnicodeCodePoint): + return characters() + elif isinstance(s_value, SomeUnicodeString): + if s_value.can_be_None: + return text() | just(None) + else: + return text() + elif isinstance(s_value, SomeInteger): + return integers(min_value=~sys.maxint, max_value=sys.maxint) + else: + raise TypeError("Cannot convert annotation %s to a strategy" % s_value) + + +if hasattr(signal, 'alarm'): + @contextmanager + def signal_timeout(n): + """A flaky context manager that throws an exception if the body of the + `with` block runs for longer than `n` seconds. + """ + def handler(signum, frame): + raise RuntimeError('timeout') + signal.signal(signal.SIGALRM, handler) + signal.alarm(n) + try: + yield + finally: + signal.alarm(0) +else: + @contextmanager + def signal_timeout(n): + yield class BaseTestRDict(BaseRtypingTest): @@ -199,9 +236,8 @@ def test_dict_copy(self): def func(): - # XXX this does not work if we use chars, only! 
dic = self.newdict() - dic['ab'] = 1 + dic['a'] = 1 dic['b'] = 2 d2 = dic.copy() ok = 1 @@ -999,33 +1035,11 @@ s_BA_dic = s.items[1] r_AB_dic = rtyper.getrepr(s_AB_dic) - r_BA_dic = rtyper.getrepr(s_AB_dic) + r_BA_dic = rtyper.getrepr(s_BA_dic) assert r_AB_dic.lowleveltype == r_BA_dic.lowleveltype - def test_dict_resize(self): - py.test.skip("test written for non-ordered dicts, update or kill") - # XXX we no longer automatically resize on 'del'. We need to - # hack a bit in this test to trigger a resize by continuing to - # fill the dict's table while keeping the actual size very low - # in order to force a resize to shrink the table back - def func(want_empty): - d = self.newdict() - for i in range(rdict.DICT_INITSIZE << 1): - d[chr(ord('a') + i)] = i - if want_empty: - for i in range(rdict.DICT_INITSIZE << 1): - del d[chr(ord('a') + i)] - for i in range(rdict.DICT_INITSIZE << 3): - d[chr(ord('A') - i)] = i - del d[chr(ord('A') - i)] - return d - res = self.interpret(func, [0]) - assert len(res.entries) > rdict.DICT_INITSIZE - res = self.interpret(func, [1]) - assert len(res.entries) == rdict.DICT_INITSIZE - def test_opt_dummykeymarker(self): def f(): d = {"hello": None} @@ -1117,183 +1131,131 @@ DICT = lltype.typeOf(llres.item1) assert sorted(DICT.TO.entries.TO.OF._flds) == ['f_hash', 'key', 'value'] - def test_deleted_entry_reusage_with_colliding_hashes(self): - py.test.skip("test written for non-ordered dicts, update or kill") - def lowlevelhash(value): - p = rstr.mallocstr(len(value)) - for i in range(len(value)): - p.chars[i] = value[i] - return rstr.LLHelpers.ll_strhash(p) - - def func(c1, c2): - c1 = chr(c1) - c2 = chr(c2) - d = self.newdict() - d[c1] = 1 - d[c2] = 2 - del d[c1] - return d[c2] - - char_by_hash = {} - base = rdict.DICT_INITSIZE - for y in range(0, 256): - y = chr(y) - y_hash = lowlevelhash(y) % base - char_by_hash.setdefault(y_hash, []).append(y) - - x, y = char_by_hash[0][:2] # find a collision - - res = self.interpret(func, [ord(x), 
ord(y)]) - assert res == 2 - - def func2(c1, c2): - c1 = chr(c1) - c2 = chr(c2) - d = self.newdict() - d[c1] = 1 - d[c2] = 2 - del d[c1] - d[c1] = 3 - return d - - res = self.interpret(func2, [ord(x), ord(y)]) - for i in range(len(res.entries)): - assert not (res.entries.everused(i) and not res.entries.valid(i)) - - def func3(c0, c1, c2, c3, c4, c5, c6, c7): - d = self.newdict() - c0 = chr(c0) ; d[c0] = 1; del d[c0] - c1 = chr(c1) ; d[c1] = 1; del d[c1] - c2 = chr(c2) ; d[c2] = 1; del d[c2] - c3 = chr(c3) ; d[c3] = 1; del d[c3] - c4 = chr(c4) ; d[c4] = 1; del d[c4] - c5 = chr(c5) ; d[c5] = 1; del d[c5] - c6 = chr(c6) ; d[c6] = 1; del d[c6] - c7 = chr(c7) ; d[c7] = 1; del d[c7] - return d - if rdict.DICT_INITSIZE != 8: - py.test.skip("make dict tests more indepdent from initsize") - res = self.interpret(func3, [ord(char_by_hash[i][0]) - for i in range(rdict.DICT_INITSIZE)]) - count_frees = 0 - for i in range(len(res.entries)): - if not res.entries.everused(i): - count_frees += 1 - assert count_frees >= 3 - -class TestStress: - - def test_stress(self): - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - dictrepr = rdict.DictRepr(None, rint.signed_repr, rint.signed_repr, - DictKey(None, annmodel.SomeInteger()), - DictValue(None, annmodel.SomeInteger())) +class Action(object): + def __init__(self, method, args): + self.method = method + self.args = args + + def execute(self, space): + getattr(space, self.method)(*self.args) + + def __repr__(self): + return "space.%s(%s)" % (self.method, ', '.join(map(repr, self.args))) + +class PseudoRTyper: + cache_dummy_values = {} + +# XXX: None keys crash the test, but translation sort-of allows it +keytypes_s = [ + SomeString(), SomeInteger(), SomeChar(), + SomeUnicodeString(), SomeUnicodeCodePoint()] +st_keys = sampled_from(keytypes_s) +st_values = sampled_from(keytypes_s + [SomeString(can_be_None=True)]) + +class MappingSpace(object): + def __init__(self, s_key, 
s_value): + self.s_key = s_key + self.s_value = s_value + rtyper = PseudoRTyper() + r_key = s_key.rtyper_makerepr(rtyper) + r_value = s_value.rtyper_makerepr(rtyper) + dictrepr = self.MappingRepr(rtyper, r_key, r_value, + DictKey(None, s_key), + DictValue(None, s_value)) dictrepr.setup() - l_dict = rdict.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - value = 0 - - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rdict.ll_dict_getitem(l_dict, n) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue - - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rdict.ll_dict_delitem(l_dict, n) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - rdict.ll_dict_setitem(l_dict, n, value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = value - value += 1 - else: - try: - gotvalue = rdict.ll_dict_getitem(l_dict, n) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_items == referencelength - complete_check() - - def test_stress_2(self): - yield self.stress_combination, True, False - yield self.stress_combination, False, True - yield self.stress_combination, False, False - yield self.stress_combination, True, True - - def stress_combination(self, key_can_be_none, value_can_be_none): - from rpython.rtyper.lltypesystem.rstr import string_repr - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - - print - print "Testing combination with can_be_None: keys %s, values %s" % ( - key_can_be_none, value_can_be_none) - - class PseudoRTyper: - cache_dummy_values = {} - dictrepr = rdict.DictRepr(PseudoRTyper(), string_repr, 
string_repr, - DictKey(None, annmodel.SomeString(key_can_be_none)), - DictValue(None, annmodel.SomeString(value_can_be_none))) - dictrepr.setup() - print dictrepr.lowleveltype - for key, value in dictrepr.DICTENTRY._adtmeths.items(): - print ' %s = %s' % (key, value) - l_dict = rdict.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - values = not_really_random() - keytable = [string_repr.convert_const("foo%d" % n) - for n in range(len(referencetable))] - - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rdict.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue - - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rdict.ll_dict_delitem(l_dict, keytable[n]) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - ll_value = string_repr.convert_const(str(values.next())) - rdict.ll_dict_setitem(l_dict, keytable[n], ll_value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = ll_value - else: - try: - gotvalue = rdict.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_items == referencelength - complete_check() - + self.l_dict = self.newdict(dictrepr) + self.reference = self.new_reference() + self.ll_key = r_key.convert_const + self.ll_value = r_value.convert_const + + def setitem(self, key, value): + ll_key = self.ll_key(key) + ll_value = self.ll_value(value) + self.ll_setitem(self.l_dict, ll_key, ll_value) + self.reference[key] = value + assert self.ll_contains(self.l_dict, ll_key) + + def delitem(self, key): + ll_key = self.ll_key(key) + self.ll_delitem(self.l_dict, ll_key) + del 
self.reference[key] + assert not self.ll_contains(self.l_dict, ll_key) + + def copydict(self): + self.l_dict = self.ll_copy(self.l_dict) + assert self.ll_len(self.l_dict) == len(self.reference) + + def cleardict(self): + self.ll_clear(self.l_dict) + self.reference.clear() + assert self.ll_len(self.l_dict) == 0 + + def fullcheck(self): + assert self.ll_len(self.l_dict) == len(self.reference) + for key, value in self.reference.iteritems(): + assert (self.ll_getitem(self.l_dict, self.ll_key(key)) == + self.ll_value(value)) + +class MappingSM(GenericStateMachine): + def __init__(self): + self.space = None + + def st_setitem(self): + return builds(Action, + just('setitem'), tuples(self.st_keys, self.st_values)) + + def st_updateitem(self): + return builds(Action, + just('setitem'), + tuples(sampled_from(self.space.reference), self.st_values)) + + def st_delitem(self): + return builds(Action, + just('delitem'), tuples(sampled_from(self.space.reference))) + + def steps(self): + if not self.space: + return builds(Action, just('setup'), tuples(st_keys, st_values)) + global_actions = [Action('copydict', ()), Action('cleardict', ())] + if self.space.reference: + return ( + self.st_setitem() | sampled_from(global_actions) | + self.st_updateitem() | self.st_delitem()) + else: + return (self.st_setitem() | sampled_from(global_actions)) + + def execute_step(self, action): + if action.method == 'setup': + self.space = self.Space(*action.args) + self.st_keys = ann2strategy(self.space.s_key) + self.st_values = ann2strategy(self.space.s_value) + return + with signal_timeout(1): # catches infinite loops + action.execute(self.space) + + def teardown(self): + if self.space: + self.space.fullcheck() + + +class DictSpace(MappingSpace): + MappingRepr = rdict.DictRepr + new_reference = dict + ll_getitem = staticmethod(rdict.ll_dict_getitem) + ll_setitem = staticmethod(rdict.ll_dict_setitem) + ll_delitem = staticmethod(rdict.ll_dict_delitem) + ll_len = staticmethod(rdict.ll_dict_len) + 
ll_contains = staticmethod(rdict.ll_contains) + ll_copy = staticmethod(rdict.ll_copy) + ll_clear = staticmethod(rdict.ll_clear) + + def newdict(self, repr): + return rdict.ll_newdict(repr.DICT) + +class DictSM(MappingSM): + Space = DictSpace + +def test_hypothesis(): + run_state_machine_as_test( + DictSM, settings(max_examples=500, stateful_step_count=100)) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_rfloat.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_rfloat.py --- pypy-4.0.1+dfsg/rpython/rtyper/test/test_rfloat.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_rfloat.py 2016-03-19 16:40:12.000000000 +0000 @@ -204,6 +204,14 @@ res = self.ll_to_string(self.interpret(f, [10/3.0])) assert res == '3.33' + def test_formatd_g(self): + from rpython.rlib import rfloat + for flags in [0, rfloat.DTSF_ADD_DOT_0]: + def f(y): + return rfloat.formatd(y, 'g', 2, flags) + + assert self.ll_to_string(self.interpret(f, [3.0])) == f(3.0) + def test_formatd_repr(self): from rpython.rlib.rfloat import formatd def f(x): diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_rlist.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_rlist.py --- pypy-4.0.1+dfsg/rpython/rtyper/test/test_rlist.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_rlist.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,7 +3,7 @@ import py -from rpython.rlib.debug import ll_assert +from rpython.rtyper.debug import ll_assert from rpython.rtyper.error import TyperError from rpython.rtyper.llinterp import LLException, LLAssertFailure from rpython.rtyper.lltypesystem import rlist as ll_rlist @@ -394,6 +394,47 @@ assert res.item2 == 8 assert res.item3 == 7 + def dummyfn(): + l = [10, 9, 8, 7] + l[1:3] = [42] + return l[0], l[1], l[2] + res = self.interpret(dummyfn, ()) + assert res.item0 == 10 + assert res.item1 == 42 + assert res.item2 == 7 + + def dummyfn(): + l = [10, 9, 8, 7] + l[1:3] = [6, 5, 0] + return l[0], l[1], l[2], l[3], l[4] + res = 
self.interpret(dummyfn, ()) + assert res.item0 == 10 + assert res.item1 == 6 + assert res.item2 == 5 + assert res.item3 == 0 + assert res.item4 == 7 + + def dummyfn(): + l = [10, 9, 8, 7] + l[1:1] = [6, 5, 0] + return l[0], l[1], l[2], l[3], l[4], l[5], l[6] + res = self.interpret(dummyfn, ()) + assert res.item0 == 10 + assert res.item1 == 6 + assert res.item2 == 5 + assert res.item3 == 0 + assert res.item4 == 9 + assert res.item5 == 8 + assert res.item6 == 7 + + def dummyfn(): + l = [10, 9, 8, 7] + l[1:3] = [] + return l[0], l[1] + res = self.interpret(dummyfn, ()) + assert res.item0 == 10 + assert res.item1 == 7 + def test_delslice(self): def dummyfn(): l = [10, 9, 8, 7] diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_rordereddict.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_rordereddict.py --- pypy-4.0.1+dfsg/rpython/rtyper/test/test_rordereddict.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_rordereddict.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,14 +1,18 @@ - import py from collections import OrderedDict +from hypothesis import settings +from hypothesis.stateful import run_state_machine_as_test + from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem import rordereddict, rstr from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import llstr, hlstr -from rpython.rtyper.test.test_rdict import BaseTestRDict +from rpython.rtyper.test.test_rdict import ( + BaseTestRDict, MappingSpace, MappingSM) from rpython.rlib import objectmodel +rodct = rordereddict def get_indexes(ll_d): return ll_d.indexes._obj.container._as_ptr() @@ -330,124 +334,48 @@ assert res == 6 -class TestStress: +class ODictSpace(MappingSpace): + MappingRepr = rodct.OrderedDictRepr + new_reference = OrderedDict + ll_getitem = staticmethod(rodct.ll_dict_getitem) + ll_setitem = staticmethod(rodct.ll_dict_setitem) + ll_delitem = staticmethod(rodct.ll_dict_delitem) + ll_len = staticmethod(rodct.ll_dict_len) + 
ll_contains = staticmethod(rodct.ll_dict_contains) + ll_copy = staticmethod(rodct.ll_dict_copy) + ll_clear = staticmethod(rodct.ll_dict_clear) + + def newdict(self, repr): + return rodct.ll_newdict(repr.DICT) - def test_stress(self): - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - from rpython.rtyper import rint - from rpython.rtyper.test.test_rdict import not_really_random - rodct = rordereddict - dictrepr = rodct.OrderedDictRepr( - None, rint.signed_repr, rint.signed_repr, - DictKey(None, annmodel.SomeInteger()), - DictValue(None, annmodel.SomeInteger())) - dictrepr.setup() - l_dict = rodct.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - value = 0 - - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rodct.ll_dict_getitem(l_dict, n) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue - - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rodct.ll_dict_delitem(l_dict, n) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - rodct.ll_dict_setitem(l_dict, n, value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = value - value += 1 - else: - try: - gotvalue = rodct.ll_dict_getitem(l_dict, n) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_live_items == referencelength - complete_check() - - def test_stress_2(self): - yield self.stress_combination, True, False - yield self.stress_combination, False, True - yield self.stress_combination, False, False - yield self.stress_combination, True, True - - def stress_combination(self, key_can_be_none, value_can_be_none): - from 
rpython.rtyper.lltypesystem.rstr import string_repr - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - from rpython.rtyper.test.test_rdict import not_really_random - rodct = rordereddict - - print - print "Testing combination with can_be_None: keys %s, values %s" % ( - key_can_be_none, value_can_be_none) - - class PseudoRTyper: - cache_dummy_values = {} - dictrepr = rodct.OrderedDictRepr( - PseudoRTyper(), string_repr, string_repr, - DictKey(None, annmodel.SomeString(key_can_be_none)), - DictValue(None, annmodel.SomeString(value_can_be_none))) - dictrepr.setup() - print dictrepr.lowleveltype - #for key, value in dictrepr.DICTENTRY._adtmeths.items(): - # print ' %s = %s' % (key, value) - l_dict = rodct.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - values = not_really_random() - keytable = [string_repr.convert_const("foo%d" % n) - for n in range(len(referencetable))] - - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue - - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rodct.ll_dict_delitem(l_dict, keytable[n]) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - ll_value = string_repr.convert_const(str(values.next())) - rodct.ll_dict_setitem(l_dict, keytable[n], ll_value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = ll_value - else: - try: - gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_live_items == referencelength - complete_check() + 
def get_keys(self): + DICT = lltype.typeOf(self.l_dict).TO + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + ll_iter = rordereddict.ll_dictiter(ITER, self.l_dict) + ll_dictnext = rordereddict._ll_dictnext + keys_ll = [] + while True: + try: + num = ll_dictnext(ll_iter) + keys_ll.append(self.l_dict.entries[num].key) + except StopIteration: + break + return keys_ll + + def fullcheck(self): + # overridden to also check key order + assert self.ll_len(self.l_dict) == len(self.reference) + keys_ll = self.get_keys() + assert len(keys_ll) == len(self.reference) + for key, ll_key in zip(self.reference, keys_ll): + assert self.ll_key(key) == ll_key + assert (self.ll_getitem(self.l_dict, self.ll_key(key)) == + self.ll_value(self.reference[key])) + + +class ODictSM(MappingSM): + Space = ODictSpace + +def test_hypothesis(): + run_state_machine_as_test( + ODictSM, settings(max_examples=500, stateful_step_count=100)) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/test_rpbc.py pypy-5.0.1+dfsg/rpython/rtyper/test/test_rpbc.py --- pypy-4.0.1+dfsg/rpython/rtyper/test/test_rpbc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/test_rpbc.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,7 @@ import py from rpython.annotator import model as annmodel -from rpython.annotator import policy, specialize +from rpython.annotator import specialize from rpython.rtyper.lltypesystem.lltype import typeOf from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation @@ -1690,59 +1690,6 @@ # ____________________________________________________________ -class TestRPBCExtra(BaseRtypingTest): - - def test_folding_specialize_support(self): - - class S(object): - - def w(s, x): - if isinstance(x, int): - return x - if isinstance(x, str): - return len(x) - return -1 - w._annspecialcase_ = "specialize:w" - - def _freeze_(self): - return True - - s = S() - - def f(i, n): - w = s.w - if i == 0: - return w(0) - 
elif i == 1: - return w("abc") - elif i == 2: - return w(3*n) - elif i == 3: - return w(str(n)) - return -1 - - class P(policy.AnnotatorPolicy): - def specialize__w(pol, funcdesc, args_s): - typ = args_s[1].knowntype - if args_s[0].is_constant() and args_s[1].is_constant(): - x = args_s[1].const - v = s.w(x) - builder = specialize.make_constgraphbuilder(2, v) - return funcdesc.cachedgraph(x, builder=builder) - return funcdesc.cachedgraph(typ) - - p = P() - - res = self.interpret(f, [0, 66], policy=p) - assert res == 0 - res = self.interpret(f, [1, 66], policy=p) - assert res == 3 - res = self.interpret(f, [2, 4], policy=p) - assert res == 12 - res = self.interpret(f, [3, 5555], policy=p) - assert res == 4 - - def test_hlinvoke_simple(): def f(a,b): return a + b @@ -1998,7 +1945,7 @@ def interpret(self, fn, args, **kwds): kwds['config'] = self.config - return TestRPBC.interpret(self, fn, args, **kwds) + return TestRPBC.interpret(fn, args, **kwds) def test_smallfuncsets_basic(): from rpython.translator.translator import TranslationContext, graphof diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/test/tool.py pypy-5.0.1+dfsg/rpython/rtyper/test/tool.py --- pypy-4.0.1+dfsg/rpython/rtyper/test/tool.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/test/tool.py 2016-03-19 16:40:12.000000000 +0000 @@ -5,22 +5,27 @@ class BaseRtypingTest(object): FLOAT_PRECISION = 8 - def gengraph(self, func, argtypes=[], viewbefore='auto', policy=None, + @staticmethod + def gengraph(func, argtypes=[], viewbefore='auto', policy=None, backendopt=False, config=None): return gengraph(func, argtypes, viewbefore, policy, backendopt=backendopt, config=config) - def interpret(self, fn, args, **kwds): + @staticmethod + def interpret(fn, args, **kwds): return interpret(fn, args, **kwds) - def interpret_raises(self, exc, fn, args, **kwds): + @staticmethod + def interpret_raises(exc, fn, args, **kwds): return interpret_raises(exc, fn, args, **kwds) - def float_eq(self, x, y): + 
@staticmethod + def float_eq(x, y): return x == y - def float_eq_approx(self, x, y): - maxError = 10**-self.FLOAT_PRECISION + @classmethod + def float_eq_approx(cls, x, y): + maxError = 10**-cls.FLOAT_PRECISION if abs(x-y) < maxError: return True @@ -31,45 +36,66 @@ return relativeError < maxError - def is_of_type(self, x, type_): + @staticmethod + def is_of_type(x, type_): return type(x) is type_ - def _skip_llinterpreter(self, reason): + @staticmethod + def _skip_llinterpreter(reason): py.test.skip("lltypesystem doesn't support %s, yet" % reason) - def ll_to_string(self, s): + @staticmethod + def ll_to_string(s): if not s: return None return ''.join(s.chars) - def ll_to_unicode(self, s): + @staticmethod + def ll_to_unicode(s): return u''.join(s.chars) - def string_to_ll(self, s): - from rpython.rtyper.module.support import LLSupport - return LLSupport.to_rstr(s) - - def unicode_to_ll(self, s): - from rpython.rtyper.module.support import LLSupport - return LLSupport.to_runicode(s) + @staticmethod + def string_to_ll(s): + from rpython.rtyper.lltypesystem.rstr import STR, mallocstr + if s is None: + return lltype.nullptr(STR) + p = mallocstr(len(s)) + for i in range(len(s)): + p.chars[i] = s[i] + return p + + @staticmethod + def unicode_to_ll(s): + from rpython.rtyper.lltypesystem.rstr import UNICODE, mallocunicode + if s is None: + return lltype.nullptr(UNICODE) + p = mallocunicode(len(s)) + for i in range(len(s)): + p.chars[i] = s[i] + return p - def ll_to_list(self, l): + @staticmethod + def ll_to_list(l): r = [] items = l.ll_items() for i in range(l.ll_length()): r.append(items[i]) return r - def ll_unpack_tuple(self, t, length): + @staticmethod + def ll_unpack_tuple(t, length): return tuple([getattr(t, 'item%d' % i) for i in range(length)]) - def get_callable(self, fnptr): + @staticmethod + def get_callable(fnptr): return fnptr._obj._callable - def class_name(self, value): + @staticmethod + def class_name(value): return ''.join(value.super.typeptr.name.chars) - 
def read_attr(self, value, attr_name): + @staticmethod + def read_attr(value, attr_name): value = value._obj while value is not None: attr = getattr(value, "inst_" + attr_name, None) @@ -79,6 +105,7 @@ return attr raise AttributeError() - def is_of_instance_type(self, val): + @staticmethod + def is_of_instance_type(val): T = lltype.typeOf(val) return isinstance(T, lltype.Ptr) and isinstance(T.TO, lltype.GcStruct) diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/tool/rffi_platform.py pypy-5.0.1+dfsg/rpython/rtyper/tool/rffi_platform.py --- pypy-4.0.1+dfsg/rpython/rtyper/tool/rffi_platform.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/tool/rffi_platform.py 2016-03-19 16:40:12.000000000 +0000 @@ -263,10 +263,11 @@ """An entry in a CConfig class that stands for an externally defined structure. """ - def __init__(self, name, interesting_fields, ifdef=None): + def __init__(self, name, interesting_fields, ifdef=None, adtmeths={}): self.name = name self.interesting_fields = interesting_fields self.ifdef = ifdef + self.adtmeths = adtmeths def prepare_code(self): if self.ifdef is not None: @@ -313,7 +314,9 @@ offset = info['fldofs ' + fieldname] size = info['fldsize ' + fieldname] sign = info.get('fldunsigned ' + fieldname, False) - if (size, sign) != rffi.size_and_sign(fieldtype): + if is_array_nolength(fieldtype): + pass # ignore size and sign + elif (size, sign) != rffi.size_and_sign(fieldtype): fieldtype = fixup_ctype(fieldtype, fieldname, (size, sign)) layout_addfield(layout, offset, fieldtype, fieldname) @@ -353,7 +356,7 @@ name = name[7:] else: hints['typedef'] = True - kwds = {'hints': hints} + kwds = {'hints': hints, 'adtmeths': self.adtmeths} return rffi.CStruct(name, *fields, **kwds) class SimpleType(CConfigEntry): @@ -682,8 +685,14 @@ def __repr__(self): return '' % (self.name, self.ctype) +def is_array_nolength(TYPE): + return isinstance(TYPE, lltype.Array) and TYPE._hints.get('nolength', False) + def layout_addfield(layout, offset, ctype, 
prefix): - size = _sizeof(ctype) + if is_array_nolength(ctype): + size = len(layout) - offset # all the rest of the struct + else: + size = _sizeof(ctype) name = prefix i = 0 while name in layout: diff -Nru pypy-4.0.1+dfsg/rpython/rtyper/tool/test/test_rffi_platform.py pypy-5.0.1+dfsg/rpython/rtyper/tool/test/test_rffi_platform.py --- pypy-4.0.1+dfsg/rpython/rtyper/tool/test/test_rffi_platform.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/rtyper/tool/test/test_rffi_platform.py 2016-03-19 16:40:12.000000000 +0000 @@ -270,6 +270,19 @@ [("d_name", lltype.FixedSizeArray(rffi.CHAR, 1))]) assert dirent.c_d_name.length == 32 +def test_array_varsized_struct(): + dirent = rffi_platform.getstruct("struct dirent", + """ + struct dirent /* for this example only, not the exact dirent */ + { + int d_off; + char d_name[1]; + }; + """, + [("d_name", rffi.CArray(rffi.CHAR))]) + assert rffi.offsetof(dirent, 'c_d_name') == 4 + assert dirent.c_d_name == rffi.CArray(rffi.CHAR) + def test_has_0001(): assert rffi_platform.has("x", "int x = 3;") assert not rffi_platform.has("x", "") @@ -277,10 +290,14 @@ assert not rffi_platform.has("x", "#include ") def test_has_0002(): + if platform.name == 'msvc': + py.test.skip('no m.lib in msvc') assert rffi_platform.has("pow", "#include ", libraries=["m"]) def test_has_0003(): """multiple libraries""" + if platform.name == 'msvc': + py.test.skip('no m.lib in msvc') assert rffi_platform.has("pow", "#include ", libraries=["m", "c"]) def test_has_0004(): diff -Nru pypy-4.0.1+dfsg/rpython/tool/jitlogparser/parser.py pypy-5.0.1+dfsg/rpython/tool/jitlogparser/parser.py --- pypy-4.0.1+dfsg/rpython/tool/jitlogparser/parser.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/tool/jitlogparser/parser.py 2016-03-19 16:40:12.000000000 +0000 @@ -167,7 +167,6 @@ def update_memo(self, val, name): pass - class NonCodeError(Exception): pass diff -Nru pypy-4.0.1+dfsg/rpython/tool/jitlogparser/test/logtest2.log 
pypy-5.0.1+dfsg/rpython/tool/jitlogparser/test/logtest2.log --- pypy-4.0.1+dfsg/rpython/tool/jitlogparser/test/logtest2.log 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/tool/jitlogparser/test/logtest2.log 2016-03-19 16:40:12.000000000 +0000 @@ -139,7 +139,7 @@ debug_merge_point(0, 0, ' #12 LOAD_CONST') +289: guard_value(p4, ConstPtr(ptr22), descr=) [p1, p0, p4, p2, p3, p6, p11, p13, p17] debug_merge_point(0, 0, ' #15 COMPARE_OP') -+308: i23 = getfield_gc_pure_i(p11, descr=) ++308: i23 = getfield_gc_i(p11, descr=) +312: i25 = int_lt(i23, 10) guard_true(i25, descr=) [p1, p0, p11, p2, p3, p6, p13] debug_merge_point(0, 0, ' #18 POP_JUMP_IF_FALSE') @@ -285,9 +285,9 @@ +283: p23 = getfield_gc_r(p21, descr=) +287: guard_class(p23, 26517736, descr=) [p1, p0, p15, i22, p23, p21, p2, p3, p4, i5, p6, p11, p13, p17] +299: p25 = getfield_gc_r(p21, descr=) -+303: i26 = getfield_gc_pure_i(p25, descr=) -+307: i27 = getfield_gc_pure_i(p25, descr=) -+311: i28 = getfield_gc_pure_i(p25, descr=) ++303: i26 = getfield_gc_i(p25, descr=) ++307: i27 = getfield_gc_i(p25, descr=) ++311: i28 = getfield_gc_i(p25, descr=) +315: i30 = int_lt(i22, 0) guard_false(i30, descr=) [p1, p0, p15, i22, i28, i27, i26, p2, p3, p4, i5, p6, p11, p13, p17] +325: i31 = int_ge(i22, i28) diff -Nru pypy-4.0.1+dfsg/rpython/tool/pairtype.py pypy-5.0.1+dfsg/rpython/tool/pairtype.py --- pypy-4.0.1+dfsg/rpython/tool/pairtype.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/tool/pairtype.py 2016-03-19 16:40:12.000000000 +0000 @@ -94,3 +94,40 @@ def __setitem__(self, clspair, value): self._registry[clspair] = value self._cache = self._registry.copy() + +def doubledispatch(func): + """ + Decorator returning a double-dispatch function + + Usage + ----- + >>> @doubledispatch + ... def func(x, y): + ... return 0 + >>> @func.register(basestring, basestring) + ... def func_string_string(x, y): + ... 
return 42 + >>> func(1, 2) + 0 + >>> func('x', u'y') + 42 + """ + return DoubleDispatchFunction(func) + +class DoubleDispatchFunction(object): + def __init__(self, func): + self._registry = DoubleDispatchRegistry() + self._default = func + + def __call__(self, arg1, arg2, *args, **kwargs): + try: + func = self._registry[type(arg1), type(arg2)] + except KeyError: + func = self._default + return func(arg1, arg2, *args, **kwargs) + + def register(self, cls1, cls2): + def decorator(func): + self._registry[cls1, cls2] = func + return func + return decorator diff -Nru pypy-4.0.1+dfsg/rpython/tool/runsubprocess.py pypy-5.0.1+dfsg/rpython/tool/runsubprocess.py --- pypy-4.0.1+dfsg/rpython/tool/runsubprocess.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/tool/runsubprocess.py 2016-03-19 16:40:15.000000000 +0000 @@ -9,6 +9,8 @@ from subprocess import PIPE, Popen def run_subprocess(executable, args, env=None, cwd=None): + if isinstance(args, list): + args = [a.encode('latin1') for a in args] return _run(executable, args, env, cwd) shell_default = False diff -Nru pypy-4.0.1+dfsg/rpython/tool/test/test_pairtype.py pypy-5.0.1+dfsg/rpython/tool/test/test_pairtype.py --- pypy-4.0.1+dfsg/rpython/tool/test/test_pairtype.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/tool/test/test_pairtype.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,6 @@ from rpython.tool.pairtype import ( - pairtype, pair, extendabletype, pairmro, DoubleDispatchRegistry) + pairtype, pair, extendabletype, pairmro, DoubleDispatchRegistry, + doubledispatch) def test_binop(): ### Binary operation example @@ -115,7 +116,7 @@ parent_pairtypes = pairtype(A3, B2).__mro__[:-2] assert (tuple(pairtype(a, b) for a, b in pairmro(A3, B2)) == parent_pairtypes) -def test_doubledispatch(): +def test_doubledispatch_registry(): class A(object): pass class A2(A): pass class A3(A2): pass @@ -129,3 +130,15 @@ assert reg[A3, B2] == "A2-B2" reg[A3, B] = "A3-B" assert reg[A3, B2] == "A2-B2" # 
note that A2,B2 wins over A3,B + +def test_doubledispatch_function(): + @doubledispatch + def f(x, y, z): + return z + + @f.register(int, int) + def f_int(x, y, z): + return 42 + + assert f(1., 1., 0) == 0 + assert f(1, 1, 0) == 42 diff -Nru pypy-4.0.1+dfsg/rpython/translator/backendopt/canraise.py pypy-5.0.1+dfsg/rpython/translator/backendopt/canraise.py --- pypy-4.0.1+dfsg/rpython/translator/backendopt/canraise.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/backendopt/canraise.py 2016-03-19 16:40:15.000000000 +0000 @@ -22,8 +22,7 @@ log.WARNING("Unknown operation: %s" % op.opname) return True - def analyze_external_call(self, op, seen=None): - fnobj = op.args[0].value._obj + def analyze_external_call(self, fnobj, seen=None): return getattr(fnobj, 'canraise', True) analyze_exceptblock = None # don't call this diff -Nru pypy-4.0.1+dfsg/rpython/translator/backendopt/finalizer.py pypy-5.0.1+dfsg/rpython/translator/backendopt/finalizer.py --- pypy-4.0.1+dfsg/rpython/translator/backendopt/finalizer.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/backendopt/finalizer.py 2016-03-19 16:40:12.000000000 +0000 @@ -18,7 +18,7 @@ """ ok_operations = ['ptr_nonzero', 'ptr_eq', 'ptr_ne', 'free', 'same_as', 'direct_ptradd', 'force_cast', 'track_alloc_stop', - 'raw_free'] + 'raw_free', 'adr_eq', 'adr_ne'] def analyze_light_finalizer(self, graph): result = self.analyze_direct_call(graph) diff -Nru pypy-4.0.1+dfsg/rpython/translator/backendopt/gilanalysis.py pypy-5.0.1+dfsg/rpython/translator/backendopt/gilanalysis.py --- pypy-4.0.1+dfsg/rpython/translator/backendopt/gilanalysis.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/backendopt/gilanalysis.py 2016-03-19 16:40:12.000000000 +0000 @@ -21,12 +21,8 @@ self, graph, seen) def analyze_external_call(self, op, seen=None): - funcobj = op.args[0].value._obj - if getattr(funcobj, 'transactionsafe', False): - return False - else: - return False - + 
return False + def analyze_simple_operation(self, op, graphinfo): return False diff -Nru pypy-4.0.1+dfsg/rpython/translator/backendopt/graphanalyze.py pypy-5.0.1+dfsg/rpython/translator/backendopt/graphanalyze.py --- pypy-4.0.1+dfsg/rpython/translator/backendopt/graphanalyze.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/backendopt/graphanalyze.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,5 +1,4 @@ from rpython.rtyper.lltypesystem.lltype import DelayedPointer -from rpython.translator.simplify import get_graph from rpython.tool.algo.unionfind import UnionFind @@ -55,11 +54,7 @@ def analyze_startblock(self, block, seen=None): return self.bottom_result() - def analyze_external_call(self, op, seen=None): - try: - funcobj = op.args[0].value._obj - except DelayedPointer: - return self.bottom_result() + def analyze_external_call(self, funcobj, seen=None): result = self.bottom_result() if hasattr(funcobj, '_callbacks'): bk = self.translator.annotator.bookkeeper @@ -80,12 +75,24 @@ def analyze(self, op, seen=None, graphinfo=None): if op.opname == "direct_call": - graph = get_graph(op.args[0], self.translator) - if graph is None: - x = self.analyze_external_call(op, seen) + try: + funcobj = op.args[0].value._obj + except DelayedPointer: + return self.top_result() + if funcobj is None: + # We encountered a null pointer. Calling it will crash. + # However, the call could be on a dead path, so we return the + # bottom result here. 
+ return self.bottom_result() + if getattr(funcobj, 'external', None) is not None: + x = self.analyze_external_call(funcobj, seen) if self.verbose and x: self.dump_info('analyze_external_call %s: %r' % (op, x)) return x + try: + graph = funcobj.graph + except AttributeError: + return self.top_result() x = self.analyze_direct_call(graph, seen) if self.verbose and x: self.dump_info('analyze_direct_call(%s): %r' % (graph, x)) diff -Nru pypy-4.0.1+dfsg/rpython/translator/backendopt/test/test_canraise.py pypy-5.0.1+dfsg/rpython/translator/backendopt/test/test_canraise.py --- pypy-4.0.1+dfsg/rpython/translator/backendopt/test/test_canraise.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/backendopt/test/test_canraise.py 2016-03-19 16:40:12.000000000 +0000 @@ -204,8 +204,7 @@ result = ra.can_raise(fgraph.startblock.operations[0]) assert not result - z = lltype.functionptr(lltype.FuncType([lltype.Signed], lltype.Signed), - 'foobar') + z = llexternal('z', [lltype.Signed], lltype.Signed) def g(x): return z(x) t, ra = self.translate(g, [int]) diff -Nru pypy-4.0.1+dfsg/rpython/translator/backendopt/test/test_graphanalyze.py pypy-5.0.1+dfsg/rpython/translator/backendopt/test/test_graphanalyze.py --- pypy-4.0.1+dfsg/rpython/translator/backendopt/test/test_graphanalyze.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/backendopt/test/test_graphanalyze.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,7 @@ import random from rpython.tool.algo.unionfind import UnionFind -from rpython.translator.backendopt.graphanalyze import Dependency -from rpython.translator.backendopt.graphanalyze import DependencyTracker +from rpython.translator.backendopt.graphanalyze import (Dependency, + DependencyTracker, BoolGraphAnalyzer) class FakeGraphAnalyzer: @@ -49,3 +49,30 @@ method1 = rectrack(n, tracker) method2 = expected(n) assert method1 == method2 + + +def test_delayed_fnptr(): + from rpython.flowspace.model import SpaceOperation + from 
rpython.rtyper.annlowlevel import MixLevelHelperAnnotator + from rpython.translator.translator import TranslationContext + t = TranslationContext() + t.buildannotator() + t.buildrtyper() + annhelper = MixLevelHelperAnnotator(t.rtyper) + def f(): + pass + c_f = annhelper.constfunc(f, [], None) + op = SpaceOperation('direct_call', [c_f], None) + analyzer = BoolGraphAnalyzer(t) + assert analyzer.analyze(op) + + +def test_null_fnptr(): + from rpython.flowspace.model import SpaceOperation, Constant + from rpython.rtyper.lltypesystem.lltype import Void, FuncType, nullptr + from rpython.translator.translator import TranslationContext + t = TranslationContext() + fnptr = nullptr(FuncType([], Void)) + op = SpaceOperation('direct_call', [Constant(fnptr)], None) + analyzer = BoolGraphAnalyzer(t) + assert not analyzer.analyze(op) diff -Nru pypy-4.0.1+dfsg/rpython/translator/backendopt/test/test_mallocprediction.py pypy-5.0.1+dfsg/rpython/translator/backendopt/test/test_mallocprediction.py --- pypy-4.0.1+dfsg/rpython/translator/backendopt/test/test_mallocprediction.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/backendopt/test/test_mallocprediction.py 2016-03-19 16:40:12.000000000 +0000 @@ -18,7 +18,7 @@ if option.view: t.view() return t, graph - + def check_inlining(t, graph, args, result): callgraph, caller_candidates = find_malloc_removal_candidates(t, t.graphs) @@ -61,7 +61,7 @@ pass class B(A): pass - def g2(b, i): + def g2(b, i): b.i = h(i) def g1(a, b, i): a.b = b @@ -123,7 +123,7 @@ fgraph = graphof(t, f) hgraph = graphof(t, h) assert callgraph == {graph: {fgraph: True}, fgraph: {hgraph: True}} - + def test_indirect_call(): class A(object): pass @@ -169,14 +169,14 @@ t, graph = rtype(entrypoint, [int]) total0 = preparation(t, t.graphs, heuristic=heuristic) total = clever_inlining_and_malloc_removal(t) - assert total == 5 # XXX total0 appears to vary + assert total == 6 # XXX total0 appears to vary def test_richards(): from 
rpython.translator.goal.richards import entry_point t, graph = rtype(entry_point, [int]) total0 = preparation(t, t.graphs) total = clever_inlining_and_malloc_removal(t) - assert total0 + total == 9 + assert total0 + total == 10 def test_loop(): l = [10, 12, 15, 1] diff -Nru pypy-4.0.1+dfsg/rpython/translator/backendopt/test/test_malloc.py pypy-5.0.1+dfsg/rpython/translator/backendopt/test/test_malloc.py --- pypy-4.0.1+dfsg/rpython/translator/backendopt/test/test_malloc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/backendopt/test/test_malloc.py 2016-03-19 16:40:12.000000000 +0000 @@ -159,7 +159,7 @@ def __del__(self): delcalls[0] += 1 - os.write(1, "__del__\n") + #os.write(1, "__del__\n") def f(x=int): a = A() diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/database.py pypy-5.0.1+dfsg/rpython/translator/c/database.py --- pypy-4.0.1+dfsg/rpython/translator/c/database.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/database.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,3 +1,4 @@ +from collections import OrderedDict from rpython.rtyper.lltypesystem.lltype import (Primitive, Ptr, typeOf, RuntimeTypeInfo, Struct, Array, FuncType, Void, ContainerType, OpaqueType, @@ -8,9 +9,9 @@ from rpython.rtyper.lltypesystem import llgroup from rpython.tool.sourcetools import valid_identifier from rpython.translator.c.primitive import PrimitiveName, PrimitiveType -from rpython.translator.c.node import StructDefNode, ArrayDefNode -from rpython.translator.c.node import FixedSizeArrayDefNode, BareBoneArrayDefNode -from rpython.translator.c.node import ContainerNodeFactory, ExtTypeOpaqueDefNode +from rpython.translator.c.node import ( + StructDefNode, ArrayDefNode, FixedSizeArrayDefNode, BareBoneArrayDefNode, + ContainerNodeFactory, ExtTypeOpaqueDefNode, FuncNode) from rpython.translator.c.support import cdecl, CNameManager from rpython.translator.c.support import log, barebonearray from rpython.translator.c.extfunc import 
do_the_getting @@ -28,6 +29,7 @@ def __init__(self, translator=None, standalone=False, gcpolicyclass=None, + exctransformer=None, thread_enabled=False, sandbox=False): self.translator = translator @@ -36,6 +38,7 @@ if gcpolicyclass is None: gcpolicyclass = gc.RefcountingGcPolicy self.gcpolicy = gcpolicyclass(self, thread_enabled) + self.exctransformer = exctransformer self.structdefnodes = {} self.pendingsetupnodes = [] @@ -45,7 +48,7 @@ self.delayedfunctionptrs = [] self.completedcontainers = 0 self.containerstats = {} - self.helper2ptr = {} + self.helpers = OrderedDict() # late_initializations is for when the value you want to # assign to a constant object is something C doesn't think is @@ -53,12 +56,8 @@ self.late_initializations = [] self.namespace = CNameManager() - if translator is None or translator.rtyper is None: - self.exctransformer = None - else: - self.exctransformer = translator.getexceptiontransformer() if translator is not None: - self.gctransformer = self.gcpolicy.gettransformer() + self.gctransformer = self.gcpolicy.gettransformer(translator) self.completed = False self.instrument_ncounter = 0 @@ -348,6 +347,8 @@ assert not self.delayedfunctionptrs self.completed = True + if self.gctransformer is not None and self.gctransformer.inline: + self.gctransformer.inline_helpers(self.all_graphs()) if show_progress: dump() log.database("Completed") @@ -379,27 +380,10 @@ produce(node) return result - def need_sandboxing(self, fnobj): - if not self.sandbox: - return False - if hasattr(fnobj, '_safe_not_sandboxed'): - return not fnobj._safe_not_sandboxed - else: - return "if_external" - - def prepare_inline_helpers(self): - all_nodes = self.globalcontainers() - funcnodes = [node for node in all_nodes if node.nodekind == 'func'] - graphs = [] - for node in funcnodes: - for graph in node.graphs_to_patch(): - graphs.append(graph) - self.gctransformer.prepare_inline_helpers(graphs) - def all_graphs(self): graphs = [] for node in self.containerlist: - if 
node.nodekind == 'func': + if isinstance(node, FuncNode): for graph in node.graphs_to_patch(): graphs.append(graph) return graphs diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/external.py pypy-5.0.1+dfsg/rpython/translator/c/external.py --- pypy-4.0.1+dfsg/rpython/translator/c/external.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/external.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -from rpython.rtyper.lltypesystem.lltype import typeOf, Void -from rpython.translator.c.support import USESLOTS # set to False if necessary while refactoring -from rpython.translator.c.support import cdecl, somelettersfrom - -class CExternalFunctionCodeGenerator(object): - if USESLOTS: - __slots__ = """db fnptr FUNCTYPE argtypenames resulttypename""".split() - - def __init__(self, fnptr, db): - self.fnptr = fnptr - self.db = db - self.FUNCTYPE = typeOf(fnptr) - assert Void not in self.FUNCTYPE.ARGS - self.argtypenames = [db.gettype(T) for T in self.FUNCTYPE.ARGS] - self.resulttypename = db.gettype(self.FUNCTYPE.RESULT) - - def graphs_to_patch(self): - return [] - - def name(self, cname): #virtual - return cname - - def argnames(self): - return ['%s%d' % (somelettersfrom(self.argtypenames[i]), i) - for i in range(len(self.argtypenames))] - - def allconstantvalues(self): - return [] - - def implementation_begin(self): - pass - - def cfunction_declarations(self): - if self.FUNCTYPE.RESULT is not Void: - yield '%s;' % cdecl(self.resulttypename, 'result') - - def cfunction_body(self): - try: - convert_params = self.fnptr.convert_params - except AttributeError: - convert_params = lambda backend, args: [arg for _,arg in args] - call = '%s(%s)' % (self.fnptr._name, ', '.join(convert_params("c", zip(self.FUNCTYPE.ARGS, self.argnames())))) - if self.FUNCTYPE.RESULT is not Void: - yield 'result = %s;' % call - yield 'if (PyErr_Occurred()) RPyConvertExceptionFromCPython();' - yield 'return result;' - else: - yield '%s;' % call - yield 'if 
(PyErr_Occurred()) RPyConvertExceptionFromCPython();' - - def implementation_end(self): - pass - -assert not USESLOTS or '__dict__' not in dir(CExternalFunctionCodeGenerator) diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/extfunc.py pypy-5.0.1+dfsg/rpython/translator/c/extfunc.py --- pypy-4.0.1+dfsg/rpython/translator/c/extfunc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/extfunc.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,23 +1,23 @@ import types from rpython.flowspace.model import FunctionGraph -from rpython.rtyper.lltypesystem import lltype, rstr, rlist +from rpython.annotator.listdef import s_list_of_strings +from rpython.rtyper.lltypesystem import lltype, rlist from rpython.rtyper.lltypesystem.rstr import STR, mallocstr from rpython.translator.c.support import cdecl def find_list_of_str(rtyper): - for r in rtyper.reprs.itervalues(): - if isinstance(r, rlist.ListRepr) and r.item_repr is rstr.string_repr: - return r.lowleveltype.TO - return None + r_strlist = rtyper.getrepr(s_list_of_strings) + rtyper.call_all_setups() + return r_strlist.lowleveltype.TO + def predeclare_common_types(db, rtyper): # Common types yield ('RPyString', STR) LIST_OF_STR = find_list_of_str(rtyper) - if LIST_OF_STR is not None: - yield ('RPyListOfString', LIST_OF_STR) + yield ('RPyListOfString', LIST_OF_STR) def predeclare_utility_functions(db, rtyper): # Common utility functions @@ -32,40 +32,38 @@ # returned directly as results LIST_OF_STR = find_list_of_str(rtyper) - if LIST_OF_STR is not None: - p = lltype.Ptr(LIST_OF_STR) + p = lltype.Ptr(LIST_OF_STR) - def _RPyListOfString_New(length=lltype.Signed): - return LIST_OF_STR.ll_newlist(length) + def _RPyListOfString_New(length=lltype.Signed): + return LIST_OF_STR.ll_newlist(length) - def _RPyListOfString_SetItem(l=p, - index=lltype.Signed, - newstring=lltype.Ptr(STR)): - rlist.ll_setitem_nonneg(rlist.dum_nocheck, l, index, newstring) - - def _RPyListOfString_GetItem(l=p, - index=lltype.Signed): - 
return rlist.ll_getitem_fast(l, index) + def _RPyListOfString_SetItem(l=p, + index=lltype.Signed, + newstring=lltype.Ptr(STR)): + rlist.ll_setitem_nonneg(rlist.dum_nocheck, l, index, newstring) + + def _RPyListOfString_GetItem(l=p, + index=lltype.Signed): + return rlist.ll_getitem_fast(l, index) - def _RPyListOfString_Length(l=p): - return rlist.ll_length(l) + def _RPyListOfString_Length(l=p): + return rlist.ll_length(l) for fname, f in locals().items(): if isinstance(f, types.FunctionType): # XXX this is painful :( - if (LIST_OF_STR, fname) in db.helper2ptr: - yield (fname, db.helper2ptr[LIST_OF_STR, fname]) + if fname in db.helpers: + yield (fname, db.helpers[fname]) else: # hack: the defaults give the type of the arguments graph = rtyper.annotate_helper(f, f.func_defaults) - db.helper2ptr[LIST_OF_STR, fname] = graph + db.helpers[fname] = graph yield (fname, graph) -def predeclare_exception_data(db, rtyper): +def predeclare_exception_data(exctransformer, rtyper): # Exception-related types and constants exceptiondata = rtyper.exceptiondata - exctransformer = db.exctransformer yield ('RPYTHON_EXCEPTION_VTABLE', exceptiondata.lltype_of_exception_type) yield ('RPYTHON_EXCEPTION', exceptiondata.lltype_of_exception_value) @@ -93,19 +91,19 @@ def predeclare_all(db, rtyper): for fn in [predeclare_common_types, predeclare_utility_functions, - predeclare_exception_data, ]: for t in fn(db, rtyper): yield t + exctransformer = db.exctransformer + for t in predeclare_exception_data(exctransformer, rtyper): + yield t + def get_all(db, rtyper): - for fn in [predeclare_common_types, - predeclare_utility_functions, - predeclare_exception_data, - ]: - for t in fn(db, rtyper): - yield t[1] + for name, fnptr in predeclare_all(db, rtyper): + yield fnptr + # ____________________________________________________________ diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/funcgen.py pypy-5.0.1+dfsg/rpython/translator/c/funcgen.py --- pypy-4.0.1+dfsg/rpython/translator/c/funcgen.py 2015-11-19 
19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/funcgen.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,9 +1,8 @@ import sys -from rpython.translator.c.support import USESLOTS # set to False if necessary while refactoring from rpython.translator.c.support import cdecl from rpython.translator.c.support import llvalue_from_constant, gen_assignments from rpython.translator.c.support import c_string_constant, barebonearray -from rpython.flowspace.model import Variable, Constant, copygraph +from rpython.flowspace.model import Variable, Constant from rpython.rtyper.lltypesystem.lltype import (Ptr, Void, Bool, Signed, Unsigned, SignedLongLong, Float, UnsignedLongLong, Char, UniChar, ContainerType, Array, FixedSizeArray, ForwardReference, FuncType) @@ -19,39 +18,30 @@ KEEP_INLINED_GRAPHS = False +def make_funcgen(graph, db, exception_policy, functionname): + graph._seen_by_the_backend = True + # apply the exception transformation + if db.exctransformer: + db.exctransformer.create_exception_handling(graph) + # apply the gc transformation + if db.gctransformer: + db.gctransformer.transform_graph(graph) + return FunctionCodeGenerator(graph, db, exception_policy, functionname) + class FunctionCodeGenerator(object): """ Collects information about a function which we have to generate from a flow graph. 
""" - if USESLOTS: - __slots__ = """graph db gcpolicy - exception_policy - more_ll_values - vars all_cached_consts - illtypes - functionname - blocknum - innerloops - oldgraph""".split() - - def __init__(self, graph, db, exception_policy=None, functionname=None): - graph._seen_by_the_backend = True + def __init__(self, graph, db, exception_policy, functionname): self.graph = graph self.db = db self.gcpolicy = db.gcpolicy self.exception_policy = exception_policy self.functionname = functionname - # apply the exception transformation - if self.db.exctransformer: - self.db.exctransformer.create_exception_handling(self.graph) - # apply the gc transformation - if self.db.gctransformer: - self.db.gctransformer.transform_graph(self.graph) - #self.graph.show() - self.collect_var_and_types() + self.collect_var_and_types() for v in self.vars: T = v.concretetype # obscure: skip forward references and hope for the best @@ -84,12 +74,6 @@ self.more_ll_values.append(link.llexitcase) elif link.exitcase is not None: mix.append(Constant(link.exitcase)) - if self.exception_policy == "CPython": - v, exc_cleanup_ops = self.graph.exc_cleanup - mix.append(v) - for cleanupop in exc_cleanup_ops: - mix.extend(cleanupop.args) - mix.append(cleanupop.result) uniquemix = [] seen = identity_dict() @@ -99,20 +83,7 @@ seen[v] = True self.vars = uniquemix - def name(self, cname): #virtual - return cname - - def patch_graph(self, copy_graph): - graph = self.graph - if self.db.gctransformer and self.db.gctransformer.inline: - if copy_graph: - graph = copygraph(graph, shallow=True) - self.db.gctransformer.inline_helpers(graph) - return graph - def implementation_begin(self): - self.oldgraph = self.graph - self.graph = self.patch_graph(copy_graph=True) SSI_to_SSA(self.graph) self.collect_var_and_types() self.blocknum = {} @@ -138,8 +109,6 @@ self.vars = None self.blocknum = None self.innerloops = None - self.graph = self.oldgraph - del self.oldgraph def argnames(self): return [LOCALVAR % v.name for v 
in self.graph.getargs()] @@ -247,8 +216,6 @@ yield '}' link = block.exits[0] assert link.exitcase in (False, True) - #yield 'assert(%s == %s);' % (self.expr(block.exitswitch), - # self.genc.nameofvalue(link.exitcase, ct)) for op in self.gen_link(link): yield op elif TYPE in (Signed, Unsigned, SignedLongLong, @@ -299,7 +266,7 @@ def gen_op(self, op): macro = 'OP_%s' % op.opname.upper() line = None - if op.opname.startswith('gc_'): + if op.opname.startswith('gc_') and op.opname != 'gc_load_indexed': meth = getattr(self.gcpolicy, macro, None) if meth: line = meth(self, op) @@ -709,6 +676,19 @@ "%(result)s = ((%(typename)s) (((char *)%(addr)s) + %(offset)s))[0];" % locals()) + def OP_GC_LOAD_INDEXED(self, op): + addr = self.expr(op.args[0]) + index = self.expr(op.args[1]) + scale = self.expr(op.args[2]) + base_ofs = self.expr(op.args[3]) + result = self.expr(op.result) + TYPE = op.result.concretetype + typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '') + return ( + "%(result)s = ((%(typename)s) (((char *)%(addr)s) + " + "%(base_ofs)s + %(scale)s * %(index)s))[0];" + % locals()) + def OP_CAST_PRIMITIVE(self, op): TYPE = self.lltypemap(op.result) val = self.expr(op.args[0]) @@ -862,6 +842,12 @@ def OP_JIT_FFI_SAVE_RESULT(self, op): return '/* JIT_FFI_SAVE_RESULT %s */' % op + def OP_JIT_ENTER_PORTAL_FRAME(self, op): + return '/* JIT_ENTER_PORTAL_FRAME %s */' % op + + def OP_JIT_LEAVE_PORTAL_FRAME(self, op): + return '/* JIT_LEAVE_PORTAL_FRAME %s */' % op + def OP_GET_GROUP_MEMBER(self, op): typename = self.db.gettype(op.result.concretetype) return '%s = (%s)_OP_GET_GROUP_MEMBER(%s, %s);' % ( @@ -881,14 +867,11 @@ def getdebugfunctionname(self): name = self.functionname - if not name: - return "?" if name.startswith('pypy_g_'): name = name[7:] return name def OP_DEBUG_RECORD_TRACEBACK(self, op): - #if self.functionname is None, we print "?" 
as the argument */ return 'PYPY_DEBUG_RECORD_TRACEBACK("%s");' % ( self.getdebugfunctionname(),) @@ -928,5 +911,3 @@ cdecl(typename, ''), self.expr(op.args[0]), self.expr(op.result)) - -assert not USESLOTS or '__dict__' not in dir(FunctionCodeGenerator) diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/gcc/test/test_asmgcroot.py pypy-5.0.1+dfsg/rpython/translator/c/gcc/test/test_asmgcroot.py --- pypy-4.0.1+dfsg/rpython/translator/c/gcc/test/test_asmgcroot.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/gcc/test/test_asmgcroot.py 2016-03-19 16:40:12.000000000 +0000 @@ -9,7 +9,7 @@ from rpython.translator.platform import platform as compiler from rpython.rlib.rarithmetic import is_emulated_long from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib.entrypoint import entrypoint, secondary_entrypoints +from rpython.rlib.entrypoint import entrypoint_highlevel, secondary_entrypoints from rpython.rtyper.lltypesystem.lloperation import llop _MSVC = compiler.name == "msvc" @@ -195,7 +195,8 @@ except KeyError: pass - @entrypoint("x42", [lltype.Signed, lltype.Signed], c_name='callback') + @entrypoint_highlevel("x42", [lltype.Signed, lltype.Signed], + c_name='callback') def mycallback(a, b): gc.collect() return a + b diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/gcc/trackgcroot.py pypy-5.0.1+dfsg/rpython/translator/c/gcc/trackgcroot.py --- pypy-4.0.1+dfsg/rpython/translator/c/gcc/trackgcroot.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/gcc/trackgcroot.py 2016-03-19 16:40:12.000000000 +0000 @@ -528,6 +528,8 @@ 'rex64', # movbe, converts from big-endian, so most probably not GC pointers 'movbe', + # xchgb, byte-sized, so not GC pointers + 'xchgb', ]) # a partial list is hopefully good enough for now; it's all to support diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/gc.py pypy-5.0.1+dfsg/rpython/translator/c/gc.py --- pypy-4.0.1+dfsg/rpython/translator/c/gc.py 2015-11-19 19:21:40.000000000 +0000 
+++ pypy-5.0.1+dfsg/rpython/translator/c/gc.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,8 +1,7 @@ import sys from rpython.flowspace.model import Constant -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import (typeOf, RttiStruct, - RuntimeTypeInfo, top_container) +from rpython.rtyper.lltypesystem.lltype import (RttiStruct, + RuntimeTypeInfo) from rpython.translator.c.node import ContainerNode from rpython.translator.c.support import cdecl from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -18,23 +17,12 @@ return defnode.db.gctransformer.HDR return None - def common_gcheader_initdata(self, defnode): - if defnode.db.gctransformer is not None: - raise NotImplementedError - return None - def struct_gcheader_definition(self, defnode): return self.common_gcheader_definition(defnode) - def struct_gcheader_initdata(self, defnode): - return self.common_gcheader_initdata(defnode) - def array_gcheader_definition(self, defnode): return self.common_gcheader_definition(defnode) - def array_gcheader_initdata(self, defnode): - return self.common_gcheader_initdata(defnode) - def compilation_info(self): if not self.db: return ExternalCompilationInfo() @@ -46,9 +34,6 @@ ] ) - def get_prebuilt_hash(self, obj): - return None - def need_no_typeptr(self): return False @@ -109,16 +94,9 @@ class RefcountingGcPolicy(BasicGcPolicy): - def gettransformer(self): + def gettransformer(self, translator): from rpython.memory.gctransform import refcounting - return refcounting.RefcountingGCTransformer(self.db.translator) - - def common_gcheader_initdata(self, defnode): - if defnode.db.gctransformer is not None: - gct = defnode.db.gctransformer - top = top_container(defnode.obj) - return gct.gcheaderbuilder.header_of_object(top)._obj - return None + return refcounting.RefcountingGCTransformer(translator) # for structs @@ -197,16 +175,9 @@ class BoehmGcPolicy(BasicGcPolicy): - def gettransformer(self): + def gettransformer(self, 
translator): from rpython.memory.gctransform import boehm - return boehm.BoehmGCTransformer(self.db.translator) - - def common_gcheader_initdata(self, defnode): - if defnode.db.gctransformer is not None: - hdr = lltype.malloc(defnode.db.gctransformer.HDR, immortal=True) - hdr.hash = lltype.identityhash_nocache(defnode.obj._as_ptr()) - return hdr._obj - return None + return boehm.BoehmGCTransformer(translator) def array_setup(self, arraydefnode): pass @@ -313,9 +284,9 @@ class BasicFrameworkGcPolicy(BasicGcPolicy): - def gettransformer(self): + def gettransformer(self, translator): if hasattr(self, 'transformerclass'): # for rpython/memory tests - return self.transformerclass(self.db.translator) + return self.transformerclass(translator) raise NotImplementedError def struct_setup(self, structdefnode, rtti): @@ -362,24 +333,6 @@ args = [funcgen.expr(v) for v in op.args] return '%s = %s; /* for moving GCs */' % (args[1], args[0]) - def common_gcheader_initdata(self, defnode): - o = top_container(defnode.obj) - needs_hash = self.get_prebuilt_hash(o) is not None - hdr = defnode.db.gctransformer.gc_header_for(o, needs_hash) - return hdr._obj - - def get_prebuilt_hash(self, obj): - # for prebuilt objects that need to have their hash stored and - # restored. Note that only structures that are StructNodes all - # the way have their hash stored (and not e.g. structs with var- - # sized arrays at the end). 'obj' must be the top_container. 
- TYPE = typeOf(obj) - if not isinstance(TYPE, lltype.GcStruct): - return None - if TYPE._is_varsize(): - return None - return getattr(obj, '_hash_cache_', None) - def need_no_typeptr(self): config = self.db.translator.config return config.translation.gcremovetypeptr @@ -440,15 +393,15 @@ class ShadowStackFrameworkGcPolicy(BasicFrameworkGcPolicy): - def gettransformer(self): + def gettransformer(self, translator): from rpython.memory.gctransform import shadowstack - return shadowstack.ShadowStackFrameworkGCTransformer(self.db.translator) + return shadowstack.ShadowStackFrameworkGCTransformer(translator) class AsmGcRootFrameworkGcPolicy(BasicFrameworkGcPolicy): - def gettransformer(self): + def gettransformer(self, translator): from rpython.memory.gctransform import asmgcroot - return asmgcroot.AsmGcRootFrameworkGCTransformer(self.db.translator) + return asmgcroot.AsmGcRootFrameworkGCTransformer(translator) def GC_KEEPALIVE(self, funcgen, v): return 'pypy_asm_keepalive(%s);' % funcgen.expr(v) diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/genc.py pypy-5.0.1+dfsg/rpython/translator/c/genc.py --- pypy-4.0.1+dfsg/rpython/translator/c/genc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/genc.py 2016-03-19 16:40:12.000000000 +0000 @@ -2,7 +2,6 @@ import py import sys, os from rpython.rlib import exports -from rpython.rlib.entrypoint import entrypoint from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool import runsubprocess @@ -127,8 +126,10 @@ if not self.standalone: raise NotImplementedError("--gcrootfinder=asmgcc requires standalone") + exctransformer = translator.getexceptiontransformer() db = LowLevelDatabase(translator, standalone=self.standalone, gcpolicyclass=gcpolicyclass, + exctransformer=exctransformer, thread_enabled=self.config.translation.thread, sandbox=self.config.translation.sandbox) self.db = db @@ -194,22 +195,8 @@ DEBUG_DEFINES = 
{'RPY_ASSERT': 1, 'RPY_LL_ASSERT': 1} - def generate_graphs_for_llinterp(self, db=None): - # prepare the graphs as when the source is generated, but without - # actually generating the source. - if db is None: - db = self.build_database() - graphs = db.all_graphs() - db.gctransformer.prepare_inline_helpers(graphs) - for node in db.containerlist: - if hasattr(node, 'funcgens'): - for funcgen in node.funcgens: - funcgen.patch_graph(copy_graph=False) - return db - def generate_source(self, db=None, defines={}, exe_name=None): assert self.c_source_filename is None - if db is None: db = self.build_database() pf = self.getentrypointptr() @@ -560,6 +547,11 @@ relpypath = localpath.relto(pypkgpath.dirname) assert relpypath, ("%r should be relative to %r" % (localpath, pypkgpath.dirname)) + if len(relpypath.split(os.path.sep)) > 2: + # pypy detail to agregate the c files by directory, + # since the enormous number of files was causing + # memory issues linking on win32 + return os.path.split(relpypath)[0] + '.c' return relpypath.replace('.py', '.c') return None if hasattr(node.obj, 'graph'): @@ -734,6 +726,9 @@ print >> f, 'struct pypy_threadlocal_s {' print >> f, '\tint ready;' print >> f, '\tchar *stack_end;' + print >> f, '\tstruct pypy_threadlocal_s *prev, *next;' + # note: if the four fixed fields above are changed, you need + # to adapt threadlocal.c's linkedlist_head declaration too for field in fields: typename = database.gettype(field.FIELDTYPE) print >> f, '\t%s;' % cdecl(typename, field.fieldname) @@ -844,7 +839,6 @@ # sg = SourceGenerator(database) sg.set_strategy(targetdir, split) - database.prepare_inline_helpers() sg.gen_readable_parts_of_source(f) headers_to_precompile = sg.headers_to_precompile[:] headers_to_precompile.insert(0, incfilename) diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/node.py pypy-5.0.1+dfsg/rpython/translator/c/node.py --- pypy-4.0.1+dfsg/rpython/translator/c/node.py 2015-11-19 19:21:40.000000000 +0000 +++ 
pypy-5.0.1+dfsg/rpython/translator/c/node.py 2016-03-19 16:40:12.000000000 +0000 @@ -3,8 +3,7 @@ Void, OpaqueType, Float, RuntimeTypeInfo, getRuntimeTypeInfo, Char, _subarray) from rpython.rtyper.lltypesystem import llmemory, llgroup -from rpython.translator.c.funcgen import FunctionCodeGenerator -from rpython.translator.c.external import CExternalFunctionCodeGenerator +from rpython.translator.c.funcgen import make_funcgen from rpython.translator.c.support import USESLOTS # set to False if necessary while refactoring from rpython.translator.c.support import cdecl, forward_cdecl, somelettersfrom from rpython.translator.c.support import c_char_array_constant, barebonearray @@ -540,7 +539,17 @@ class StructNode(ContainerNode): nodekind = 'struct' if USESLOTS: - __slots__ = () + __slots__ = ('gc_init',) + + def __init__(self, db, T, obj): + ContainerNode.__init__(self, db, T, obj) + if needs_gcheader(T): + gct = self.db.gctransformer + if gct is not None: + self.gc_init = gct.gcheader_initdata(self.obj) + db.getcontainernode(self.gc_init) + else: + self.gc_init = None def basename(self): T = self.getTYPE() @@ -567,8 +576,7 @@ data = [] if needs_gcheader(T): - gc_init = self.db.gcpolicy.struct_gcheader_initdata(self) - data.append(('gcheader', gc_init)) + data.append(('gcheader', self.gc_init)) for name in defnode.fieldnames: data.append((name, getattr(self.obj, name))) @@ -641,7 +649,7 @@ def implementation(self): hash_typename = self.get_hash_typename() - hash = self.db.gcpolicy.get_prebuilt_hash(self.obj) + hash = self.db.gctransformer.get_prebuilt_hash(self.obj) assert hash is not None lines = list(self.initializationexpr()) lines.insert(0, '%s = { {' % ( @@ -651,7 +659,8 @@ return lines def gcstructnode_factory(db, T, obj): - if db.gcpolicy.get_prebuilt_hash(obj) is not None: + if (db.gctransformer and + db.gctransformer.get_prebuilt_hash(obj) is not None): cls = GcStructNodeWithHash else: cls = StructNode @@ -661,7 +670,17 @@ class ArrayNode(ContainerNode): 
nodekind = 'array' if USESLOTS: - __slots__ = () + __slots__ = ('gc_init',) + + def __init__(self, db, T, obj): + ContainerNode.__init__(self, db, T, obj) + if needs_gcheader(T): + gct = self.db.gctransformer + if gct is not None: + self.gc_init = gct.gcheader_initdata(self.obj) + db.getcontainernode(self.gc_init) + else: + self.gc_init = None def getptrname(self): if barebonearray(self.getTYPE()): @@ -681,8 +700,7 @@ T = self.getTYPE() yield '{' if needs_gcheader(T): - gc_init = self.db.gcpolicy.array_gcheader_initdata(self) - lines = generic_initializationexpr(self.db, gc_init, 'gcheader', + lines = generic_initializationexpr(self.db, self.gc_init, 'gcheader', '%sgcheader' % (decoration,)) for line in lines: yield line @@ -781,81 +799,64 @@ comma = '' expr += comma i = expr.find('\n') - if i<0: i = len(expr) + if i < 0: + i = len(expr) expr = '%s\t/* %s */%s' % (expr[:i], decoration, expr[i:]) return expr.split('\n') # ____________________________________________________________ -class FuncNode(ContainerNode): +class FuncNodeBase(ContainerNode): nodekind = 'func' eci_name = 'compilation_info' # there not so many node of this kind, slots should not # be necessary - - def __init__(self, db, T, obj, forcename=None): + def __init__(self, db, T, obj, ptrname): Node.__init__(self, db) self.globalcontainer = True self.T = T self.obj = obj - callable = getattr(obj, '_callable', None) - if (callable is not None and - getattr(callable, 'c_name', None) is not None): - self.name = forcename or obj._callable.c_name - elif getattr(obj, 'external', None) == 'C' and not db.need_sandboxing(obj): - self.name = forcename or self.basename() - else: - self.name = (forcename or - db.namespace.uniquename('g_' + self.basename())) - self.make_funcgens() + self.name = ptrname self.typename = db.gettype(T) #, who_asks=self) def getptrname(self): return self.name - def make_funcgens(self): - self.funcgens = select_function_code_generators(self.obj, self.db, self.name) - if self.funcgens: - 
argnames = self.funcgens[0].argnames() #Assume identical for all funcgens - self.implementationtypename = self.db.gettype(self.T, argnames=argnames) - self._funccodegen_owner = self.funcgens[0] - else: - self._funccodegen_owner = None - def basename(self): return self.obj._name + +class FuncNode(FuncNodeBase): + def __init__(self, db, T, obj, ptrname): + FuncNodeBase.__init__(self, db, T, obj, ptrname) + exception_policy = getattr(obj, 'exception_policy', None) + self.funcgen = make_funcgen(obj.graph, db, exception_policy, ptrname) + argnames = self.funcgen.argnames() + self.implementationtypename = db.gettype(T, argnames=argnames) + self._funccodegen_owner = self.funcgen + def enum_dependencies(self): - if not self.funcgens: - return [] - return self.funcgens[0].allconstantvalues() #Assume identical for all funcgens + return self.funcgen.allconstantvalues() def forward_declaration(self): callable = getattr(self.obj, '_callable', None) is_exported = getattr(callable, 'exported_symbol', False) - for funcgen in self.funcgens: - yield '%s;' % ( - forward_cdecl(self.implementationtypename, - funcgen.name(self.name), self.db.standalone, - is_exported=is_exported)) - - def implementation(self): - for funcgen in self.funcgens: - for s in self.funcgen_implementation(funcgen): - yield s + yield '%s;' % ( + forward_cdecl(self.implementationtypename, + self.name, self.db.standalone, is_exported=is_exported)) def graphs_to_patch(self): - for funcgen in self.funcgens: - for i in funcgen.graphs_to_patch(): - yield i + for i in self.funcgen.graphs_to_patch(): + yield i - def funcgen_implementation(self, funcgen): + def implementation(self): + funcgen = self.funcgen funcgen.implementation_begin() # recompute implementationtypename as the argnames may have changed argnames = funcgen.argnames() implementationtypename = self.db.gettype(self.T, argnames=argnames) - yield '%s {' % cdecl(implementationtypename, funcgen.name(self.name)) + yield '%s {' % cdecl(implementationtypename, 
self.name) # # declare the local variables # @@ -866,7 +867,7 @@ while start < len(localnames): # pack the local declarations over as few lines as possible total = lengths[start] + 8 - end = start+1 + end = start + 1 while total + lengths[end] < 77: total += lengths[end] + 1 end += 1 @@ -897,44 +898,59 @@ del bodyiter funcgen.implementation_end() -def sandbox_stub(fnobj, db): - # unexpected external function for --sandbox translation: replace it - # with a "Not Implemented" stub. To support these functions, port them - # to the new style registry (e.g. rpython.module.ll_os.RegisterOs). - from rpython.translator.sandbox import rsandbox - graph = rsandbox.get_external_function_sandbox_graph(fnobj, db, - force_stub=True) - return [FunctionCodeGenerator(graph, db)] - -def sandbox_transform(fnobj, db): - # for --sandbox: replace a function like os_open_llimpl() with - # code that communicates with the external process to ask it to - # perform the operation. - from rpython.translator.sandbox import rsandbox - graph = rsandbox.get_external_function_sandbox_graph(fnobj, db) - return [FunctionCodeGenerator(graph, db)] - -def select_function_code_generators(fnobj, db, functionname): - sandbox = db.need_sandboxing(fnobj) - if hasattr(fnobj, 'graph'): - if sandbox and sandbox != "if_external": - # apply the sandbox transformation - return sandbox_transform(fnobj, db) - exception_policy = getattr(fnobj, 'exception_policy', None) - return [FunctionCodeGenerator(fnobj.graph, db, exception_policy, - functionname)] - elif getattr(fnobj, 'external', None) is not None: - if sandbox: - return sandbox_stub(fnobj, db) - elif fnobj.external == 'C': - return [] - else: - assert fnobj.external == 'CPython' - return [CExternalFunctionCodeGenerator(fnobj, db)] - elif hasattr(fnobj._callable, "c_name"): - return [] # this case should only be used for entrypoints +class ExternalFuncNode(FuncNodeBase): + def __init__(self, db, T, obj, ptrname): + FuncNodeBase.__init__(self, db, T, obj, ptrname) 
+ self._funccodegen_owner = None + + def enum_dependencies(self): + return [] + + def forward_declaration(self): + return [] + + def implementation(self): + return [] + +def new_funcnode(db, T, obj, forcename=None): + from rpython.rtyper.rtyper import llinterp_backend + if db.sandbox: + if (getattr(obj, 'external', None) is not None and + not obj._safe_not_sandboxed): + from rpython.translator.sandbox import rsandbox + obj.__dict__['graph'] = rsandbox.get_sandbox_stub( + obj, db.translator.rtyper) + obj.__dict__.pop('_safe_not_sandboxed', None) + obj.__dict__.pop('external', None) + if forcename: + name = forcename + else: + name = _select_name(db, obj) + if hasattr(obj, 'graph'): + return FuncNode(db, T, obj, name) + elif getattr(obj, 'external', None) is not None: + assert obj.external == 'C' + if db.sandbox: + assert obj._safe_not_sandboxed + return ExternalFuncNode(db, T, obj, name) + elif hasattr(obj._callable, "c_name"): + return ExternalFuncNode(db, T, obj, name) # this case should only be used for entrypoints + elif db.translator.rtyper.backend is llinterp_backend: + # on llinterp, anything goes + return ExternalFuncNode(db, T, obj, name) else: - raise ValueError("don't know how to generate code for %r" % (fnobj,)) + raise ValueError("don't know how to generate code for %r" % (obj,)) + + +def _select_name(db, obj): + try: + return obj._callable.c_name + except AttributeError: + pass + if getattr(obj, 'external', None) == 'C': + return obj._name + return db.namespace.uniquename('g_' + obj._name) + class ExtType_OpaqueNode(ContainerNode): nodekind = 'rpyopaque' @@ -1044,7 +1060,7 @@ Array: ArrayNode, GcArray: ArrayNode, FixedSizeArray: FixedSizeArrayNode, - FuncType: FuncNode, + FuncType: new_funcnode, OpaqueType: opaquenode_factory, llmemory._WeakRefType: weakrefnode_factory, llgroup.GroupType: GroupNode, diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/src/entrypoint.c pypy-5.0.1+dfsg/rpython/translator/c/src/entrypoint.c --- 
pypy-4.0.1+dfsg/rpython/translator/c/src/entrypoint.c 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/src/entrypoint.c 2016-03-19 16:40:12.000000000 +0000 @@ -33,6 +33,29 @@ # include #endif +#ifdef RPY_WITH_GIL +# include +#endif + +RPY_EXPORTED +void rpython_startup_code(void) +{ +#ifdef RPY_WITH_GIL + RPyGilAcquire(); +#endif +#ifdef PYPY_USE_ASMGCC + pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; +#endif + pypy_asm_stack_bottom(); + RPython_StartupCode(); +#ifdef PYPY_USE_ASMGCC + pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter--; +#endif +#ifdef RPY_WITH_GIL + RPyGilRelease(); +#endif +} + RPY_EXTERN int pypy_main_function(int argc, char *argv[]) @@ -46,6 +69,14 @@ _setmode(1, _O_BINARY); #endif +#ifdef RPY_WITH_GIL + /* Note that the GIL's mutexes are not automatically made; if the + program starts threads, it needs to call rgil.gil_allocate(). + RPyGilAcquire() still works without that, but crash if it finds + that it really needs to wait on a mutex. 
*/ + RPyGilAcquire(); +#endif + #ifdef PYPY_USE_ASMGCC pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; #endif @@ -82,6 +113,10 @@ pypy_malloc_counters_results(); +#ifdef RPY_WITH_GIL + RPyGilRelease(); +#endif + return exitcode; memory_out: diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/src/mem.c pypy-5.0.1+dfsg/rpython/translator/c/src/mem.c --- pypy-4.0.1+dfsg/rpython/translator/c/src/mem.c 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/src/mem.c 2016-03-19 16:40:12.000000000 +0000 @@ -120,11 +120,8 @@ got += 1; fd = ((void* *) (((char *)fd) + sizeof(void*)))[0]; } - if (rpy_fastgil != 1) { - RPyAssert(rpy_fastgil != 0, - "pypy_check_stack_count doesn't have the GIL"); - got++; /* <= the extra one currently stored in rpy_fastgil */ - } + RPyAssert(rpy_fastgil == 1, + "pypy_check_stack_count doesn't have the GIL"); RPyAssert(got == stacks_counter - 1, "bad stacks_counter or non-closed stacks around"); # endif diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/src/thread_gil.c pypy-5.0.1+dfsg/rpython/translator/c/src/thread_gil.c --- pypy-4.0.1+dfsg/rpython/translator/c/src/thread_gil.c 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/src/thread_gil.c 2016-03-19 16:40:12.000000000 +0000 @@ -36,25 +36,42 @@ value of 'rpy_fastgil' to 1. */ -long rpy_fastgil = 1; -long rpy_waiting_threads = -42; /* GIL not initialized */ + +/* The GIL is initially released; see pypy_main_function(), which calls + RPyGilAcquire/RPyGilRelease. The point is that when building + RPython libraries, they can be a collection of regular functions that + also call RPyGilAcquire/RPyGilRelease; see test_standalone.TestShared. 
+*/ +long rpy_fastgil = 0; +static long rpy_waiting_threads = -42; /* GIL not initialized */ static mutex1_t mutex_gil_stealer; static mutex2_t mutex_gil; -void RPyGilAllocate(void) + +static void rpy_init_mutexes(void) { - assert(RPY_FASTGIL_LOCKED(rpy_fastgil)); mutex1_init(&mutex_gil_stealer); mutex2_init_locked(&mutex_gil); rpy_waiting_threads = 0; } -void RPyGilAcquire(void) +void RPyGilAllocate(void) { - /* Acquires the GIL. - */ - long old_fastgil = lock_test_and_set(&rpy_fastgil, 1); + if (rpy_waiting_threads < 0) { + assert(rpy_waiting_threads == -42); + rpy_init_mutexes(); +#ifdef HAVE_PTHREAD_ATFORK + pthread_atfork(NULL, NULL, rpy_init_mutexes); +#endif + } +} +void RPyGilAcquireSlowPath(long old_fastgil) +{ + /* Acquires the GIL. This assumes that we already did: + + old_fastgil = lock_test_and_set(&rpy_fastgil, 1); + */ if (!RPY_FASTGIL_LOCKED(old_fastgil)) { /* The fastgil was not previously locked: success. 'mutex_gil' should still be locked at this point. @@ -63,10 +80,22 @@ else { /* Otherwise, another thread is busy with the GIL. */ + if (rpy_waiting_threads < 0) { + /* I tried to have RPyGilAllocate() called from + * here, but it fails occasionally on an example + * (2.7/test/test_threading.py). I think what occurs is + * that if one thread runs RPyGilAllocate(), it still + * doesn't have the GIL; then the other thread might fork() + * at precisely this moment, killing the first thread. + */ + fprintf(stderr, "Fatal RPython error: a thread is trying to wait " + "for the GIL, but the GIL was not initialized\n"); + abort(); + } + /* Register me as one of the threads that is actively waiting for the GIL. The number of such threads is found in rpy_waiting_threads. */ - assert(rpy_waiting_threads >= 0); atomic_increment(&rpy_waiting_threads); /* Enter the waiting queue from the end. 
Assuming a roughly @@ -164,6 +193,13 @@ _RPyGilRelease(); } +#undef RPyGilAcquire +RPY_EXTERN +void RPyGilAcquire(void) +{ + _RPyGilAcquire(); +} + #undef RPyFetchFastGil RPY_EXTERN long *RPyFetchFastGil(void) diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/src/thread.h pypy-5.0.1+dfsg/rpython/translator/c/src/thread.h --- pypy-4.0.1+dfsg/rpython/translator/c/src/thread.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/src/thread.h 2016-03-19 16:40:12.000000000 +0000 @@ -28,7 +28,8 @@ RPY_EXTERN void RPyGilAllocate(void); RPY_EXTERN long RPyGilYieldThread(void); -RPY_EXTERN void RPyGilAcquire(void); +RPY_EXTERN void RPyGilAcquireSlowPath(long); +#define RPyGilAcquire _RPyGilAcquire #define RPyGilRelease _RPyGilRelease #define RPyFetchFastGil _RPyFetchFastGil @@ -40,9 +41,14 @@ RPY_EXTERN long rpy_fastgil; +static inline void _RPyGilAcquire(void) { + long old_fastgil = lock_test_and_set(&rpy_fastgil, 1); + if (old_fastgil != 0) + RPyGilAcquireSlowPath(old_fastgil); +} static inline void _RPyGilRelease(void) { assert(RPY_FASTGIL_LOCKED(rpy_fastgil)); - rpy_fastgil = 0; + lock_release(&rpy_fastgil); } static inline long *_RPyFetchFastGil(void) { return &rpy_fastgil; diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/src/threadlocal.c pypy-5.0.1+dfsg/rpython/translator/c/src/threadlocal.c --- pypy-4.0.1+dfsg/rpython/translator/c/src/threadlocal.c 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/src/threadlocal.c 2016-03-19 16:40:12.000000000 +0000 @@ -3,20 +3,104 @@ #include #include #include -#ifndef _WIN32 -# include -#endif +#include #include "src/threadlocal.h" +#include "src/thread.h" + + +/* this is a spin-lock that must be acquired around each doubly-linked-list + manipulation (because such manipulations can occur without the GIL) */ +static long pypy_threadlocal_lock = 0; + +static int check_valid(void); + +void _RPython_ThreadLocals_Acquire(void) { + while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) { + 
/* busy loop */ + } + assert(check_valid()); +} +void _RPython_ThreadLocals_Release(void) { + assert(check_valid()); + lock_release(&pypy_threadlocal_lock); +} + + +pthread_key_t pypy_threadlocal_key +#ifdef _WIN32 += TLS_OUT_OF_INDEXES +#endif +; + +static struct pypy_threadlocal_s linkedlist_head = { + -1, /* ready */ + NULL, /* stack_end */ + &linkedlist_head, /* prev */ + &linkedlist_head }; /* next */ +static int check_valid(void) +{ + struct pypy_threadlocal_s *prev, *cur; + prev = &linkedlist_head; + while (1) { + cur = prev->next; + assert(cur->prev == prev); + if (cur == &linkedlist_head) + break; + assert(cur->ready == 42); + assert(cur->next != cur); + prev = cur; + } + assert(cur->ready == -1); + return 1; +} + +static void cleanup_after_fork(void) +{ + /* assume that at most one pypy_threadlocal_s survived, the current one */ + struct pypy_threadlocal_s *cur; +#ifdef USE___THREAD + cur = &pypy_threadlocal; +#else + cur = (struct pypy_threadlocal_s *)_RPy_ThreadLocals_Get(); +#endif + if (cur && cur->ready == 42) { + cur->next = cur->prev = &linkedlist_head; + linkedlist_head.next = linkedlist_head.prev = cur; + } + else { + linkedlist_head.next = linkedlist_head.prev = &linkedlist_head; + } + _RPython_ThreadLocals_Release(); +} + + +struct pypy_threadlocal_s * +_RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev) +{ + if (prev == NULL) + prev = &linkedlist_head; + if (prev->next == &linkedlist_head) + return NULL; + return prev->next; +} + +struct pypy_threadlocal_s *_RPython_ThreadLocals_Head(void) +{ + return &linkedlist_head; +} static void _RPy_ThreadLocals_Init(void *p) { + struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + struct pypy_threadlocal_s *oldnext; memset(p, 0, sizeof(struct pypy_threadlocal_s)); + #ifdef RPY_TLOFS_p_errno - ((struct pypy_threadlocal_s *)p)->p_errno = &errno; + tls->p_errno = &errno; #endif #ifdef RPY_TLOFS_thread_ident - ((struct pypy_threadlocal_s *)p)->thread_ident = + tls->thread_ident = # 
ifdef _WIN32 GetCurrentThreadId(); # else @@ -26,7 +110,93 @@ where it is not the case are rather old nowadays. */ # endif #endif - ((struct pypy_threadlocal_s *)p)->ready = 42; + _RPython_ThreadLocals_Acquire(); + oldnext = linkedlist_head.next; + tls->prev = &linkedlist_head; + tls->next = oldnext; + linkedlist_head.next = tls; + oldnext->prev = tls; + tls->ready = 42; + _RPython_ThreadLocals_Release(); +} + +static void threadloc_unlink(void *p) +{ + /* warning: this can be called at completely random times without + the GIL. */ + struct pypy_threadlocal_s *tls = (struct pypy_threadlocal_s *)p; + _RPython_ThreadLocals_Acquire(); + if (tls->ready == 42) { + tls->next->prev = tls->prev; + tls->prev->next = tls->next; + memset(tls, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ + tls->ready = 0; + } + _RPython_ThreadLocals_Release(); +#ifndef USE___THREAD + free(p); +#endif +} + +#ifdef _WIN32 +/* xxx Defines a DllMain() function. It's horrible imho: it only + works if we happen to compile a DLL (not a EXE); and of course you + get link-time errors if two files in the same DLL do the same. + There are some alternatives known, but they are horrible in other + ways (e.g. using undocumented behavior). This seems to be the + simplest, but feel free to fix if you need that. + + For this reason we have the line 'not _win32 or config.translation.shared' + in rpython.rlib.rthread. 
+*/ +BOOL WINAPI DllMain(HINSTANCE hinstDLL, + DWORD reason_for_call, + LPVOID reserved) +{ + LPVOID p; + switch (reason_for_call) { + case DLL_THREAD_DETACH: + if (pypy_threadlocal_key != TLS_OUT_OF_INDEXES) { + p = TlsGetValue(pypy_threadlocal_key); + if (p != NULL) { + TlsSetValue(pypy_threadlocal_key, NULL); + threadloc_unlink(p); + } + } + break; + default: + break; + } + return TRUE; +} +#endif + +void RPython_ThreadLocals_ProgramInit(void) +{ + /* Initialize the pypy_threadlocal_key, together with a destructor + that will be called every time a thread shuts down (if there is + a non-null thread-local value). This is needed even in the + case where we use '__thread' below, for the destructor. + */ + assert(pypy_threadlocal_lock == 0); +#ifdef _WIN32 + pypy_threadlocal_key = TlsAlloc(); + if (pypy_threadlocal_key == TLS_OUT_OF_INDEXES) +#else + if (pthread_key_create(&pypy_threadlocal_key, threadloc_unlink) != 0) +#endif + { + fprintf(stderr, "Internal RPython error: " + "out of thread-local storage indexes"); + abort(); + } + _RPython_ThreadLocals_Build(); + +#ifndef _WIN32 + pthread_atfork(_RPython_ThreadLocals_Acquire, + _RPython_ThreadLocals_Release, + cleanup_after_fork); +#endif } @@ -39,23 +209,22 @@ available, managed by gcc. */ __thread struct pypy_threadlocal_s pypy_threadlocal; -void RPython_ThreadLocals_ProgramInit(void) -{ - _RPy_ThreadLocals_Init(&pypy_threadlocal); -} - char *_RPython_ThreadLocals_Build(void) { - RPyAssert(pypy_threadlocal.ready == 0, "corrupted thread-local"); + RPyAssert(pypy_threadlocal.ready == 0, "unclean thread-local"); _RPy_ThreadLocals_Init(&pypy_threadlocal); + + /* we also set up &pypy_threadlocal as a POSIX thread-local variable, + because we need the destructor behavior. 
*/ + pthread_setspecific(pypy_threadlocal_key, (void *)&pypy_threadlocal); + return (char *)&pypy_threadlocal; } void RPython_ThreadLocals_ThreadDie(void) { - memset(&pypy_threadlocal, 0xDD, - sizeof(struct pypy_threadlocal_s)); /* debug */ - pypy_threadlocal.ready = 0; + pthread_setspecific(pypy_threadlocal_key, NULL); + threadloc_unlink(&pypy_threadlocal); } @@ -68,24 +237,6 @@ explicitly, with malloc()/free(), and attached to (a single) thread- local key using the API of Windows or pthread. */ -pthread_key_t pypy_threadlocal_key; - - -void RPython_ThreadLocals_ProgramInit(void) -{ -#ifdef _WIN32 - pypy_threadlocal_key = TlsAlloc(); - if (pypy_threadlocal_key == TLS_OUT_OF_INDEXES) -#else - if (pthread_key_create(&pypy_threadlocal_key, NULL) != 0) -#endif - { - fprintf(stderr, "Internal RPython error: " - "out of thread-local storage indexes"); - abort(); - } - _RPython_ThreadLocals_Build(); -} char *_RPython_ThreadLocals_Build(void) { @@ -105,8 +256,7 @@ void *p = _RPy_ThreadLocals_Get(); if (p != NULL) { _RPy_ThreadLocals_Set(NULL); - memset(p, 0xDD, sizeof(struct pypy_threadlocal_s)); /* debug */ - free(p); + threadloc_unlink(p); /* includes free(p) */ } } diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/src/threadlocal.h pypy-5.0.1+dfsg/rpython/translator/c/src/threadlocal.h --- pypy-4.0.1+dfsg/rpython/translator/c/src/threadlocal.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/src/threadlocal.h 2016-03-19 16:40:12.000000000 +0000 @@ -13,14 +13,27 @@ to die. */ RPY_EXTERN void RPython_ThreadLocals_ThreadDie(void); -/* There are two llops: 'threadlocalref_addr' and 'threadlocalref_make'. - They both return the address of the thread-local structure (of the - C type 'struct pypy_threadlocal_s'). The difference is that - OP_THREADLOCALREF_MAKE() checks if we have initialized this thread- - local structure in the current thread, and if not, calls the following - helper. 
*/ +/* 'threadlocalref_addr' returns the address of the thread-local + structure (of the C type 'struct pypy_threadlocal_s'). It first + checks if we have initialized this thread-local structure in the + current thread, and if not, calls the following helper. */ RPY_EXTERN char *_RPython_ThreadLocals_Build(void); +RPY_EXTERN void _RPython_ThreadLocals_Acquire(void); +RPY_EXTERN void _RPython_ThreadLocals_Release(void); + +/* Must acquire/release the thread-local lock around a series of calls + to the following function */ +RPY_EXTERN struct pypy_threadlocal_s * +_RPython_ThreadLocals_Enum(struct pypy_threadlocal_s *prev); + +/* will return the head of the list */ +RPY_EXTERN struct pypy_threadlocal_s *_RPython_ThreadLocals_Head(); + +#define OP_THREADLOCALREF_ACQUIRE(r) _RPython_ThreadLocals_Acquire() +#define OP_THREADLOCALREF_RELEASE(r) _RPython_ThreadLocals_Release() +#define OP_THREADLOCALREF_ENUM(p, r) r = _RPython_ThreadLocals_Enum(p) + /* ------------------------------------------------------------ */ #ifdef USE___THREAD @@ -29,6 +42,8 @@ /* Use the '__thread' specifier, so far only on Linux */ +#include + RPY_EXTERN __thread struct pypy_threadlocal_s pypy_threadlocal; #define OP_THREADLOCALREF_ADDR(r) \ @@ -64,8 +79,6 @@ # define _RPy_ThreadLocals_Set(x) pthread_setspecific(pypy_threadlocal_key, x) #endif -RPY_EXTERN pthread_key_t pypy_threadlocal_key; - #define OP_THREADLOCALREF_ADDR(r) \ do { \ @@ -87,6 +100,9 @@ /* ------------------------------------------------------------ */ +RPY_EXTERN pthread_key_t pypy_threadlocal_key; + + /* only for the fall-back path in the JIT */ #define OP_THREADLOCALREF_GET_NONCONST(RESTYPE, offset, r) \ do { \ diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/src/thread_nt.c pypy-5.0.1+dfsg/rpython/translator/c/src/thread_nt.c --- pypy-4.0.1+dfsg/rpython/translator/c/src/thread_nt.c 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/src/thread_nt.c 2016-03-19 16:40:12.000000000 +0000 @@ -231,17 +231,21 
@@ return (result != WAIT_TIMEOUT); } -#define mutex1_t mutex2_t -#define mutex1_init mutex2_init -#define mutex1_lock mutex2_lock -#define mutex1_unlock mutex2_unlock - -#ifdef _M_IA64 -/* On Itanium, use 'acquire' memory ordering semantics */ -#define lock_test_and_set(ptr, value) InterlockedExchangeAcquire(ptr, value) -#else -#define lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) -#endif +typedef CRITICAL_SECTION mutex1_t; + +static inline void mutex1_init(mutex1_t *mutex) { + InitializeCriticalSection(mutex); +} + +static inline void mutex1_lock(mutex1_t *mutex) { + EnterCriticalSection(mutex); +} + +static inline void mutex1_unlock(mutex1_t *mutex) { + LeaveCriticalSection(mutex); +} + +//#define lock_test_and_set(ptr, value) see thread_nt.h #define atomic_increment(ptr) InterlockedIncrement(ptr) #define atomic_decrement(ptr) InterlockedDecrement(ptr) diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/src/thread_nt.h pypy-5.0.1+dfsg/rpython/translator/c/src/thread_nt.h --- pypy-4.0.1+dfsg/rpython/translator/c/src/thread_nt.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/src/thread_nt.h 2016-03-19 16:40:12.000000000 +0000 @@ -30,3 +30,12 @@ RPY_EXTERN long RPyThreadSetStackSize(long); #endif + + +#ifdef _M_IA64 +/* On Itanium, use 'acquire' memory ordering semantics */ +#define lock_test_and_set(ptr, value) InterlockedExchangeAcquire(ptr, value) +#else +#define lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) +#endif +#define lock_release(ptr) (*((volatile long *)ptr) = 0) diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/src/thread_pthread.c pypy-5.0.1+dfsg/rpython/translator/c/src/thread_pthread.c --- pypy-4.0.1+dfsg/rpython/translator/c/src/thread_pthread.c 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/src/thread_pthread.c 2016-03-19 16:40:12.000000000 +0000 @@ -546,8 +546,9 @@ return result; } -#define lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) 
+//#define lock_test_and_set(ptr, value) see thread_pthread.h #define atomic_increment(ptr) __sync_fetch_and_add(ptr, 1) #define atomic_decrement(ptr) __sync_fetch_and_sub(ptr, 1) +#define HAVE_PTHREAD_ATFORK 1 #include "src/thread_gil.c" diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/src/thread_pthread.h pypy-5.0.1+dfsg/rpython/translator/c/src/thread_pthread.h --- pypy-4.0.1+dfsg/rpython/translator/c/src/thread_pthread.h 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/src/thread_pthread.h 2016-03-19 16:40:12.000000000 +0000 @@ -78,3 +78,7 @@ long RPyThreadSetStackSize(long); RPY_EXTERN void RPyThreadAfterFork(void); + + +#define lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) +#define lock_release(ptr) __sync_lock_release(ptr) diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/test/test_boehm.py pypy-5.0.1+dfsg/rpython/translator/c/test/test_boehm.py --- pypy-4.0.1+dfsg/rpython/translator/c/test/test_boehm.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/test/test_boehm.py 2016-03-19 16:40:12.000000000 +0000 @@ -23,6 +23,7 @@ class AbstractGCTestClass(object): gcpolicy = "boehm" use_threads = False + extra_options = {} # deal with cleanups def setup_method(self, meth): @@ -33,8 +34,10 @@ #print "CLEANUP" self._cleanups.pop()() - def getcompiled(self, func, argstypelist=[], annotatorpolicy=None): - return compile(func, argstypelist, gcpolicy=self.gcpolicy, thread=self.use_threads) + def getcompiled(self, func, argstypelist=[], annotatorpolicy=None, + extra_options={}): + return compile(func, argstypelist, gcpolicy=self.gcpolicy, + thread=self.use_threads, **extra_options) class TestUsingBoehm(AbstractGCTestClass): diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/test/test_database.py pypy-5.0.1+dfsg/rpython/translator/c/test/test_database.py --- pypy-4.0.1+dfsg/rpython/translator/c/test/test_database.py 2015-11-19 19:21:40.000000000 +0000 +++ 
pypy-5.0.1+dfsg/rpython/translator/c/test/test_database.py 2016-03-19 16:40:12.000000000 +0000 @@ -9,8 +9,6 @@ def dump_on_stdout(database): - if database.gctransformer: - database.prepare_inline_helpers() print '/*********************************/' structdeflist = database.getstructdeflist() for node in structdeflist: @@ -171,7 +169,7 @@ F = FuncType([Signed], Signed) f = functionptr(F, "f", graph=graph) - db = LowLevelDatabase(t) + db = LowLevelDatabase(t, exctransformer=t.getexceptiontransformer()) db.get(f) db.complete() dump_on_stdout(db) @@ -186,7 +184,7 @@ return p.x * p.y t, graph = makegraph(ll_f, [int]) - db = LowLevelDatabase(t) + db = LowLevelDatabase(t, exctransformer=t.getexceptiontransformer()) db.get(getfunctionptr(graph)) db.complete() dump_on_stdout(db) @@ -207,7 +205,7 @@ return s.ptr1.x * s.ptr2.x t, graph = makegraph(ll_f, [int]) - db = LowLevelDatabase(t) + db = LowLevelDatabase(t, exctransformer=t.getexceptiontransformer()) db.get(getfunctionptr(graph)) db.complete() dump_on_stdout(db) diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/test/test_genc.py pypy-5.0.1+dfsg/rpython/translator/c/test/test_genc.py --- pypy-4.0.1+dfsg/rpython/translator/c/test/test_genc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/test/test_genc.py 2016-03-19 16:40:12.000000000 +0000 @@ -4,7 +4,7 @@ import py from rpython.rlib.rfloat import NAN, INFINITY -from rpython.rlib.entrypoint import entrypoint +from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask from rpython.rlib.objectmodel import specialize @@ -495,7 +495,7 @@ return 3 key = "test_entrypoints42" - @entrypoint(key, [int], "foobar") + @entrypoint_highlevel(key, [int], "foobar") def g(x): return x + 42 @@ -596,7 +596,7 @@ t.context._graphof(foobar_fn).inhibit_tail_call = True t.source_c() lines = t.driver.cbuilder.c_source_filename.join('..', - 
'rpython_translator_c_test_test_genc.c').readlines() + 'rpython_translator_c_test.c').readlines() for i, line in enumerate(lines): if '= pypy_g_foobar_fn' in line: break diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/test/test_newgc.py pypy-5.0.1+dfsg/rpython/translator/c/test/test_newgc.py --- pypy-4.0.1+dfsg/rpython/translator/c/test/test_newgc.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/test/test_newgc.py 2016-03-19 16:40:15.000000000 +0000 @@ -406,7 +406,7 @@ try: g() except: - os.write(1, "hallo") + pass #os.write(1, "hallo") def f1(i): if i: raise TypeError diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/test/test_refcount.py pypy-5.0.1+dfsg/rpython/translator/c/test/test_refcount.py --- pypy-4.0.1+dfsg/rpython/translator/c/test/test_refcount.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/test/test_refcount.py 2016-03-19 16:40:12.000000000 +0000 @@ -106,37 +106,6 @@ assert fn(1) == 4 assert fn(0) == 5 - def test_del_basic(self): - py.test.skip("xxx fix or kill") - S = lltype.GcStruct('S', ('x', lltype.Signed), rtti=True) - TRASH = lltype.GcStruct('TRASH', ('x', lltype.Signed)) - GLOBAL = lltype.Struct('GLOBAL', ('x', lltype.Signed)) - glob = lltype.malloc(GLOBAL, immortal=True) - def destructor(s): - glob.x = s.x + 1 - def type_info_S(s): - return lltype.getRuntimeTypeInfo(S) - - def g(n): - s = lltype.malloc(S) - s.x = n - # now 's' should go away - def entrypoint(n): - g(n) - # llop.gc__collect(lltype.Void) - return glob.x - - t = TranslationContext() - t.buildannotator().build_types(entrypoint, [int]) - rtyper = t.buildrtyper() - destrptr = rtyper.annotate_helper_fn(destructor, [lltype.Ptr(S)]) - rtyper.attachRuntimeTypeInfoFunc(S, type_info_S, destrptr=destrptr) - rtyper.specialize() - fn = self.compile_func(entrypoint, None, t) - - res = fn(123) - assert res == 124 - def test_del_catches(self): import os def g(): @@ -146,7 +115,7 @@ try: g() except: - os.write(1, "hallo") + pass 
#os.write(1, "hallo") def f1(i): if i: raise TypeError diff -Nru pypy-4.0.1+dfsg/rpython/translator/c/test/test_standalone.py pypy-5.0.1+dfsg/rpython/translator/c/test/test_standalone.py --- pypy-4.0.1+dfsg/rpython/translator/c/test/test_standalone.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/c/test/test_standalone.py 2016-03-19 16:40:12.000000000 +0000 @@ -8,7 +8,7 @@ from rpython.rlib.debug import ll_assert, have_debug_prints, debug_flush from rpython.rlib.debug import debug_print, debug_start, debug_stop from rpython.rlib.debug import debug_offset, have_debug_prints_for -from rpython.rlib.entrypoint import entrypoint, secondary_entrypoints +from rpython.rlib.entrypoint import entrypoint_highlevel, secondary_entrypoints from rpython.rtyper.lltypesystem import lltype from rpython.translator.translator import TranslationContext from rpython.translator.backendopt import all @@ -81,7 +81,7 @@ # # verify that the executable re-export symbols, but not too many if sys.platform.startswith('linux') and not kwds.get('shared', False): - seen_main = False + seen = set() g = os.popen("objdump -T '%s'" % builder.executable_name, 'r') for line in g: if not line.strip(): @@ -91,18 +91,22 @@ name = line.split()[-1] if name.startswith('__'): continue + seen.add(name) if name == 'main': - seen_main = True continue if name == 'pypy_debug_file': # ok to export this one continue + if name == 'rpython_startup_code': # ok for this one too + continue if 'pypy' in name.lower() or 'rpy' in name.lower(): raise Exception("Unexpected exported name %r. 
" "What is likely missing is RPY_EXTERN before the " "declaration of this C function or global variable" % (name,)) g.close() - assert seen_main, "did not see 'main' exported" + # list of symbols that we *want* to be exported: + for name in ['main', 'pypy_debug_file', 'rpython_startup_code']: + assert name in seen, "did not see '%r' exported" % name # return t, builder @@ -121,9 +125,9 @@ # Verify that the generated C files have sane names: gen_c_files = [str(f) for f in cbuilder.extrafiles] - for expfile in ('rpython_rlib_rposix.c', - 'rpython_rtyper_lltypesystem_rstr.c', - 'rpython_translator_c_test_test_standalone.c'): + for expfile in ('rpython_rlib.c', + 'rpython_rtyper_lltypesystem.c', + 'rpython_translator_c_test.c'): assert cbuilder.targetdir.join(expfile) in gen_c_files def test_print(self): @@ -1093,23 +1097,11 @@ import time from rpython.rlib import rthread from rpython.rtyper.lltypesystem import lltype - from rpython.rlib.objectmodel import invoke_around_extcall class State: pass state = State() - def before(): - debug_print("releasing...") - ll_assert(not rthread.acquire_NOAUTO(state.ll_lock, False), - "lock not held!") - rthread.release_NOAUTO(state.ll_lock) - debug_print("released") - def after(): - debug_print("waiting...") - rthread.acquire_NOAUTO(state.ll_lock, True) - debug_print("acquired") - def recurse(n): if n > 0: return recurse(n-1)+1 @@ -1145,10 +1137,7 @@ s1 = State(); s2 = State(); s3 = State() s1.x = 0x11111111; s2.x = 0x22222222; s3.x = 0x33333333 # start 3 new threads - state.ll_lock = rthread.allocate_ll_lock() - after() state.count = 0 - invoke_around_extcall(before, after) ident1 = rthread.start_new_thread(bootstrap, ()) ident2 = rthread.start_new_thread(bootstrap, ()) ident3 = rthread.start_new_thread(bootstrap, ()) @@ -1192,20 +1181,11 @@ import time, gc from rpython.rlib import rthread, rposix from rpython.rtyper.lltypesystem import lltype - from rpython.rlib.objectmodel import invoke_around_extcall class State: pass state = 
State() - def before(): - ll_assert(not rthread.acquire_NOAUTO(state.ll_lock, False), - "lock not held!") - rthread.release_NOAUTO(state.ll_lock) - def after(): - rthread.acquire_NOAUTO(state.ll_lock, True) - rthread.gc_thread_run() - class Cons: def __init__(self, head, tail): self.head = head @@ -1235,9 +1215,6 @@ state.xlist = [] x2 = Cons(51, Cons(62, Cons(74, None))) # start 5 new threads - state.ll_lock = rthread.allocate_ll_lock() - after() - invoke_around_extcall(before, after) ident1 = new_thread() ident2 = new_thread() # @@ -1281,7 +1258,6 @@ def test_gc_with_fork_without_threads(self): - from rpython.rlib.objectmodel import invoke_around_extcall if not hasattr(os, 'fork'): py.test.skip("requires fork()") @@ -1308,22 +1284,18 @@ # alive are really freed. import time, gc, os from rpython.rlib import rthread - from rpython.rlib.objectmodel import invoke_around_extcall if not hasattr(os, 'fork'): py.test.skip("requires fork()") + from rpython.rtyper.lltypesystem import rffi, lltype + direct_write = rffi.llexternal( + "write", [rffi.INT, rffi.CCHARP, rffi.SIZE_T], lltype.Void, + _nowrapper=True) + class State: pass state = State() - def before(): - ll_assert(not rthread.acquire_NOAUTO(state.ll_lock, False), - "lock not held!") - rthread.release_NOAUTO(state.ll_lock) - def after(): - rthread.acquire_NOAUTO(state.ll_lock, True) - rthread.gc_thread_run() - class Cons: def __init__(self, head, tail): self.head = head @@ -1331,7 +1303,10 @@ class Stuff: def __del__(self): - os.write(state.write_end, 'd') + p = rffi.str2charp('d') + one = rffi.cast(rffi.SIZE_T, 1) + direct_write(rffi.cast(rffi.INT, state.write_end), p, one) + rffi.free_charp(p) def allocate_stuff(): s = Stuff() @@ -1380,9 +1355,6 @@ state.read_end, state.write_end = os.pipe() x2 = Cons(51, Cons(62, Cons(74, None))) # start 5 new threads - state.ll_lock = rthread.allocate_ll_lock() - after() - invoke_around_extcall(before, after) start_arthreads() # force freeing gc.collect() @@ -1420,7 +1392,7 @@ 
config = get_combined_translation_config(translating=True) self.config = config - @entrypoint('test', [lltype.Signed], c_name='foo') + @entrypoint_highlevel('test', [lltype.Signed], c_name='foo') def f(a): return a + 3 @@ -1428,7 +1400,8 @@ return 0 t, cbuilder = self.compile(entry_point, shared=True, - entrypoints=[f], local_icon='red.ico') + entrypoints=[f.exported_wrapper], + local_icon='red.ico') ext_suffix = '.so' if cbuilder.eci.platform.name == 'msvc': ext_suffix = '.dll' diff -Nru pypy-4.0.1+dfsg/rpython/translator/driver.py pypy-5.0.1+dfsg/rpython/translator/driver.py --- pypy-4.0.1+dfsg/rpython/translator/driver.py 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/driver.py 2016-03-19 16:40:15.000000000 +0000 @@ -203,9 +203,8 @@ try: points = secondary_entrypoints[key] except KeyError: - raise KeyError( - "Entrypoints not found. I only know the keys %r." % - (", ".join(secondary_entrypoints.keys()), )) + raise KeyError("Entrypoint %r not found (not in %r)" % + (key, secondary_entrypoints.keys())) self.secondary_entrypoints.extend(points) self.translator.driver_instrument_result = self.instrument_result @@ -488,13 +487,14 @@ exe = py.path.local(exename) exename = exe.new(purebasename=exe.purebasename + 'w') shutil_copy(str(exename), str(newexename)) - # the import library is named python27.lib, according - # to the pragma in pyconfig.h - libname = str(newsoname.dirpath().join('python27.lib')) + # for pypy, the import library is renamed and moved to + # libs/python27.lib, according to the pragma in pyconfig.h + libname = self.config.translation.libname + libname = libname or soname.new(ext='lib').basename + libname = str(newsoname.dirpath().join(libname)) shutil.copyfile(str(soname.new(ext='lib')), libname) self.log.info("copied: %s" % (libname,)) - # XXX TODO : replace the nonsense above with - # ext_to_copy = ['lib', 'pdb'] + # the pdb file goes in the same place as pypy(w).exe ext_to_copy = ['pdb',] for ext in ext_to_copy: name = 
soname.new(ext=ext) @@ -502,7 +502,6 @@ shutil.copyfile(str(name), str(newname.new(ext=ext))) self.log.info("copied: %s" % (newname,)) self.c_entryp = newexename - self.log.info('usession directory: %s' % (udir,)) self.log.info("created: %s" % (self.c_entryp,)) @taskdef(['source_c'], "Compiling c source") @@ -548,7 +547,9 @@ goals = [goals] goals.extend(self.extra_goals) goals = self.backend_select_goals(goals) - return self._execute(goals, task_skip = self._maybe_skip()) + result = self._execute(goals, task_skip = self._maybe_skip()) + self.log.info('usession directory: %s' % (udir,)) + return result @staticmethod def from_targetspec(targetspec_dic, config=None, args=None, diff -Nru pypy-4.0.1+dfsg/rpython/translator/goal/query.py pypy-5.0.1+dfsg/rpython/translator/goal/query.py --- pypy-4.0.1+dfsg/rpython/translator/goal/query.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/goal/query.py 2016-03-19 16:40:12.000000000 +0000 @@ -48,7 +48,7 @@ s_ev = annotator.annotation(ev) if s_et: if s_et.knowntype == type: - if s_et.__class__ == annmodel.SomeType: + if s_et.__class__ == annmodel.SomeTypeOf: if hasattr(s_et, 'is_type_of') and s_et.is_type_of == [ev]: continue else: diff -Nru pypy-4.0.1+dfsg/rpython/translator/interactive.py pypy-5.0.1+dfsg/rpython/translator/interactive.py --- pypy-4.0.1+dfsg/rpython/translator/interactive.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/interactive.py 2016-03-19 16:40:12.000000000 +0000 @@ -32,12 +32,6 @@ self.context.viewcg() def ensure_setup(self, argtypes=None, policy=None): - standalone = argtypes is None - if standalone: - assert argtypes is None - else: - if argtypes is None: - argtypes = [] self.driver.setup(self.entry_point, argtypes, policy, empty_translator=self.context) self.ann_argtypes = argtypes diff -Nru pypy-4.0.1+dfsg/rpython/translator/platform/darwin.py pypy-5.0.1+dfsg/rpython/translator/platform/darwin.py --- 
pypy-4.0.1+dfsg/rpython/translator/platform/darwin.py 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/platform/darwin.py 2016-03-19 16:40:12.000000000 +0000 @@ -28,11 +28,11 @@ # needed for cross compiling on ARM, needs fixing if relevant for darwin if len(rel_libdirs) > 0: print 'in get_rpath_flags, rel_libdirs is not fixed up',rel_libdirs - return self.rpath_flags + return self.rpath_flags def _args_for_shared(self, args): return (list(self.shared_only) - + ['-dynamiclib', '-install_name', '@rpath/$(TARGET)', '-undefined', 'dynamic_lookup'] + + ['-dynamiclib', '-install_name', '@rpath/$(TARGET)', '-undefined', 'dynamic_lookup', '-flat_namespace'] + args) def _include_dirs_for_libffi(self): diff -Nru pypy-4.0.1+dfsg/rpython/translator/platform/posix.py pypy-5.0.1+dfsg/rpython/translator/platform/posix.py --- pypy-4.0.1+dfsg/rpython/translator/platform/posix.py 2015-11-19 19:21:43.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/platform/posix.py 2016-03-19 16:40:12.000000000 +0000 @@ -138,6 +138,13 @@ rel = lpath.relto(rpypath) if rel: return os.path.join('$(RPYDIR)', rel) + # Hack: also relativize from the path '$RPYDIR/..'. + # Otherwise, when translating pypy, we get the paths in + # pypy/module/* that are kept as absolute, which makes the + # whole purpose of $RPYDIR rather pointless. + rel = lpath.relto(rpypath.join('..')) + if rel: + return os.path.join('$(RPYDIR)', '..', rel) m_dir = m.makefile_dir if m_dir == lpath: return '.' diff -Nru pypy-4.0.1+dfsg/rpython/translator/platform/windows.py pypy-5.0.1+dfsg/rpython/translator/platform/windows.py --- pypy-4.0.1+dfsg/rpython/translator/platform/windows.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/platform/windows.py 2016-03-19 16:40:12.000000000 +0000 @@ -151,7 +151,7 @@ # Increase stack size, for the linker and the stack check code. 
stack_size = 8 << 20 # 8 Mb - self.link_flags.append('/STACK:%d' % stack_size) + self.link_flags = self.link_flags + ('/STACK:%d' % stack_size,) # The following symbol is used in c/src/stack.h self.cflags.append('/DMAX_STACK_SIZE=%d' % (stack_size - 1024)) diff -Nru pypy-4.0.1+dfsg/rpython/translator/sandbox/rsandbox.py pypy-5.0.1+dfsg/rpython/translator/sandbox/rsandbox.py --- pypy-4.0.1+dfsg/rpython/translator/sandbox/rsandbox.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/sandbox/rsandbox.py 2016-03-19 16:40:15.000000000 +0000 @@ -13,9 +13,9 @@ # Sandboxing code generator for external functions # +from rpython.rlib import rposix from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.llannotation import lltype_to_annotation -from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.tool.ansi_print import ansi_log @@ -25,18 +25,19 @@ # a version of os.read() and os.write() that are not mangled # by the sandboxing mechanism -ll_read_not_sandboxed = rffi.llexternal('read', +ll_read_not_sandboxed = rposix.external('read', [rffi.INT, rffi.CCHARP, rffi.SIZE_T], rffi.SIZE_T, sandboxsafe=True) -ll_write_not_sandboxed = rffi.llexternal('write', +ll_write_not_sandboxed = rposix.external('write', [rffi.INT, rffi.CCHARP, rffi.SIZE_T], rffi.SIZE_T, sandboxsafe=True) -@signature(types.int(), types.ptr(rffi.CCHARP.TO), types.int(), returns=types.none()) +@signature(types.int(), types.ptr(rffi.CCHARP.TO), types.int(), + returns=types.none()) def writeall_not_sandboxed(fd, buf, length): while length > 0: size = rffi.cast(rffi.SIZE_T, length) @@ -84,15 +85,24 @@ return loader def reraise_error(error, loader): - if error == 1: raise OSError(load_int(loader), "external error") - elif error == 2: raise IOError - elif error == 3: raise OverflowError - elif error == 4: raise ValueError - elif error == 5: raise ZeroDivisionError - elif error == 6: raise MemoryError 
- elif error == 7: raise KeyError - elif error == 8: raise IndexError - else: raise RuntimeError + if error == 1: + raise OSError(load_int(loader), "external error") + elif error == 2: + raise IOError + elif error == 3: + raise OverflowError + elif error == 4: + raise ValueError + elif error == 5: + raise ZeroDivisionError + elif error == 6: + raise MemoryError + elif error == 7: + raise KeyError + elif error == 8: + raise IndexError + else: + raise RuntimeError @signature(types.str(), returns=types.impossible()) @@ -100,47 +110,46 @@ STDERR = 2 with rffi.scoped_str2charp(msg + '\n') as buf: writeall_not_sandboxed(STDERR, buf, len(msg) + 1) - raise RuntimeError(msg) # XXX in RPython, the msg is ignored at the moment + raise RuntimeError(msg) # XXX in RPython, the msg is ignored + +def make_stub(fnname, msg): + """Build always-raising stub function to replace unsupported external.""" + log.WARNING(msg) + + def execute(*args): + not_implemented_stub(msg) + execute.__name__ = 'sandboxed_%s' % (fnname,) + return execute + +def sig_ll(fnobj): + FUNCTYPE = lltype.typeOf(fnobj) + args_s = [lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS] + s_result = lltype_to_annotation(FUNCTYPE.RESULT) + return args_s, s_result dump_string = rmarshal.get_marshaller(str) -load_int = rmarshal.get_loader(int) +load_int = rmarshal.get_loader(int) -def get_external_function_sandbox_graph(fnobj, db, force_stub=False): - """Build the graph of a helper trampoline function to be used - in place of real calls to the external function 'fnobj'. The - trampoline marshals its input arguments, dumps them to STDOUT, - and waits for an answer on STDIN. 
- """ +def get_sandbox_stub(fnobj, rtyper): fnname = fnobj._name - if hasattr(fnobj, 'graph'): - # get the annotation of the input arguments and the result - graph = fnobj.graph - annotator = db.translator.annotator - args_s = [annotator.binding(v) for v in graph.getargs()] - s_result = annotator.binding(graph.getreturnvar()) - else: - # pure external function - fall back to the annotations - # corresponding to the ll types - FUNCTYPE = lltype.typeOf(fnobj) - args_s = [lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS] - s_result = lltype_to_annotation(FUNCTYPE.RESULT) - + args_s, s_result = sig_ll(fnobj) + msg = "Not implemented: sandboxing for external function '%s'" % (fnname,) + execute = make_stub(fnname, msg) + return _annotate(rtyper, execute, args_s, s_result) + +def make_sandbox_trampoline(fnname, args_s, s_result): + """Create a trampoline function with the specified signature. + + The trampoline is meant to be used in place of real calls to the external + function named 'fnname'. It marshals its input arguments, dumps them to + STDOUT, and waits for an answer on STDIN. 
+ """ try: - if force_stub: # old case - don't try to support suggested_primitive - raise NotImplementedError("sandboxing for external function '%s'" - % (fnname,)) - dump_arguments = rmarshal.get_marshaller(tuple(args_s)) load_result = rmarshal.get_loader(s_result) - - except (NotImplementedError, - rmarshal.CannotMarshal, - rmarshal.CannotUnmarshall), e: - msg = 'Not Implemented: %s' % (e,) - log.WARNING(msg) - def execute(*args): - not_implemented_stub(msg) - + except (rmarshal.CannotMarshal, rmarshal.CannotUnmarshall) as e: + msg = "Cannot sandbox function '%s': %s" % (fnname, e) + execute = make_stub(fnname, msg) else: def execute(*args): # marshal the function name and input arguments @@ -153,9 +162,12 @@ result = load_result(loader) loader.check_finished() return result - execute = func_with_new_name(execute, 'sandboxed_' + fnname) + execute.__name__ = 'sandboxed_%s' % (fnname,) + return execute + - ann = MixLevelHelperAnnotator(db.translator.rtyper) - graph = ann.getgraph(execute, args_s, s_result) +def _annotate(rtyper, f, args_s, s_result): + ann = MixLevelHelperAnnotator(rtyper) + graph = ann.getgraph(f, args_s, s_result) ann.finish() return graph diff -Nru pypy-4.0.1+dfsg/rpython/translator/sandbox/test/test_sandbox.py pypy-5.0.1+dfsg/rpython/translator/sandbox/test/test_sandbox.py --- pypy-4.0.1+dfsg/rpython/translator/sandbox/test/test_sandbox.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/sandbox/test/test_sandbox.py 2016-03-19 16:40:12.000000000 +0000 @@ -292,6 +292,21 @@ rescode = pipe.wait() assert rescode == 0 +def test_environ_items(): + def entry_point(argv): + print os.environ.items() + return 0 + + exe = compile(entry_point) + g, f = run_in_subprocess(exe) + expect(f, g, "ll_os.ll_os_envitems", (), []) + expect(f, g, "ll_os.ll_os_write", (1, "[]\n"), 3) + g.close() + tail = f.read() + f.close() + assert tail == "" + + class TestPrintedResults: def run(self, entry_point, args, expected): diff -Nru 
pypy-4.0.1+dfsg/rpython/translator/simplify.py pypy-5.0.1+dfsg/rpython/translator/simplify.py --- pypy-4.0.1+dfsg/rpython/translator/simplify.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/simplify.py 2016-03-19 16:40:12.000000000 +0000 @@ -24,22 +24,13 @@ if not isinstance(f, lltype._ptr): return None try: - funcobj = f._getobj() + funcobj = f._obj except lltype.DelayedPointer: return None try: - callable = funcobj._callable - except (AttributeError, KeyError, AssertionError): - return None - try: return funcobj.graph except AttributeError: return None - try: - callable = funcobj._callable - return translator._graphof(callable) - except (AttributeError, KeyError, AssertionError): - return None def replace_exitswitch_by_constant(block, const): @@ -414,7 +405,7 @@ CanRemove = {} for _op in ''' newtuple newlist newdict bool - is_ id type issubtype repr str len hash getattr getitem + is_ id type issubtype isinstance repr str len hash getattr getitem pos neg abs hex oct ord invert add sub mul truediv floordiv div mod divmod pow lshift rshift and_ or_ xor int float long lt le eq ne gt ge cmp coerce contains @@ -425,7 +416,6 @@ CanRemove[_op] = True del _op CanRemoveBuiltins = { - isinstance: True, hasattr: True, } diff -Nru pypy-4.0.1+dfsg/rpython/translator/test/test_simplify.py pypy-5.0.1+dfsg/rpython/translator/test/test_simplify.py --- pypy-4.0.1+dfsg/rpython/translator/test/test_simplify.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/test/test_simplify.py 2016-03-19 16:40:12.000000000 +0000 @@ -1,7 +1,7 @@ import py from rpython.translator.translator import TranslationContext, graphof from rpython.translator.backendopt.all import backend_optimizations -from rpython.translator.simplify import (get_graph, transform_dead_op_vars) +from rpython.translator.simplify import get_graph, transform_dead_op_vars from rpython.flowspace.model import Block, Constant, summary from rpython.conftest import option @@ 
-183,8 +183,10 @@ print op subgraph = get_graph(op.args[0], t) if subgraph is None: - # ignore 'get_errno' and 'set_errno' - if 'et_errno' not in repr(op.args[0]): + # ignore 'get_errno' and 'set_errno', and + # 'RPyGilRelease' and 'RPyGilAcquire' + if ('et_errno' not in repr(op.args[0]) and + 'RPyGil' not in repr(op.args[0])): found.append(op) else: walkgraph(subgraph) diff -Nru pypy-4.0.1+dfsg/rpython/translator/transform.py pypy-5.0.1+dfsg/rpython/translator/transform.py --- pypy-4.0.1+dfsg/rpython/translator/transform.py 2015-11-19 19:21:40.000000000 +0000 +++ pypy-5.0.1+dfsg/rpython/translator/transform.py 2016-03-19 16:40:12.000000000 +0000 @@ -189,8 +189,7 @@ self.links_followed[errlink] = True # fix the annotation of the exceptblock.inputargs etype, evalue = graph.exceptblock.inputargs - s_type = annmodel.SomeType() - s_type.is_type_of = [evalue] + s_type = annmodel.SomeTypeOf([evalue]) s_value = annmodel.SomeInstance(self.bookkeeper.getuniqueclassdef(Exception)) self.setbinding(etype, s_type) self.setbinding(evalue, s_value)